{
"source": "JeremyGibson/xml_parser",
"score": 3
}
#### File: xml_parser/tests/test__html_to_text.py
```python
import sys; sys.path.append("..")
import logging
import plac
import unittest
from tomes_tagger.lib.html_to_text import *
# enable logging.
logging.basicConfig(level=logging.DEBUG)
class Test_HTMLToText(unittest.TestCase):
def setUp(self):
# set attributes.
self.h2t = HTMLToText()
self.sample = "<html><head></head><body><a href=\"http://h.w\">{}</a></body></html>"
def test__shift_links(self):
""" Does extracting a.@href values into body text work? """
# format sample text.
html = self.sample.format("Hello World!")
# remove links.
html = ModifyHTML(html)
html.shift_links()
html = html.raw()
# check if result is as expected.
expected = self.sample.format("Hello World! [http://h.w]")
self.assertEqual(html, expected)
def test__remove_images(self):
""" Does removing image tags work? """
# add image tag to sample text.
img = "Hello World!<img src='hw.jpg' alt='Hello World!'>"
html = self.sample.format(img)
# remove images.
html = ModifyHTML(html)
html.remove_images()
html = html.raw()
# check if result is as expected.
expected = self.sample.format("Hello World!")
self.assertEqual(html, expected)
def test__html_to_text(self):
""" Does HTML to text conversion work? """
# format sample text.
html = self.sample.format("Hello World!")
# convert to plain text.
plain = self.h2t.get_text(html, is_raw=True)
plain = plain.strip()
# check if result is as expected.
expected = "Hello World!"
self.assertEqual(plain, expected)
def test__bad_data_gets_empty(self):
""" Does passing the wrong data type return an empty string? """
        # try to convert an int and a non-existent file.
empty_01 = self.h2t.get_text(1, is_raw=True)
empty_02 = self.h2t.get_text("file_not_exists.fake", is_raw=False)
# check if result is as expected.
self.assertEqual([empty_01, empty_02], ["", ""])
# CLI.
def main(html_file: "HTML file"):
"Prints plain text version of an HTML file.\
\nexample: `python3 test__html_to_text sample_files/sampleHTML.html`"
# read HTML file.
html = open(html_file).read()
# modify HTML.
html = ModifyHTML(html)
html.shift_links()
html.remove_images()
html = html.raw()
# convert to plain text.
h2t = HTMLToText()
plain = h2t.get_text(html, is_raw=True)
print(plain)
if __name__ == "__main__":
plac.call(main)
```
#### File: tomes_tagger/lib/text_to_nlp.py
```python
import json
import logging
import pycorenlp
import unicodedata
from textwrap import TextWrapper
class _CoreNLP():
""" A class to wrap pycorenlp (https://github.com/smilli/py-corenlp) and capture its
exceptions more explicitly.
Example:
    >>> tagger = _CoreNLP(host="http://localhost:9003")
>>> tagger.annotate("North Carolina") # dict.
"""
def __init__(self, host, mapping_file="", tags_to_override=[], *args, **kwargs):
""" Sets instance attributes.
Args:
- host (str): The URL for the CoreNLP server (ex: "http://localhost:9003").
- mapping_file (str): The relative path for the regexNER mapping file. This must
be located within the CoreNLP server's file directory.
- tags_to_override (list): The CoreNLP NER tag values to override if they
conflict with a custom tag in @mapping_file.
-*args/**kwargs: Any additional, optional arguments to pass to pycorenlp.
"""
# set logger; suppress logging by default.
self.logger = logging.getLogger(__name__)
self.logger.addHandler(logging.NullHandler())
# suppress third party logging that is not a warning or higher.
# per: https://stackoverflow.com/a/11029841
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# set CoreNLP server location and options.
self.host = host
self.mapping_file = mapping_file
self.tags_to_override = tags_to_override
self.options = {"annotators": "tokenize, ssplit, pos, ner, regexner",
"ner.useSUTime": "false", "ner.applyNumericClassifiers": "false",
"outputFormat": "json"}
# if specified, add option to use mapping file.
if self.mapping_file != "":
self.options["regexner.mapping"] = self.mapping_file
# if specified, add option to override default tags.
if len(self.tags_to_override) > 0:
self.options["regexner.backgroundSymbol"] = ",".join(self.tags_to_override)
# compose instance of main pycorenlp class.
self.nlp = pycorenlp.StanfordCoreNLP(self.host, *args, **kwargs)
def annotate(self, text):
""" Runs CoreNLP's NER tagger on @text.
Args:
- text (str): The text to send to CoreNLP's NER tagger.
Returns:
dict: The return value.
The CoreNLP NER tagger results.
Raises:
- TypeError: If @text is not a string.
- ConnectionError: If pycorenlp can't connect to the CoreNLP server.
"""
# verify that @text is a string.
if not isinstance(text, str):
msg = "Argument @text must be a string, got '{}' instead.".format(
type(text).__name__)
raise TypeError(msg)
# get NER tag results.
try:
results = self.nlp.annotate(text, properties=self.options)
return results
except Exception as err:
msg = "Can't connect to CoreNLP at: {}".format(self.host)
raise ConnectionError(msg)
class TextToNLP():
""" A class to extract tokens and their corresponding NER tags from a given text using
Stanford's CoreNLP.
Example:
>>> t2n = TextToNLP()
>>> t2n.get_NER("North Carolina") # list.
"""
def __init__(self, host="http://localhost:9003", chunk_size=50000, retry=True,
mapping_file="regexner_TOMES/mappings.txt", tags_to_remove=["DATE", "DURATION",
"MISC", "MONEY", "NUMBER", "O", "ORDINAL", "PERCENT", "SET", "TIME"]):
""" Sets instance attributes.
Args:
- host (str): The URL for the CoreNLP server (ex: "http://localhost:9003").
- chunk_size (int): The maximum string length to send to CoreNLP at once. Increase
it at your own risk.
        - retry (bool): If True and a call to self.get_NER() returns an empty list, one more
        attempt will be made to retrieve results, because occasional glitches in the
        CoreNLP server can produce empty results.
- mapping_file (str): See "help(_CoreNLP)" for more info.
        - tags_to_remove (list): If CoreNLP returns one of these NER tags, the tag will be
        replaced with an empty string.
"""
# set logger; suppress logging by default.
self.logger = logging.getLogger(__name__)
self.logger.addHandler(logging.NullHandler())
# set attributes.
self.host = host
self.chunk_size = chunk_size
self.retry = retry
self.mapping_file = mapping_file
self.tags_to_remove = tags_to_remove
self.stanford_tags = ["DATE", "DURATION", "LOCATION", "MISC", "MONEY", "NUMBER", "O",
"ORDINAL", "ORGANIZATION", "PERCENT", "PERSON", "SET", "TIME"]
# compose instance of CoreNLP wrapper class.
self.corenlp = _CoreNLP(self.host, mapping_file=self.mapping_file,
tags_to_override=self.stanford_tags)
@staticmethod
def _get_outer_space(text):
""" Returns the leading and trailing whitespace for a given @text.
Args:
text (str): The string from which to extract leading and trailing whitespace.
Returns:
tuple: The return value.
The first item (str) is @text's leading whitespace.
The second item (str) is @text's trailing whitespace.
"""
# assume values.
leading_space, trailing_space = "", ""
# get length of leading and trailing whitespace.
leading_distance = len(text) - len(text.lstrip())
trailing_distance = len(text) - len(text.rstrip())
# if leading or trailing space exists, update return values.
if leading_distance > 0 and leading_distance < len(text):
leading_space = text[:leading_distance]
if trailing_distance > 0 and trailing_distance < len(text):
trailing_space = text[-trailing_distance:]
return (leading_space, trailing_space)
@staticmethod
def _legalize_json_text(jtext):
""" A static method that alters @jtext by replacing vertical tabs, form feeds, and
carriage returns with line breaks and by removing control characters except for line
breaks and tabs. This is so that @jtext can be passed to json.loads() without raising
a JSONDecodeError.
Args:
- jtext (str): The text to alter.
Returns:
str: The return value.
"""
# legalize @jtext for use with json.loads().
for ws in ["\f","\r","\v"]:
jtext = jtext.replace(ws, "\n")
jtext = "".join([char for char in jtext if unicodedata.category(char)[0] != "C" or
char in ("\t", "\n")])
return jtext
def _encode_bad_response(self, response, charset="ascii", max_length=500):
""" In the event CoreNLP returns an erroneous or unexpected @response; this function
can convert that response to a string for logging purposes. The string will be encoded
using @charset as the encoding. The returned value will be truncated if its length
exceeds @max_length.
Args:
response (str): The response from CoreNLP.
charset (str): The character encoding with which to encode @response.
max_length (int): The maximum string length for the return value.
Returns:
str: The return value.
"""
# verify @response is a string; decode @response.
if not isinstance(response, str):
response = str(response)
response = response.encode(charset, errors="ignore").decode(charset, errors="ignore")
# if needed, truncate @response.
if len(response) > max_length:
self.logger.debug("Response exceeds @max_length of '{}'; truncating.".format(
max_length))
response = response[:max_length] + "..."
return response
def __process_NER_requests(def__get_NER):
""" A decorator for @def__get_NER that splits text into chunks if the string passed
to @def__get_NER exceeds @self.chunk_size in length. This is due to size limitations
in terms of how much data should be sent to @def__get_NER. This decorator also makes
one more call to @def__get_NER if @self.retry is True and @def__get_NER returns an
empty list for a given chunk, such as in cases where the NLP server doesn't respond
due to a temporary glitch.
Args:
- def__get_NER (function): An alias intended for self.get_NER().
Returns:
function: The return value.
Raises:
- TypeError: If @text passed to self.get_NER() is not a string.
"""
def processor(self, text):
# prepare output container.
ner_output = []
# verify that @text is a string.
if not isinstance(text, str):
self.logger.error("Argument @text must be a string, got: {}".format(
type(text).__name__))
self.logger.warning("Falling back to empty output.")
return []
# verify @text is not empty.
if len(text) == 0:
self.logger.warning("Argument @text was empty.")
self.logger.warning("Falling back to empty output.")
return []
# if needed, break @text into smaller chunks.
if len(text) <= self.chunk_size:
text_list = [text]
else:
self.logger.info("Text exceeds chunk size of: {}".format(self.chunk_size))
try:
wrapper = TextWrapper(width=self.chunk_size, break_long_words=False,
break_on_hyphens=False, drop_whitespace=False,
replace_whitespace=False)
text_list = wrapper.wrap(text)
except Exception as err:
self.logger.error(err)
self.logger.warning("Failed to chunk text; falling back to empty output.")
return []
# get NER tags for each item in @text_list.
i = 1
total_chunks = len(text_list)
for text_chunk in text_list:
self.logger.info("Getting NER tags for chunk {} of {}.".format(i,
total_chunks))
try:
tokenized_tagged = def__get_NER(self, text_chunk)
if len(tokenized_tagged) == 0 and self.retry:
self.logger.error("Failed to get NER tags for chunk.")
self.logger.info("Making another attempt to get NER tags for chunk.")
tokenized_tagged = def__get_NER(self, text_chunk)
except Exception as err:
self.logger.error(err)
tokenized_tagged = []
# if no tokens were returned, report on giving up.
if len(tokenized_tagged) == 0:
self.logger.warning("Falling back to empty output.")
return []
# otherwise, if tokens were returned, add them to @ner_output.
else:
# check for orphaned whitespace.
leading_space, trailing_space = self._get_outer_space(text_chunk)
# if missing, add leading whitespace to @ner_output.
if leading_space != "":
ner_output += [("", "", leading_space)]
# add tokens to @ner_output.
ner_output += tokenized_tagged
# if missing, add trailing whitespace to @ner_output.
if trailing_space != "":
ner_output += [("", "", trailing_space)]
i += 1
return ner_output
return processor
@__process_NER_requests
def get_NER(self, text):
""" Performs tokenization and NER tagging on @text.
Args:
- text (str): The text to tokenize and tag.
Returns:
list: The return value.
The NER tagger results as a list of tuples.
The first item in the tuple is a token. The second and third items are the NER
tag value and the trailing whitespace, respectively. All items are strings.
"""
# prepare output container.
ner_output = []
# get NER tags.
results = {}
try:
results = self.corenlp.annotate(text)
except ConnectionError as err:
self.logger.error(err)
return []
# ensure @results is correct data type.
if not isinstance(results, dict):
result_type = type(results).__name__
self.logger.warning("CoreNLP wrapper returned '{}', expected dict.".format(
result_type))
self.logger.info("Converting '{}' to dict.".format(result_type))
try:
results = self._legalize_json_text(results)
results = json.loads(results)
except json.decoder.JSONDecodeError as err:
self.logger.error(err)
self.logger.warning("Failed to convert results to dict.")
results_err = self._encode_bad_response(results)
self.logger.debug("CoreNLP response: {}".format(results_err))
return []
# verify @results contains required key.
if "sentences" not in results:
self.logger.warning("CoreNLP response is missing required field 'sentences'.")
results_err = self._encode_bad_response(results)
self.logger.debug("CoreNLP response: {}".format(results_err))
return []
# get values to loop through.
sentences = results["sentences"]
# if @sentences is null, return tuple with @text value as last item.
# Why? It appears CoreNLP will return a null when asked to tag only whitespace.
if sentences == [] and text.strip() == "":
ner_output.append(("", "", text))
return ner_output
# loop through data; append necessary values to @ner_output.
for sentence in sentences:
# verify @sentence contains required key.
if "tokens" not in sentence:
msg = "'tokens' field not found; nothing to append to output."
self.logger.warning(msg)
continue
# get token values.
tokens = sentence["tokens"]
for token in tokens:
# determine key to use for original text.
text_key = "originalText"
if text_key not in token:
text_key = "word"
# get tuple values.
try:
text, tag, tspace = token[text_key], token["ner"], token["after"]
except KeyError as err:
self.logger.error(err)
self.logger.warning("Token data not found; nothing to append to output.")
continue
# remove built-in CoreNLP tags as required.
if tag in self.tags_to_remove:
tag = ""
# if @tag is from Stanford, prepend null pattern ID and authority domain.
if tag in self.stanford_tags:
tag = "::stanford.edu::" + tag
# append final values to @ner_output.
token_group = text, tag, tspace
ner_output.append(token_group)
return ner_output
if __name__ == "__main__":
pass
```
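A minimal usage sketch (not part of the original module), assuming a CoreNLP server is already listening at the default host above and that the module is importable as `tomes_tagger.lib.text_to_nlp`, per the file path in the heading. It shows how the `(token, tag, trailing whitespace)` tuples returned by `get_NER()` can be recombined into the original text:
```python
# Hypothetical usage; requires a running CoreNLP server at http://localhost:9003.
from tomes_tagger.lib.text_to_nlp import TextToNLP

t2n = TextToNLP()
tagged = t2n.get_NER("Jane Doe moved to North Carolina.")

# Each item is (token, NER tag, trailing whitespace), so joining the first and
# third fields reconstructs the input while the second carries the tags.
for token, tag, whitespace in tagged:
    print(token, tag)
rebuilt = "".join(token + space for token, _, space in tagged)
```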
{
"source": "jeremygold/hexapod",
"score": 3
}
#### File: servo/scripts/Servo.py
```python
from math import *
import os
class Servo:
def __init__(self, number):
self.number = number
self.angle = 0
def set_servo_angle(self, angle):
self.angle = angle
self.write_value_to_hardware()
def get_servo_angle(self):
return self.angle
def write_value_to_hardware(self):
# Compensate for 500us - 2500us representing +/- 90 degrees, and ServoBlaster is in units of 10us
pwm_delay = 150 + (self.angle * 100 / 90)
if os.path.exists("/dev/servoblaster"):
# Send to PWM output
servo_command = "%u=%u\n" % (self.number, pwm_delay)
            with open("/dev/servoblaster", "w") as servo_device:
                servo_device.write(servo_command)
```
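A quick worked check of the pulse-width arithmetic above (a sketch, not part of the module): 500-2500 us maps to +/- 90 degrees and ServoBlaster counts in 10 us units, so the formula pins -90 degrees to 50, 0 degrees to 150, and +90 degrees to 250:
```python
# Worked check of the angle-to-delay mapping used in write_value_to_hardware().
def pwm_delay(angle):
    return 150 + (angle * 100 / 90)

assert pwm_delay(-90) == 50    # 500 us pulse
assert pwm_delay(0) == 150     # 1500 us, servo centred
assert pwm_delay(90) == 250    # 2500 us pulse
```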
{
"source": "jeremy-goldwasser/rule-vetting",
"score": 3
}
#### File: projects/tbi_pecarn/dataset.py
```python
from os.path import join as oj
import numpy as np
import os
import pandas as pd
from typing import Dict
import rulevetting.projects.tbi_pecarn.helper as helper
import rulevetting
import rulevetting.api.util
from rulevetting.templates.dataset import DatasetTemplate
class Dataset(DatasetTemplate):
def clean_data(self, data_path: str = rulevetting.DATA_PATH, **kwargs) -> pd.DataFrame:
'''
Convert the raw data file into a pandas dataframe.
Params
------
data_path: str
Path to all data files
kwargs: dict
Dictionary of hyperparameters specifying judgement calls
Returns
-------
cleaned_data: pandas DataFrames
'''
fname = "TBI PUD 10-08-2013.csv"
path_to_csv = os.path.join(data_path, "tbi_pecarn", "raw", fname)
try:
cleaned_data = pd.read_csv(path_to_csv, encoding="ISO-8859-1")
return cleaned_data
except:
print(f"Error: Raw .csv file is not at {path_to_csv}.")
return -1
def preprocess_data(self, data: pd.DataFrame, **kwargs) -> pd.DataFrame:
'''
Preprocesses the dataset by subsetting columns, removing rows,
imputing missing data, encoding categorical variables as binary column vectors,
and changing the name of the outcome variable.
Params
------
data: pd.DataFrame
kwargs: dict
Dictionary of hyperparameters specifying judgement calls
Returns
-------
preprocessed_data: pd.DataFrame
'''
if len(kwargs)==0:
kwargs = helper.default_judgement_calls_preprocessing()
print(kwargs)
# Recalculate outcome variable
data = helper.recalc_outcome(data)
# Remove patients with missing ciTBI outcomes, GCS below 14,
# or clear signs they need a CT scan
data = helper.subset_rows(data, **kwargs)
# Remove columns that are irrelevant, have too many NAs, or
# give instant positive diagnoses
outcome_name = self.get_outcome_name()
data = helper.subset_cols(data, outcome_name, **kwargs)
# Impute data
data = helper.impute_data(data, **kwargs)
# Encode categorical variables as binary variables
data = helper.binarize_data(data, outcome_name, **kwargs)
# Splits dataset by age
data = helper.split_by_age(data, **kwargs)
# Make new column variables
data = helper.combine_features(data)
# Change outcome name to "outcome"
preprocessed_data = helper.change_outcome_name(data, outcome_name)
return preprocessed_data
def extract_features(self, preprocessed_data: pd.DataFrame, **kwargs) -> pd.DataFrame:
'''
Performs feature selection procedure using logistic regression.
Also adds features necessary for baseline model to run.
Params
------
preprocessed_data: pd.DataFrame
kwargs: dict
Dictionary of hyperparameters specifying judgement calls
Returns
-------
extracted_data: pd.DataFrame
'''
# Contains outcome variable and most informative features
df = helper.derived_feats(preprocessed_data)
features = list(df.columns)
# Adds baseline columns
baseline = self.get_baseline_keys()
for feature in baseline:
if feature in preprocessed_data.columns:
features.append(feature)
extracted_data = preprocessed_data[features]
return extracted_data
def get_outcome_name(self) -> str:
return 'PosIntFinal' # return the name of the outcome we are predicting
def get_dataset_id(self) -> str:
return 'tbi_pecarn' # return the name of the dataset id
def get_meta_keys(self) -> list:
'''
Return list of keys which should not be used in fitting but are still useful for analysis.
'''
return []
def get_baseline_keys(self) -> list:
'''
Returns predictors used in paper's baseline model.
'''
return ['AMS_1',
'AgeTwoPlus_1',
'HemaLoc_2_or_3',
'LocLen_2_3_4',
'High_impact_InjSev_3',
'SFxPalp_1_or_2',
'ActNorm_0',
'Vomit_1',
'SFxBas_1',
'HASeverity_3'
]
def get_judgement_calls_dictionary(self) -> Dict[str, Dict[str, list]]:
return {
'clean_data': {},
'preprocess_data': {
# drop cols with vals missing over this fraction of the time
'frac_missing_allowed': [0.1, 0.01],
# Method for imputing missing data
# KNN works but is slow, so I've commented it out
'imputation': ["median", "none"],#"KNN",
# Whether or not to exclude patients & variables that clearly indicate a CT scan is necessary
# Namely, signs of a basilar or palpable skull fracture, or a bulging anterior fontanelle
'only_mildest_trauma': [False, True],
# "no" = return whole dataset;
# "older" = subset children 2 and older; "younger" = age <2
'split_by_age': ["no", "older", "younger"]
},
'extract_features': {},
}
if __name__ == '__main__':
dset = Dataset()
df_train, df_tune, df_test = dset.get_data(save_csvs=True, run_perturbations=True)
    print('successfully processed data\nshapes:',
df_train.shape, df_tune.shape, df_test.shape,
'\nfeatures:', list(df_train.columns))
```
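A hedged sketch of driving the pipeline step by step with explicit judgement calls; the keyword names come from `get_judgement_calls_dictionary()` above, and the assumption that `preprocess_data` accepts one chosen value per option depends on the `helper` module, which is not shown here:
```python
# Hypothetical manual run; each judgement call is given a single value picked
# from the option lists returned by get_judgement_calls_dictionary().
dset = Dataset()
raw = dset.clean_data()
preprocessed = dset.preprocess_data(
    raw,
    frac_missing_allowed=0.1,
    imputation="median",
    only_mildest_trauma=False,
    split_by_age="no",
)
extracted = dset.extract_features(preprocessed)
print(extracted.shape)
```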
{
"source": "jeremyGoupil/cucumber-jvm",
"score": 3
}
#### File: jython/bin/cucumber-jvm.py
```python
import sys, inspect, os
jar_path = os.path.dirname(inspect.getfile(inspect.currentframe())) + "/cucumber-jython-full.jar"
sys.path.append(jar_path)
from java.io import File
from java.net import URLClassLoader
from cucumber.cli import Main
from cucumber.runtime import Runtime
from cucumber.runtime.jython import JythonBackend
url = File(jar_path).toURL()
cl = URLClassLoader([url], Main.getClassLoader())
def createRuntime(resourceLoader, gluePaths, classLoader, dryRun):
# TODO - pass in current jython runtime - PythonInterpreter
jythonBackend = JythonBackend(resourceLoader)
return Runtime(resourceLoader, gluePaths, classLoader, [jythonBackend], dryRun)
Main.run(sys.argv[1:], cl, createRuntime)
```
{
"source": "jeremygray/easygui_qt",
"score": 3
}
#### File: easygui_qt/demos/launcher.py
```python
import locale
import subprocess
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../"))
import easygui_qt
from PyQt4 import QtCore, QtGui
def launch(name, *args):
"""Executes a script designed specifically for this launcher.
The parameter "name" is the name of the function to be tested
which is passed as an argument to the script.
"""
filename = os.path.join(os.path.dirname(__file__), '_launch_widget.py')
command = ['python', filename, name]
if args:
command.extend(args)
output = subprocess.check_output(command)
try:
output = output.decode(encoding='UTF-8')
except:
try:
output = output.decode(encoding=locale.getdefaultlocale()[1])
except:
print("could not decode")
return output
class Dialog(QtGui.QDialog):
def __init__(self, parent=None):
flags = QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint
super(Dialog, self).__init__(parent, flags=flags)
frameStyle = QtGui.QFrame.Sunken | QtGui.QFrame.Panel
layout = QtGui.QGridLayout()
layout.setColumnStretch(1, 1)
layout.setColumnMinimumWidth(1, 250)
# generate a bunch of function-demo buttons and output labels:
self.button = {}
self.setStyleSheet("""QToolTip {
color: black;
}
QLabel{
background-color: white;
}
QPushButton {
font-weight: bold;
}""")
self.label = {}
fxns = ['get_string', 'get_password', 'get_username_password',
'get_many_strings', 'get_new_password', 'get_int', 'get_float',
'get_choice', 'get_list_of_choices',
'get_yes_or_no', 'get_continue_or_cancel',
'get_color_hex', 'get_color_rgb',
'get_date', 'get_directory_name',
'get_file_names', 'get_save_file_name',
'get_language', 'set_font_size',
'show_file', 'show_code', 'get_abort', 'find_help']
for n, fxn in enumerate(fxns):
self.button[fxn] = QtGui.QPushButton(fxn + "()")
self.button[fxn].clicked.connect(getattr(self, fxn))
self.button[fxn].setToolTip(getattr(easygui_qt, fxn).__doc__)
self.label[fxn] = QtGui.QLabel()
self.label[fxn].setFrameStyle(frameStyle)
layout.addWidget(self.button[fxn], n, 0)
layout.addWidget(self.label[fxn], n, 1)
# handle special-case display items separately:
n += 1
self.python_version_label = QtGui.QLabel()
layout.addWidget(self.python_version_label, n, 0, 2, 2)
output = subprocess.check_output(
['python', '-c', "import sys;print(sys.version)"])
self.python_version_label.setText(
"Python version: {}".format(output.decode()))
n += 2
self.cancel_btn = QtGui.QPushButton("Quit")
self.cancel_btn.clicked.connect(self.close)
layout.addWidget(self.cancel_btn, n, 0)
self.handle_exception_label = QtGui.QLabel()
self.handle_exception_label.setToolTip(
easygui_qt.handle_exception.__doc__)
self.handle_exception_label.setText(" handle_exception() not shown" +
" [hover mouse for more info]")
layout.addWidget(self.handle_exception_label, n, 1)
self._layout = layout
self.setLayout(layout)
self.setWindowTitle("EasyGUI_Qt Widget Launcher")
self.show()
self.raise_()
self.activateWindow()
def get_string(self):
output = launch('get_string')
if sys.version_info < (3,):
output = output.encode(encoding=locale.getdefaultlocale()[1])
self.label['get_string'].setText("{}".format(output))
def get_password(self):
output = launch('get_password')
if sys.version_info < (3,):
output = output.encode(encoding=locale.getdefaultlocale()[1])
self.label['get_password'].setText("{}".format(output))
def get_username_password(self):
output = launch('get_username_password')
if sys.version_info < (3,):
output = output.encode(encoding=locale.getdefaultlocale()[1])
self.label['get_username_password'].setText("{}".format(output))
def get_many_strings(self):
output = launch('get_many_strings')
if sys.version_info < (3,):
output = output.encode(encoding=locale.getdefaultlocale()[1])
self.label['get_many_strings'].setText("{}".format(output))
def get_new_password(self):
output = launch('get_new_password')
if sys.version_info < (3,):
output = output.encode(encoding=locale.getdefaultlocale()[1])
self.label['get_new_password'].setText("{}".format(output))
def get_int(self):
output = launch('get_int')
self.label['get_int'].setText("{}".format(output))
def get_float(self):
output = launch('get_float')
self.label['get_float'].setText("{}".format(output))
def get_choice(self):
output = launch('get_choice')
self.label['get_choice'].setText("{}".format(output))
def get_list_of_choices(self):
output = launch('get_list_of_choices')
self.label['get_list_of_choices'].setText("{}".format(output))
def get_yes_or_no(self):
output = launch('get_yes_or_no')
self.label['get_yes_or_no'].setText("{}".format(output))
def get_continue_or_cancel(self):
output = launch('get_continue_or_cancel')
self.label['get_continue_or_cancel'].setText("{}".format(output))
def get_color_hex(self):
color = launch('get_color_hex')
self.label['get_color_hex'].setText(color)
def get_color_rgb(self):
color = launch('get_color_rgb')
self.label['get_color_rgb'].setText(color)
def get_date(self):
output = launch('get_date')
if sys.version_info < (3,):
output = output.encode(encoding=locale.getdefaultlocale()[1])
self.label['get_date'].setText("{}".format(output))
def get_directory_name(self):
output = launch('get_directory_name')
self.label['get_directory_name'].setText("{}".format(output))
def get_file_names(self):
output = launch('get_file_names')
self.label['get_file_names'].setText("{}".format(output))
self.label['get_file_names'].setWordWrap(True)
self.adjustSize()
def get_save_file_name(self):
output = launch('get_save_file_name')
self.label['get_save_file_name'].setText("{}".format(output))
def get_language(self):
output = launch('get_language')
output = output.split()[0]
self.label['get_language'].setText("{}".format(output))
def set_language(self):
_loc = launch('get_string', "Enter desired language code: 'fr', 'es', etc.")
output = launch('set_language', _loc)
self.set_language_label.setText("{}".format(output))
def set_font_size(self):
font_size = launch('get_int', "Enter desired font-size.", "title",
"12", "10", "20")
output = launch('set_font_size', font_size)
output = output.split()[0]
self.label['set_font_size'].setText("{}".format(output))
def show_file(self):
launch('show_file')
def show_code(self):
launch('show_code', 'Zen of Python', 'import this')
def get_abort(self):
launch('get_abort')
def find_help(self):
launch('find_help')
def main():
_ = QtGui.QApplication([])
dialog = Dialog()
dialog.exec_()
if __name__ == '__main__':
main()
```
{
"source": "JeremyGrosser/carrier",
"score": 2
}
#### File: carrier/carrier/control.py
```python
from subprocess import Popen
from threading import Thread
import os
class VirtualMachine(object):
STORAGE_PATH = '/mnt/vm'
def __init__(self, config):
self.config = config
self.process = None
def create_disk(self):
p = Popen('/usr/bin/qemu-img create -f qcow2 %s/%s.root %iG' % (
self.STORAGE_PATH, self.config['name'], self.config['disk']), shell=True)
p.wait()
p = Popen('/usr/bin/qemu-img create -f raw %s/%s.swap 1G' % (
self.STORAGE_PATH, self.config['name']), shell=True)
p.wait()
def delete_disk(self):
os.unlink('%s/%s.root' % (self.STORAGE_PATH, self.config['name']))
os.unlink('%s/%s.swap' % (self.STORAGE_PATH, self.config['name']))
def update(self, newconfig):
for key in newconfig:
            if key not in ('boot', 'memory', 'mac', 'console', 'nic', 'vnc'):
raise Exception('Cannot modify config option %s, it is read-only' % key)
else:
self.config[key] = newconfig[key]
return self.config
def delete(self):
self.delete_disk()
def start(self):
self.process = Popen(('/usr/bin/kvm -hda %s/%s.root -hdb %s/%s.swap -m %i -vnc :%i -serial mon:telnet:127.0.0.1:%i,server,nowait -net nic,macaddr=%s,model=%s -net tap -boot %s -monitor /dev/null' % (
self.STORAGE_PATH, self.config['name'],
self.STORAGE_PATH, self.config['name'],
self.config['memory'],
self.config['vnc'],
self.config['console'],
self.config['mac'],
self.config['nic'],
self.config['boot'],
)).split(' '))
def stop(self):
#self.process.terminate()
os.kill(self.process.pid, 15)
self.process.wait()
def get_state(self):
if not self.process:
return 'STOPPED'
self.process.poll()
        if self.process.returncode is None:
return 'RUNNING'
else:
return 'STOPPED'
def get_config(self):
return self.config
```
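For reference, a sketch of the config dictionary `VirtualMachine` expects, inferred from the keys read in `create_disk()`, `update()` and `start()`; the values below are purely illustrative:
```python
# Illustrative config; the keys are the ones referenced by the class above.
config = {
    'name': 'testvm',               # basename for <name>.root / <name>.swap
    'disk': 10,                     # root disk size in GB (qcow2)
    'memory': 512,                  # passed to kvm -m
    'vnc': 1,                       # VNC display number (-vnc :1)
    'console': 4001,                # telnet port for the serial monitor
    'mac': '52:54:00:12:34:56',
    'nic': 'virtio',
    'boot': 'c',                    # boot from the first hard disk
}

vm = VirtualMachine(config)
vm.create_disk()
vm.start()
print(vm.get_state())               # 'RUNNING' while the kvm process is alive
```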
{
"source": "JeremyGrosser/clusto",
"score": 3
}
#### File: clusto/commands/info.py
```python
import argparse
import re
import sys
import clusto
from clusto import script_helper
JSON=False
YAML=False
try:
import yaml
YAML=True
except ImportError:
pass
try:
import simplejson as json
JSON=True
except ImportError:
try:
import json
JSON=True
    except ImportError:
pass
class Info(script_helper.Script):
'''
This is a script that displays information about a certain object
(or list of objects) and displays it to stdout.
'''
def __init__(self):
script_helper.Script.__init__(self)
def format_line(self, key, value, pad=20):
if value:
if isinstance(value, list):
value = ', '.join(value)
key += ':'
print key.ljust(pad, ' '), value
def print_summary(self, items):
self.debug('Printing with format=summary')
for item in items:
self.format_line('Name', item.pop('name'))
self.format_line('Type', item.pop('type'))
if 'ip' in item.keys():
self.format_line('IP', item.pop('ip'))
self.format_line('Description', item.pop('description'))
if 'parents' in item.keys():
self.format_line('Parents', item.pop('parents'))
if 'contents' in item.keys():
self.format_line('Contents', item.pop('contents'))
print '\n'
keys = sorted(item.keys())
for k in keys:
self.format_line(k.capitalize(), item[k])
print '-' * 80
def print_oneline(self, items):
self.debug('Printing with format=oneline')
for item in items:
line = '%s(%s);' % (item.pop('name'), item.pop('type'))
line += '%s;' % (','.join([ '"%s"' % _ for _ in item.pop('description') ]))
if 'ip' in item.keys():
line += 'ip=%s;' % (','.join([ _ for _ in item.pop('ip') ]))
if 'parents' in item.keys():
line += 'parents=%s;' % (','.join([ _ for _ in item.pop('parents') ]))
if 'contents' in item.keys():
line += 'contents=%s;' % (','.join([ _ for _ in item.pop('contents') ]))
keys = sorted(item.keys())
for k in keys:
line += '%s=%s;' % (k, item[k])
print line
def print_json(self, items):
self.debug('Printing with format=json')
print json.dumps(items, sort_keys=True, indent=2)
def print_yaml(self, items):
self.debug('Printing with format=yaml')
print yaml.safe_dump(items, encoding='utf-8',
explicit_start=True, default_flow_style=False)
def run(self, args):
if not args.items:
print 'You need to provide at least one item. Use --help'
return 0
item_list = []
self.debug('Fetching the list of items: %s' % ','.join(args.items))
for item in args.items:
obj = clusto.get(item)
if not obj:
self.warn("The item %s couldn't be found" % item)
continue
obj = obj[0]
self.debug('Object found! %s' % obj)
item_attrs = {
'name': obj.name,
'type': obj.type,
}
# Fetch system attrs
for attr in obj.attrs(key='system'):
item_attrs[attr.subkey] = attr.value
# fetch description(s)
values = obj.attrs(key='description')
if values:
item_attrs['description'] = [ _.value for _ in values]
else:
item_attrs['description'] = ''
# fetch parent(s)
values = obj.parents()
if values:
item_attrs['parents'] = [ _.name for _ in values ]
# fetch content(s)
values = obj.contents()
if values:
item_attrs['contents'] = [ _.name for _ in values ]
# fetch ip(s)
if 'get_ips' in dir(obj) and callable(getattr(obj, 'get_ips')):
values = obj.get_ips()
if values:
item_attrs['ip'] = [ _ for _ in values ]
# fetch mac(s)
values = [ _ for _ in obj.attrs(key='port-nic-eth') if _.subkey.find('mac') != -1 ]
if values:
for value in values:
item_attrs['mac%d' % value.number] = value.value
item_list.append(item_attrs)
getattr(self, 'print_%s' % args.format)(item_list)
def _add_arguments(self, parser):
choices = ['summary', 'oneline']
if JSON:
choices.append('json')
if YAML:
choices.append('yaml')
parser.add_argument('--format', choices=choices, default='summary',
help='What format to use to display the info, defaults to "summary"')
parser.add_argument('items', nargs='*', metavar='item',
help='List of one or more objects to show info')
def main():
info, args = script_helper.init_arguments(Info)
return(info.run(args))
if __name__ == '__main__':
sys.exit(main())
```
#### File: drivers/resourcemanagers/ipmanagertest.py
```python
import clusto
from clusto.test import testbase
from clusto.drivers import IPManager, BasicServer, ResourceTypeException, ResourceException
import IPy
class IPManagerTest(testbase.ClustoTestBase):
def data(self):
ip1 = IPManager('a1', gateway='192.168.1.1', netmask='255.255.255.0',
baseip='192.168.1.0')
ip2 = IPManager('b1', gateway='10.0.128.1', netmask='255.255.252.0',
baseip='10.0.128.0')
ip3 = IPManager('c1', gateway='172.16.40.0', netmask='255.255.255.0',
baseip='172.16.40.0')
ip4 = IPManager('c2', gateway='172.16.0.0', netmask='255.255.0.0',
baseip='172.16.0.0')
s = BasicServer('s1')
def testBadIPAllocation(self):
ip1, ip2, s1 = map(clusto.get_by_name, ['a1', 'b1', 's1'])
self.assertRaises(ResourceTypeException, ip1.allocate, s1, '10.2.3.4')
def testNewIP(self):
ip1, ip2, s1 = map(clusto.get_by_name, ['a1', 'b1', 's1'])
num = 50
for i in range(num):
ip1.allocate(s1)
self.assertEqual(ip1.count, num)
self.assertEqual(len(ip1.resources(s1)), num)
self.assertEqual(ip1.owners('192.168.1.' + str(num+1)), [s1])
def testGetIPManager(self):
ip1, ip2 = map(clusto.get_by_name, ['a1', 'b1'])
self.assertEqual(ip1, IPManager.get_ip_manager('192.168.1.23'))
self.assertEqual(ip2, IPManager.get_ip_manager('10.0.129.22'))
def testGetIPManagers(self):
ip3, ip4 = map(clusto.get_by_name, ['c1', 'c2'])
self.assertEqual([ip3, ip4], IPManager.get_ip_managers('172.16.40.2'))
self.assertEqual([ip4], IPManager.get_ip_managers('172.16.0.2'))
self.assertEqual([], IPManager.get_ip_managers('192.168.40.1'))
def testGetIP(self):
ip1, ip2, s1 = map(clusto.get_by_name, ['a1', 'b1', 's1'])
ip1.allocate(s1)
ip2.allocate(s1)
self.assertEqual(sorted(IPManager.get_ips(s1)),
sorted(['192.168.1.2', '10.0.128.2']))
def testReserveIP(self):
ip1, ip2, s1 = map(clusto.get_by_name, ['a1', 'b1', 's1'])
ip2.allocate(ip2, '10.0.128.4')
self.assertRaises(ResourceException, ip2.allocate, s1, '10.0.128.4')
def testAdditionalAttrs(self):
ip1, ip2, s1 = map(clusto.get_by_name, ['a1', 'b1', 's1'])
ip1.allocate(s1, '192.168.1.20')
self.assertEqual(str(s1.attr_value(key='ip', subkey='ipstring')), '192.168.1.20')
self.assertEqual(str(s1.attr_value(key='ip', subkey='cidr')), '192.168.1.20/24')
```
{
"source": "JeremyGrosser/dewpoint",
"score": 3
}
#### File: dewpoint/dewpoint/aws.py
```python
import urllib.parse
import urllib.request
import urllib.error
import hashlib
import hmac
import time
import json
def format_time(ts):
return time.strftime('%Y%m%dT%H%M%SZ', ts)
def format_date(ts):
return time.strftime('%Y%m%d', ts)
def parse_uri(req):
method = req.get_method()
if method == b'POST':
query = req.data
uri = req.selector
else:
s = req.selector
if s.find('?') != -1:
uri, query = s.split('?', 1)
else:
uri = s
query = ''
return method, uri, query
def canonical_headers(req):
keys = []
headers = []
for key, value in sorted(req.headers.items()):
key = key.strip().lower()
value = value.strip()
headers.append('%s:%s' % (key, value))
keys.append(key)
canon = '\n'.join(headers) + '\n\n' + ';'.join(keys)
return canon
def canonical_hash(req):
method, uri, query = parse_uri(req)
query = urllib.parse.parse_qsl(query)
query.sort(key=lambda x: x[0])
query = urllib.parse.urlencode(query)
headers = canonical_headers(req)
if req.data is not None:
payload = req.data
else:
payload = b''
canon = '{method}\n{uri}\n{query}\n{headers}\n'.format(
method=method,
uri=uri,
query=query,
headers=headers)
canon += hashlib.sha256(payload).hexdigest()
return hashlib.sha256(canon.encode('utf8')).hexdigest()
class AWSAuthHandlerV4(urllib.request.BaseHandler):
def __init__(self, key, secret, region, service, timeout=None):
self.key = key
self.secret = secret
self.region = region
self.service = service
self.timeout = timeout
def signing_key(self, scope):
key = b'AWS4' + self.secret.encode('utf8')
for msg in scope.split('/'):
key = hmac.digest(key, msg.encode('utf8'), hashlib.sha256)
return key
def sign(self, req):
canon_hash = canonical_hash(req)
scope = '{date}/{region}/{service}/aws4_request'.format(
date=format_date(req.timestamp),
region=self.region,
service=self.service)
signing_key = self.signing_key(scope)
string_to_sign = '\n'.join([
'AWS4-HMAC-SHA256',
format_time(req.timestamp),
scope,
canon_hash,
])
string_to_sign = string_to_sign.encode('utf8')
signature = hmac.digest(signing_key, string_to_sign, hashlib.sha256)
req.add_header('Authorization', 'AWS4-HMAC-SHA256 Credential={key}/{scope}, SignedHeaders={signed_headers}, Signature={signature}'.format(
key=self.key,
scope=scope,
signed_headers=canonical_headers(req).rsplit('\n', 1)[1],
signature=signature.hex()))
def http_request(self, req):
req.timestamp = time.gmtime(time.time())
if 'Host' not in req.headers:
req.add_header('Host', req.host)
if 'x-amz-date' not in req.headers:
req.add_header('x-amz-date', format_time(req.timestamp))
self.sign(req)
req.timeout = self.timeout
return req
def https_request(self, req):
return self.http_request(req)
class AWSClient:
def __init__(self, auth_handler, endpoint, timeout=None):
self.opener = urllib.request.build_opener(auth_handler)
self.endpoint = endpoint
self.timeout = timeout
def request(self, method, url, data=None, headers=None):
url = self.endpoint + url
if data is not None:
if method != 'POST':
url = '%s?%s' % (url, urllib.parse.urlencode(data))
data = None
if headers is None:
headers = {}
req = urllib.request.Request(url, data, headers, method=method)
try:
resp = self.opener.open(req, timeout=self.timeout)
status = resp.code
headers = resp.headers
response = resp.read()
except urllib.error.HTTPError as e:
status = e.code
headers = e.headers
response = e.fp.read()
return status, headers, response
```
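A hedged sketch of wiring the SigV4 handler and client together; the credentials, endpoint, and query parameters are placeholders (roughly what an EC2 `DescribeRegions` call would look like, though nothing in the module is specific to EC2):
```python
# Illustrative only; the key, secret, endpoint and action are placeholders.
auth = AWSAuthHandlerV4(
    key='AKIA...',
    secret='...',
    region='us-east-1',
    service='ec2',
)
client = AWSClient(auth, endpoint='https://ec2.us-east-1.amazonaws.com', timeout=30)

# For non-POST requests, AWSClient.request() folds `data` into the query string.
status, headers, body = client.request('GET', '/', data={
    'Action': 'DescribeRegions',
    'Version': '2016-11-15',
})
print(status)
```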
{
"source": "JeremyGrosser/gophernews",
"score": 3
}
#### File: gophernews/digg/api.py
```python
from exceptions import Exception
from urllib import urlencode
import urllib2
from rfc822 import parsedate
from time import time, mktime
try:
import json
except ImportError:
import simplejson as json
from digg_globals import POST_ACTIONS
class DiggError(Exception):
"""
Exception thrown by the Digg object when there is an
error interacting with digg.com.
"""
pass
class DiggCall(object):
def __init__(self, endpoint, methodname='', user_agent=None, cache=None):
self.endpoint = endpoint
self.methodname = methodname
self.user_agent = user_agent
self.cache = cache
def __getattr__(self, k):
try:
return object.__getattr__(self, k)
except AttributeError:
return DiggCall(self.endpoint, '.'.join((self.methodname, k)).lstrip('.'), cache=self.cache)
def __call__(self, **kwargs):
kwargs['method'] = self.methodname
kwargs['type'] = 'json'
kwargs = urlencode(kwargs)
if self.cache:
if kwargs in self.cache:
expires, response = self.cache[kwargs]
                if expires > time():
                    return response
                else:
                    del self.cache[kwargs]
if self.methodname in POST_ACTIONS:
# HTTP POST
req = urllib2.Request('%s?method=%s' % (self.endpoint, self.methodname), kwargs)
raise NotImplementedError("This method requires OAuth, which hasn't been implemented yet")
else:
# HTTP GET
req = urllib2.Request('%s?%s' % (self.endpoint, kwargs))
if self.user_agent:
req.add_header('User-Agent', self.user_agent)
try:
handle = urllib2.urlopen(req)
response = json.loads(handle.read())
if self.cache != None:
self.update_cache(key=kwargs, value=response, headers=handle.info())
return response
except urllib2.HTTPError, e:
raise DiggError('Digg sent status %i for method: %s\ndetails: %s' % (e.code, self.methodname, e.fp.read()))
def update_cache(self, key, value, headers):
cache = True
expires = int(time()) + 3600
expires = headers.get('Expires', None)
if expires:
expires = mktime(parsedate(expires))
else:
expires = int(time()) + 3600
cache_control = headers.get('Cache-Control', '')
for control in cache_control.split(','):
control = control.strip(' ')
control = control.split('=')
if len(control) == 2:
k, v = control
else:
k = control
v = None
if k in ('private', 'no-cache', 'no-store', 'must-revalidate'):
cache = False
if k in ('max-age', 'min-fresh'):
try:
expires = int(time()) + int(v)
except ValueError:
pass
if cache:
self.cache[key] = (expires, value)
class Digg(DiggCall):
def __init__(self, endpoint='http://services.digg.com/1.0/endpoint', user_agent='python-digg/0.1', cache=None):
DiggCall.__init__(self, endpoint=endpoint, user_agent=user_agent, cache=cache)
__all__ = ['Digg']
```
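The attribute chaining in `DiggCall.__getattr__` is what lets callers spell API methods as dotted attributes, as `gopher.py` below does; a short sketch of the translation (the topic name is illustrative, and the Digg API itself is long since retired):
```python
# digg.story.getPopular builds a DiggCall whose methodname is 'story.getPopular';
# calling it issues GET <endpoint>?method=story.getPopular&type=json&topic=...
digg = Digg(cache={})
popular = digg.story.getPopular(topic='technology')
for story in popular['stories']:
    print('%s (%s diggs)' % (story['title'], story['diggs']))
```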
#### File: JeremyGrosser/gophernews/gopher.py
```python
from SocketServer import TCPServer, ThreadingMixIn, StreamRequestHandler
from digg.api import Digg
from time import time
from urlparse import urlparse
import simplejson as json
import textwrap
import urllib
import re
digg = Digg()
ADDRESS = 'synack.me\t70'
htmlpattern = re.compile('<[^>]+>')
def clean_text(text, indent=0, wrap=72):
delim = '\r\n' + (' ' * indent)
text = text.encode('ascii', 'ignore')
text = htmlpattern.sub('', text)
text = textwrap.wrap(text, wrap)
text = delim.join(text)
return text
def shorturl(url):
res = urllib.urlopen('http://is.gd/api.php?longurl=' + url)
return res.read().strip('\r\n\t ')
class ThreadedTCPServer(ThreadingMixIn, TCPServer):
allow_reuse_address = True
class GopherHandler(StreamRequestHandler):
def __init__(self, request, client_address, server):
StreamRequestHandler.__init__(self, request, client_address, server)
def handle(self):
self.sites = {
'digg': DiggSite(self),
'hackernews': HNSite(self),
}
line = self.rfile.readline().rstrip('\r\n')
log = file('/home/synack/src/news-gopher/access.log', 'a+')
log.write('%s %i %s\n' % (self.client_address[0], int(time()), line))
log.close()
if line == '' or line == '-)':
self.handle_index()
return
site = line.split('/')[0]
if site in self.sites:
self.sites[site].handle(line)
else:
self.handle_notfound(line)
def handle_index(self):
#self.wfile.write(file('motd.txt', 'r').read())
for site in self.sites:
self.wfile.write('1%s\t%s/\t%s\r\n' % (site, site, ADDRESS))
self.wfile.write('.\r\n')
def handle_notfound(self, line):
print 'GopherHandler.handle_notfound', repr(line)
return
class Site(object):
def __init__(self, handler):
self.handler = handler
def write(self, data):
self.handler.wfile.write(data)
def handle(self, line):
pass
def handle_notfound(self, line):
print 'Site.handle_notfound', repr(line)
return
class HNSite(Site):
def __init__(self, *args, **kwargs):
Site.__init__(self, *args, **kwargs)
self.pages = {
'frontpage.list': self.page_frontpage,
'ask.list': self.page_ask,
'new.list': self.page_new,
}
def request(self, path):
res = urllib.urlopen('http://api.ihackernews.com' + path)
return json.loads(res.read())
def handle(self, line):
print 'HNHandler.handle', repr(line)
line = line.split('/', 1)[1]
if line == '':
self.handle_index()
if line.endswith('.story'):
self.handle_story(line)
return
if line in self.pages:
self.pages[line]()
return
def handle_index(self):
self.write(file('/home/synack/src/news-gopher/motd/hn.txt', 'r').read())
for page in ('frontpage', 'new', 'ask'):
self.write('1%s\thackernews/%s.list\t%s\r\n' % (page, page, ADDRESS))
self.write('.\r\n')
def page_frontpage(self):
self.page('/page')
def page_ask(self):
self.page('/ask')
def page_new(self):
self.page('/new')
def page(self, endpoint):
try:
data = self.request(endpoint)
except:
self.write('Error reading from api.ihackernews.com\r\n\r\n.\r\n')
return
for story in data['items']:
self.write('0%s\thackernews/%s.story\t%s\r\ni%s\r\ni%i points - %i comments\r\n\r\n' % (
story['title'].encode('ascii', 'replace'),
story['id'],
ADDRESS,
urlparse(story['url']).netloc,
story['points'],
story['commentCount']))
self.write('.\r\n')
def handle_story(self, line):
id = line.rsplit('.', 1)[0]
try:
post = self.request('/post/' + id)
except:
self.write('Error from api.ihackernews.com\r\n\r\n.\r\n')
return
self.write('%s\r\n%s -- %s\r\n%i points by %s %s\r\n%i comments\r\n\r\n%s\r\n' % (
post['title'].encode('ascii', 'replace'),
urlparse(post['url']).netloc,
shorturl(post['url']),
post['points'],
post['postedBy'],
post['postedAgo'],
post['commentCount'],
clean_text(post.get('text', ''))))
self.write('\r\nComments\r\n' + ('-' * 72) + '\r\n')
for comment in post['comments']:
self.write_comment(comment)
def write_comment(self, comment, indent=0):
space = ' ' * indent
wrap = 72 - (indent * 4)
self.write('%s%i points by %s %s\r\n%s%s\r\n' % (
space,
comment['points'],
comment['postedBy'],
comment['postedAgo'],
space, clean_text(comment['comment'], indent, wrap)))
self.write(space + ('-' * wrap) + '\r\n')
for comment in comment.get('children', []):
self.write_comment(comment, indent + 1)
class DiggSite(Site):
def handle(self, line=None):
print 'DiggHandler.handle', repr(line)
line = line.split('/', 1)[1]
if line == '':
self.handle_index()
if line.endswith('.list'):
self.handle_storylist(line)
return
if line.endswith('.story'):
self.handle_story(line)
return
self.handle_notfound(line)
def handle_index(self):
self.write(file('/home/synack/src/news-gopher/motd/digg.txt', 'r').read())
topics = digg.topic.getAll()['topics']
for topic in topics:
self.write('1%s\tdigg/%s.list\t%s\r\n' % (topic['name'], topic['short_name'], ADDRESS))
self.write('.\r\n')
def handle_storylist(self, topic):
topic = topic.rsplit('.')[0]
for story in digg.story.getPopular(topic=topic)['stories']:
self.write('0%s\tdigg/%s/%s.story\t%s\r\ni%i diggs - %i comments\r\ni%s\r\n\r\n' % (
story['title'].encode('ascii', 'replace'),
story['topic']['short_name'],
story['id'],
ADDRESS,
story['diggs'],
story['comments'],
story['description'].encode('ascii', 'replace')))
self.write('.\r\n')
def handle_story(self, story):
id = story.rsplit('/', 1)[1].rsplit('.', 1)[0]
story = digg.story.getInfo(story_id=id)['stories'][0]
comments = digg.story.getComments(story_id=id)['comments']
self.write('%s\r\n%s\r\n%i diggs - %i comments\r\n\r\n%s\r\n\r\n' % (
story['title'].encode('ascii', 'replace'),
story['link'],
story['diggs'],
story['comments'],
story['description'].encode('ascii', 'replace')))
self.write('Comments\r\n' + ('-' * 72) + '\r\n')
for comment in comments:
self.write('%i diggs, %i buries\r\n%s\r\n' % (
comment['up'],
comment['down'],
comment['content'].encode('ascii', 'replace')))
self.write(('-' * 72) + '\r\n')
self.write('.\r\n')
if __name__ == '__main__':
server = ThreadedTCPServer(('0.0.0.0', 70), GopherHandler)
server.serve_forever()
```
{
"source": "JeremyGrosser/pagercal",
"score": 3
}
#### File: JeremyGrosser/pagercal/server.py
```python
import urllib2
import hashlib
import json
import time
import dateutil.parser
import bottle
from config import *
def request(path):
url = APIBASE + path
req = urllib2.Request(url, headers={
'Authorization': 'Basic %s' % ('%s:%s' % (USERNAME, PASSWORD)).encode('base64'),
})
try:
resp = urllib2.urlopen(req)
return json.loads(resp.read())
except urllib2.HTTPError, e:
print str(e)
return json.loads(e.read())
except Exception, e:
print str(e)
return None
def unixtime_to_iso8601(ts):
return time.strftime('%Y-%m-%dT%H:%MZ', time.gmtime(ts))
def iso8601_to_unixtime(ts):
return dateutil.parser.parse(ts).timetuple()
def unixtime_to_vcal(ts):
return time.strftime('%Y%m%d', ts)
def shorten_line(line, maxwidth):
    lines = []
    while line:
        lines.append(line[:maxwidth])
        line = line[maxwidth:]
    return '\r\n '.join(lines)
@bottle.get('/calendar/:schedule_name.ics')
def get_schedule(schedule_name):
schedule_id = SCHEDULES[schedule_name]
now = time.time()
    since = now - (86400 * 3) # Three days ago
until = now + (86400 * 90) # Three months from now
resp = request('/schedules/%s/entries?since=%s&until=%s' % (
schedule_id,
unixtime_to_iso8601(since),
unixtime_to_iso8601(until),
))
cal = [
'BEGIN:VCALENDAR',
'VERSION:2.0',
'PRODID:-//pagercal//PagerDuty Calendar//EN',
]
for entry in resp['entries']:
eventid = hashlib.new('sha1', json.dumps(entry)).hexdigest()
start_time = iso8601_to_unixtime(entry['start'])
end_time = iso8601_to_unixtime(entry['end'])
cal += [
'BEGIN:VEVENT',
'UID:%s@%s.pagercal' % (eventid, schedule_id),
'DTSTAMP:%s' % (unixtime_to_vcal(start_time)),
'DTSTART:%s' % (unixtime_to_vcal(start_time)),
'DTEND:%s' % (unixtime_to_vcal(end_time)),
'SUMMARY:%s' % entry['user']['name'],
'CONTACT:%s' % entry['user']['email'],
'END:VEVENT',
]
cal.append('END:VCALENDAR')
result = []
for line in cal:
if len(line) > 75:
line = shorten_line(line, 75)
result.append(line)
bottle.response.content_type = 'text/calendar'
return '\r\n'.join(result) + '\r\n'
if __name__ == '__main__':
bottle.debug(True)
bottle.run(port=LISTEN_PORT)
```
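`shorten_line()` implements the usual iCalendar convention of folding long content lines: the text is cut into 75-character chunks and every continuation chunk starts with a single space. A small sketch of the effect:
```python
# Folding sketch: one long SUMMARY line becomes a 75-character first chunk
# followed by space-prefixed continuation chunks.
folded = shorten_line('SUMMARY:' + 'x' * 100, 75)
chunks = folded.split('\r\n')
assert len(chunks[0]) == 75
assert chunks[1].startswith(' ')
```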
{
"source": "jeremyh/agdc",
"score": 2
}
#### File: contrib/agdc_workshop_exercises/pqa-finished.py
```python
import os
import sys
import logging
import re
import numpy
from datetime import datetime, time
from osgeo import gdal
from agdc.stacker import Stacker
from EOtools.utils import log_multiline
# Set top level standard output
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
console_formatter = logging.Formatter('%(message)s')
console_handler.setFormatter(console_formatter)
logger = logging.getLogger(__name__)
if not logger.level:
logger.setLevel(logging.DEBUG) # Default logging level for all modules
logger.addHandler(console_handler)
# Creates derived datasets by masking all NBAR datasets in a stack with PQA data
class PQAStacker(Stacker):
""" Subclass of Stacker
Used to implement specific functionality to create stacks of derived datasets.
"""
def derive_datasets(self, input_dataset_dict, stack_output_info, tile_type_info):
assert type(input_dataset_dict) == dict, 'input_dataset_dict must be a dict'
log_multiline(logger.debug, input_dataset_dict, 'input_dataset_dict', '\t')
# Figure out our input/output files
nbar_dataset_path = input_dataset_dict['NBAR']['tile_pathname']
nbar_dataset = gdal.Open(nbar_dataset_path)
assert nbar_dataset, 'Unable to open dataset %s' % nbar_dataset
total_bands = nbar_dataset.RasterCount
logger.debug('Opened NBAR dataset %s', nbar_dataset_path)
# Get the pixel mask as a single numpy array
# Be mindful of memory usage, should be fine in this instance
pqa_mask = self.get_pqa_mask(input_dataset_dict['PQA']['tile_pathname'])
        # Instead of receiving one entry, this will have a number of entries equal to the number of bands
output_dataset_dict = {}
# Instead of creating 1 file with many bands
# Let's create many files with a single band
for index in range(1, total_bands + 1):
output_tile_path = os.path.join(self.output_dir, re.sub('\.\w+$',
'_pqa_masked_band_%s%s' % (index, tile_type_info['file_extension']),
os.path.basename(nbar_dataset_path)))
output_stack_path = os.path.join(self.output_dir, 'pqa_masked_band_%s.vrt' % (index))
# Copy metadata for eventual inclusion in stack file output
# This could also be written to the output tile if required
output_dataset_info = dict(input_dataset_dict['NBAR'])
output_dataset_info['tile_pathname'] = output_tile_path # This is the most important modification - used to find
output_dataset_info['band_name'] = 'NBAR band %s with PQA mask applied' % (index)
output_dataset_info['band_tag'] = 'NBAR-PQA-%s' % (index)
output_dataset_info['tile_layer'] = 1
# Create a new geotiff for the masked output
gdal_driver = gdal.GetDriverByName(tile_type_info['file_format'])
output_dataset = gdal_driver.Create(output_tile_path,
nbar_dataset.RasterXSize, nbar_dataset.RasterYSize,
1, nbar_dataset.GetRasterBand(index).DataType,
tile_type_info['format_options'].split(','))
assert output_dataset, 'Unable to open output dataset %s' % output_dataset
output_dataset.SetGeoTransform(nbar_dataset.GetGeoTransform())
output_dataset.SetProjection(nbar_dataset.GetProjection())
# Mask our band (each band is a numpy array of values)
input_band = nbar_dataset.GetRasterBand(index)
input_band_data = input_band.ReadAsArray()
# Apply the mask in place on input_band_data
no_data_value = -32767
self.apply_pqa_mask(input_band_data, pqa_mask, no_data_value)
# Write the data as a new band
output_band = output_dataset.GetRasterBand(1)
output_band.WriteArray(input_band_data)
output_band.SetNoDataValue(no_data_value)
output_band.FlushCache()
# This is not strictly necessary - copy metadata to output dataset
output_dataset_metadata = nbar_dataset.GetMetadata()
if output_dataset_metadata:
output_dataset.SetMetadata(output_dataset_metadata)
output_dataset.FlushCache()
logger.info('Finished writing %s', output_tile_path)
output_dataset_dict[output_stack_path] = output_dataset_info
log_multiline(logger.debug, output_dataset_dict, 'output_dataset_dict', '\t')
return output_dataset_dict
# This is the main function when this script is directly executed - You can mostly
# ignore its contents. The bulk of the "interesting work" is in the above class
if __name__ == '__main__':
def date2datetime(input_date, time_offset=time.min):
if not input_date:
return None
return datetime.combine(input_date, time_offset)
# Stacker class takes care of command line parameters
stacker = PQAStacker()
if stacker.debug:
console_handler.setLevel(logging.DEBUG)
# Check for required command line parameters
assert (stacker.x_index and stacker.y_index), 'You must specify Tile X/Y-index (-x/-y or --x_index/--y_index)'
assert stacker.output_dir, 'Output directory not specified (-o or --output)'
stack_info_dict = stacker.stack_derived(x_index=stacker.x_index,
y_index=stacker.y_index,
stack_output_dir=stacker.output_dir,
start_datetime=date2datetime(stacker.start_date, time.min),
end_datetime=date2datetime(stacker.end_date, time.max),
satellite=stacker.satellite,
sensor=stacker.sensor)
log_multiline(logger.debug, stack_info_dict, 'stack_info_dict', '\t')
logger.info('Finished creating %d temporal stack files in %s.', len(stack_info_dict), stacker.output_dir)
```
{
"source": "jeremyhahn/altcoin-autosell",
"score": 3
}
#### File: jeremyhahn/altcoin-autosell/coinex_api.py
```python
import exchange_api
import hashlib
import hmac
import json
import urllib2
class CoinEx(exchange_api.Exchange):
name = 'CoinEx'
def __init__(self, api_key, api_secret):
self.api_url = 'https://coinex.pw/api/v2/'
self.api_headers = {'Content-type' : 'application/json',
'Accept' : 'application/json',
'User-Agent' : 'autocoin-autosell'}
self.api_key = api_key
self.api_secret = api_secret
def GetName(self):
return 'CoinEx'
def _Request(self, method, headers=None, post_data=None):
if headers is None:
headers = {}
headers.update(self.api_headers.items())
try:
request = urllib2.Request(self.api_url + method, post_data, headers)
response = urllib2.urlopen(request)
try:
response_json = json.loads(response.read())
                if method not in response_json:
raise exchange_api.ExchangeException('Root not in %s.' % method)
return response_json[method]
finally:
response.close()
except (urllib2.URLError, urllib2.HTTPError, ValueError) as e:
raise exchange_api.ExchangeException(e)
def _PrivateRequest(self, method, post_data=None):
hmac_data = '' if not post_data else post_data
digest = hmac.new(self.api_secret, hmac_data, hashlib.sha512).hexdigest()
headers = {'API-Key' : self.api_key,
'API-Sign': digest}
return self._Request(method, headers, post_data)
def GetCurrencies(self):
currencies = {}
try:
for currency in self._Request('currencies'):
currencies[currency['name']] = currency['id']
except (TypeError, KeyError) as e:
raise exchange_api.ExchangeException(e)
return currencies
def GetBalances(self):
balances = {}
try:
for balance in self._PrivateRequest('balances'):
balances[balance['currency_id']] = float(balance['amount']) / pow(10, 8)
except (TypeError, KeyError) as e:
raise exchange_api.ExchangeException(e)
return balances
def GetMarkets(self):
try:
return [exchange_api.Market(trade_pair['currency_id'], trade_pair['market_id'],
trade_pair['id']) for
trade_pair in self._Request('trade_pairs')]
except (TypeError, KeyError) as e:
raise exchange_api.ExchangeException(e)
def CreateOrder(self, market_id, amount, bid=True, price=0):
order = {'trade_pair_id' : market_id,
'amount' : int(amount * pow(10, 8)),
'bid' : bid,
'rate' : max(1, int(price * pow(10, 8)))}
post_data = json.dumps({'order' : order})
try:
return self._PrivateRequest('orders', post_data)[0]['id']
except (TypeError, KeyError, IndexError) as e:
raise exchange_api.ExchangeException(e)
```
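A minimal usage sketch for the client above (Python 2, since it relies on `urllib2`); the API key and secret are placeholders, not real credentials, and the calls shown are just the ones defined in the class.

```python
# Hypothetical usage of the CoinEx client above; key/secret are placeholders.
import coinex_api

exchange = coinex_api.CoinEx(api_key='YOUR_API_KEY', api_secret='YOUR_API_SECRET')

# Public endpoint: currency name -> id mapping.
print(exchange.GetCurrencies())

# Private endpoints are signed with HMAC-SHA512 in _PrivateRequest.
# Balances come back divided by 10**8, since the API works in integer units.
print(exchange.GetBalances())
```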
|
{
"source": "jeremyhamm/garden-zero",
"score": 3
}
|
#### File: garden-zero/sensors/equipment.py
```python
import sys
import os
import RPi.GPIO as GPIO
import dht11
import time
import datetime
from dotenv import load_dotenv
#sys.path.append(os.path.abspath('./services'))
from gardenzero.services.mqtt import get_connection
# load environment variables (e.g. INTERVAL_SECONDS) from a local .env file
load_dotenv()
# initialize GPIO
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.cleanup()
# read data using pin 21
instance = dht11.DHT11(pin = 21)
result = instance.read()
client = get_connection()
def formatTemperature(temp):
return str(round((temp * 1.8) + 32, 1)) + '°F'
def formatHumidity(humidity):
    return str(round(humidity, 1)) + '%'
while True:
result = instance.read()
if result.is_valid():
temp = formatTemperature(result.temperature)
humidity = formatHumidity(result.humidity)
interval = int(os.environ.get("INTERVAL_SECONDS"))
client.publish("garden/equipment/temperature", temp)
client.publish("garden/equipment/humidity", humidity)
time.sleep(interval)
```
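As a sketch of the consuming side, the snippet below subscribes to the two topics published above using `paho-mqtt` directly. The broker host/port and the use of paho's 1.x client API are assumptions; the project itself obtains its connection through `gardenzero.services.mqtt.get_connection`, which is not shown here.

```python
# Assumed paho-mqtt (1.x API) subscriber for the topics published above.
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    # e.g. "garden/equipment/temperature 72.5°F"
    print(msg.topic, msg.payload.decode())

client = mqtt.Client()
client.on_message = on_message
client.connect("localhost", 1883)  # broker address is an assumption
client.subscribe("garden/equipment/temperature")
client.subscribe("garden/equipment/humidity")
client.loop_forever()
```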
|
{
"source": "jeremyhamm/pi-weather-dashboard",
"score": 3
}
|
#### File: pi-weather-dashboard/weather-station/rainfall.py
```python
from gpiozero import Button
import time
rain_sensor = Button(6)
bucket_size = 0.2794
count = 0
def bucket_tipped():
global count
    count += 1
print(count * bucket_size)
def reset_rainfall():
global count
count = 0
rain_sensor.when_pressed = bucket_tipped
while True:
count = 0
time.sleep(5)
```
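For clarity, a quick arithmetic check of the tipping-bucket maths above (the tip count is invented): each bucket tip represents 0.2794 mm of rain, so the running total is simply the tip count multiplied by the bucket size.

```python
# Quick check of the tipping-bucket arithmetic; the tip count is invented.
bucket_size = 0.2794  # mm of rain per bucket tip
tips = 12
print(round(tips * bucket_size, 4), "mm")  # 3.3528 mm
```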
|
{
"source": "jeremyhardy/AirSim",
"score": 3
}
|
#### File: PythonClient/astrodrone/utils.py
```python
import airsim
def qnorm(quaternion):
    # Use a small tolerance: comparing a float length to exactly 1 can loop forever.
    while abs(quaternion.get_length() - 1) > 1e-9:
        quaternion = quaternion.sgn()
    return quaternion
def quat2vec(quaternion):
vector = airsim.Vector3r(quaternion.x_val, quaternion.y_val, quaternion.z_val)
return vector
def vec2quat(vector):
quaternion = airsim.Quaternionr(vector.x_val, vector.y_val, vector.z_val, 0)
return quaternion
def mess2quat(message):
quaternion = airsim.Quaternionr(message.x_val, message.y_val, message.z_val, message.w_val)
return quaternion
```
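A minimal usage sketch for the helpers above, assuming they are in scope (the component values are arbitrary and the quaternion is deliberately not unit length):

```python
# Hypothetical usage of the quaternion helpers; values are arbitrary.
import airsim

q = airsim.Quaternionr(0.1, 0.2, 0.3, 0.9)  # not exactly unit length
q_unit = qnorm(q)                           # normalised quaternion
v = quat2vec(q_unit)                        # Vector3r from the x/y/z components
q_back = vec2quat(v)                        # Quaternionr with w_val forced to 0
print(q_unit.get_length(), v, q_back)
```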
|
{
"source": "Jeremy-Harper/Synthetic-Data-Replica-for-Healthcare",
"score": 4
}
|
#### File: Synthetic-Data-Replica-for-Healthcare/tutorial/deidentify.py
```python
import random
import time
import string
import pandas as pd
import numpy as np
import filepaths
def main():
print('running de-identification steps...')
start = time.time()
# "_df" is the usual way people refer to a Pandas DataFrame object
OMOP_data_df = pd.read_csv(filepaths.OMOP_data)
print('removing source...')
OMOP_data_df = remove_source(OMOP_data_df)
#print('replacing Hospital with random number...')
#OMOP_data_df = replace_hospital_with_random_number(OMOP_data_df)
print('removing non-male-or-female from gender ...')
OMOP_data_df = remove_non_male_or_female(OMOP_data_df)
print('putting ages in age brackets...')
OMOP_data_df = add_age_brackets(OMOP_data_df)
OMOP_data_df.to_csv(filepaths.omop_data_deidentify, index=False)
elapsed = round(time.time() - start, 2)
print('done in ' + str(elapsed) + ' seconds.')
def remove_source(OMOP_data_df: pd.DataFrame) -> pd.DataFrame:
"""Drops the Source column from the dataset, we don't need it and it gives you a framework to delete columns
Keyword arguments:
OMOP_data_df --
"""
OMOP_data_df = OMOP_data_df.drop('Source', 1)
return OMOP_data_df
def replace_hospital_with_random_number(
OMOP_data_df: pd.DataFrame) -> pd.DataFrame:
"""
Gives each hospital a random integer number and adds a new column
with these numbers. Drops the hospital name column.
Keyword arguments:
    OMOP_data_df -- Hospitals A&E records dataframe
"""
hospitals = OMOP_data_df['Hospital'].unique().tolist()
random.shuffle(hospitals)
hospitals_map = {
hospital : ''.join(random.choices(string.digits, k=6))
for hospital in hospitals
}
OMOP_data_df['Hospital ID'] = OMOP_data_df['Hospital'].map(hospitals_map)
    OMOP_data_df = OMOP_data_df.drop(columns=['Hospital'])
return OMOP_data_df
def remove_non_male_or_female(OMOP_data_df: pd.DataFrame) -> pd.DataFrame:
"""
Removes any record which has a non-male-or-female entry for gender.
Keyword arguments:
    OMOP_data_df -- Hospitals A&E records dataframe
"""
OMOP_data_df = OMOP_data_df[OMOP_data_df['Gender'].isin(['Male', 'Female'])]
return OMOP_data_df
def add_age_brackets(OMOP_data_df: pd.DataFrame) -> pd.DataFrame:
"""
Put the ages in to age brackets
Keyword arguments:
    OMOP_data_df -- Hospitals A&E records dataframe
"""
OMOP_data_df['Age bracket'] = pd.cut(
OMOP_data_df['Age'],
bins=[0, 18, 25, 45, 65, 85, 150],
labels=['0-17', '18-24', '25-44', '45-64', '65-84', '85-'],
include_lowest=True
)
    OMOP_data_df = OMOP_data_df.drop(columns=['Age'])
return OMOP_data_df
if __name__ == "__main__":
main()
```
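As a small, self-contained check of the bracketing logic in `add_age_brackets()` above (the ages are invented; the bins and labels mirror the function):

```python
# Toy check of the pd.cut() age-bracket logic used in add_age_brackets().
import pandas as pd

ages = pd.Series([0, 17, 44, 70, 90])
brackets = pd.cut(
    ages,
    bins=[0, 18, 25, 45, 65, 85, 150],
    labels=['0-17', '18-24', '25-44', '45-64', '65-84', '85-'],
    include_lowest=True,
)
print(brackets.tolist())  # ['0-17', '0-17', '25-44', '65-84', '85-']
```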
#### File: Synthetic-Data-Replica-for-Healthcare/tutorial/synthesize.py
```python
import random
import os
import time
import argparse
import pandas as pd
import numpy as np
import filepaths
from DataDescriber import DataDescriber
from DataGenerator import DataGenerator
from ModelInspector import ModelInspector
from lib.utils import read_json_file
attribute_to_datatype = {
'person_id': 'Integer',
'start_date': 'String',
'C19_identification_visit_id': 'String',
'Gender': 'String',
'race_source_value': 'String',
'ethnicity_source_value': 'String',
'Age bracket': 'String'
}
attribute_is_categorical = {
'person_id': False,
'start_date': False,
'C19_identification_visit_id': False,
'Gender': True,
'race_source_value': True,
'ethnicity_source_value': True,
'Age bracket': True
}
#args: str
def main():
start = time.time()
parser = argparse.ArgumentParser()
parser.add_argument("--output_dir", default='./data_output', type=str, help="*NOT IMPLEMENTED* Where you want to have the comparison images and the datafiles saved.")
parser.add_argument("--input_file_name", default='COVID19Patients_deidentify', type=str, help="Batch size per GPU/CPU for training.")
parser.add_argument("--bayesian_network_degree", default=1, type=int, help="The degrees of bayesian relationships being looked at.")
parser.add_argument("--epsilon_count", default=10, type=int, help="The amount of fuzziness injected to further obfuscate patient information")
parser.add_argument("--mode", default='correlation', type=str, help="The three output options, 'random', 'independent', 'correlated'")
parser.add_argument("--row_count", default=0, type=int, help="The default return value uses the same number of rows as you put in. This is overridden when you turn on the percentage!!")
parser.add_argument("--activate_percentage", default='no', type=str, help="To use percentage mark this as 'Yes' otherwise percentage is ignored")
parser.add_argument("--row_percentage", default=10, type=int, help="The default percentage of return is +-10% we use a random number generator to identify what direction we will go, you will have to edit code to make it work for your use case if that is unacceptable")
args = parser.parse_args()
# "_df" is the Pandas DataFrame we have to bring it in first so we can default the row count
omop_data_deidentify = os.path.join(filepaths.data_dir, args.input_file_name+'.csv')
omop_df = pd.read_csv(omop_data_deidentify)
# Specify the number of rows to return to the engine
if args.row_count == 0:
num_rows = len(omop_df)
else:
num_rows = args.row_count
if args.activate_percentage == 'Yes':
#this chooses a random number between the top and bottom percentage
random_number_between_percentage = random.randint(round(len(omop_df)*(1-(args.row_percentage/100))),round(len(omop_df)*(1+(args.row_percentage/100))))
num_rows = round(random_number_between_percentage)
# iterate through the mode variable to generate synthetic data, note this also assigns the mode variable
for mode in [args.mode]:
#declaring the output data
jsonfile = os.path.join(filepaths.data_dir, args.input_file_name+'_description_'+mode+'.json')
outputfile = os.path.join(filepaths.data_dir, args.input_file_name+'_data_synthetic_'+mode+'.csv')
print('describing synthetic data for', mode, 'mode...')
describe_synthetic_data(args, mode, jsonfile)
print('generating synthetic data for', mode, 'mode...')
generate_synthetic_data(
mode,
num_rows,
jsonfile,
outputfile
)
print('comparing histograms for', mode, 'mode...')
compare_histograms(
mode,
omop_df,
jsonfile,
outputfile
)
print('comparing pairwise mutual information for', mode, outputfile, 'mode...')
compare_pairwise_mutual_information(
mode,
omop_df,
jsonfile,
outputfile
)
elapsed = round(time.time() - start, 0)
print('done in ' + str(elapsed) + ' seconds.')
def describe_synthetic_data(args, mode: str, description_filepath:str):
'''
Describes the synthetic data and saves it to the data/ directory.
    Keyword arguments:
    args -- parsed command-line arguments (uses epsilon_count and bayesian_network_degree)
    mode -- what type of synthetic data
    description_filepath -- filepath to the data description
'''
describer = DataDescriber()
    print('describing synthetic data for', mode, 'mode with epsilon_count', args.epsilon_count, '...')
if mode == 'random':
describer.describe_dataset_in_random_mode(
filepaths.omop_data_deidentify,
attribute_to_datatype=attribute_to_datatype,
attribute_to_is_categorical=attribute_is_categorical)
elif mode == 'independent':
describer.describe_dataset_in_independent_attribute_mode(
filepaths.omop_data_deidentify,
attribute_to_datatype=attribute_to_datatype,
attribute_to_is_categorical=attribute_is_categorical)
elif mode == 'correlated':
        # Increase the epsilon value to reduce the injected noise.
# We're using differential privacy in this tutorial,
# set epsilon=0 to turn off differential privacy
epsilon = args.epsilon_count
# The maximum number of parents in Bayesian network
# i.e., the maximum number of incoming edges.
degree_of_bayesian_network = args.bayesian_network_degree
describer.describe_dataset_in_correlated_attribute_mode(
dataset_file=filepaths.omop_data_deidentify,
epsilon=epsilon,
k=degree_of_bayesian_network,
attribute_to_datatype=attribute_to_datatype,
attribute_to_is_categorical=attribute_is_categorical)
# attribute_to_is_candidate_key=attribute_to_is_candidate_key)
describer.save_dataset_description_to_file(description_filepath)
def generate_synthetic_data(
mode: str,
num_rows: int,
description_filepath: str,
synthetic_data_filepath: str
):
'''
Generates the synthetic data and saves it to the data/ directory.
Keyword arguments:
mode -- what type of synthetic data
num_rows -- number of rows in the synthetic dataset
description_filepath -- filepath to the data description
synthetic_data_filepath -- filepath to where synthetic data written
'''
generator = DataGenerator()
if mode == 'random':
generator.generate_dataset_in_random_mode(num_rows, description_filepath)
elif mode == 'independent':
generator.generate_dataset_in_independent_mode(num_rows, description_filepath)
elif mode == 'correlated':
generator.generate_dataset_in_correlated_attribute_mode(num_rows, description_filepath)
generator.save_synthetic_data(synthetic_data_filepath)
def compare_histograms(
mode: str,
omop_df: pd.DataFrame,
description_filepath: str,
synthetic_data_filepath: str
):
'''
Makes comparison plots showing the histograms for each column in the
synthetic data.
Keyword arguments:
mode -- what type of synthetic data
omop_df -- DataFrame of the original dataset
description_filepath -- filepath to the data description
synthetic_data_filepath -- filepath to where synthetic data written
'''
synthetic_df = pd.read_csv(synthetic_data_filepath)
# Read attribute description from the dataset description file.
attribute_description = read_json_file(
description_filepath)['attribute_description']
inspector = ModelInspector(
omop_df, synthetic_df, attribute_description)
for attribute in synthetic_df.columns:
figure_filepath = os.path.join(
filepaths.plots_dir,
mode + '_' + attribute + '.png'
)
# need to replace whitespace in filepath for Markdown reference
figure_filepath = figure_filepath.replace(' ', '_')
inspector.compare_histograms(attribute, figure_filepath)
def compare_pairwise_mutual_information(
mode: str,
omop_df: pd.DataFrame,
description_filepath: str,
synthetic_data_filepath: str
):
'''
Looks at correlation of attributes by producing heatmap
'''
synthetic_df = pd.read_csv(synthetic_data_filepath)
attribute_description = read_json_file(
description_filepath)['attribute_description']
inspector = ModelInspector(
omop_df, synthetic_df, attribute_description)
figure_filepath = os.path.join(
filepaths.plots_dir,
'mutual_information_heatmap_' + mode + '.png'
)
inspector.mutual_information_heatmap(figure_filepath)
if __name__ == "__main__":
main()
```
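To make the ±percentage row-count behaviour in `main()` easier to follow, here is a standalone sketch of that calculation (the row total and percentage are invented):

```python
# Standalone sketch of the +/- percentage row-count logic; numbers are invented.
import random

total_rows = 1000
row_percentage = 10
lower = round(total_rows * (1 - row_percentage / 100))  # 900
upper = round(total_rows * (1 + row_percentage / 100))  # 1100
num_rows = random.randint(lower, upper)                 # anywhere in [900, 1100]
print(num_rows)
```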
|
{
"source": "JeremyHash/LuatPyComUtils",
"score": 3
}
|
#### File: LuatPyComUtils/src/aliyun_test_reset.py
```python
import os
import platform
import re
import traceback
import serial
from utils import Logger
import sys
import hmac
import time
# Get the current OS platform
system_cate = platform.system()
print('Current operating system: ' + system_cate)
# List all available serial ports (on Linux, python3 must be invoked explicitly)
if system_cate == 'Linux':
    ports = os.popen('python3 -m serial.tools.list_ports').read()
    print(ports)
# The platform string for macOS is Darwin
elif system_cate == 'Darwin':
    ports = os.popen('python3 -m serial.tools.list_ports').read()
    print(ports)
else:
    ports = os.popen('python -m serial.tools.list_ports').read()
    print(ports)
port = 'COM' + input('Enter the device port number (digits after COM only): ')
product_key = ""
device_name = ""
device_secret = ""
# Aliyun test class
class aliyun_test:
    # Factory method for the serial-port object
    def serialFactory(self, port, baud_rate):
        return serial.Serial(port=port, baudrate=baud_rate)
    # Constructor
    def __init__(self, port, baud_rate):
        # Logger object
        self.log = Logger.Logger('./log/aliyun_test_log.txt', level='debug')
        # List of AT commands
        self.ATList = []
        # Payload sent in the HTTP POST request
        self.post_info = ''
        # Serial port under test
        self.port = port
        # Baud rate
        self.baud_rate = baud_rate
        # Serial-port object
        self.ser = self.serialFactory(port, baud_rate)
        # Restart / reconnect counter
        self.frequency = 0
        # Response buffers
        self.DateList = ""
        self.MusbList = ""
    # Method to load an AT command list file
    def load_atList(self, ATListFile):
        with open("./atListFiles/" + ATListFile, encoding="utf8") as file:
            print()
            print("Loading ATListFile: " + ATListFile)
            print()
            lines = file.readlines()
            tmp_count = 0
            for line in lines:
                if not line.startswith("#"):
                    if not line.isspace():
                        cmd_contents = line.replace("\n", "").split("====")
                        print("ATCmd:" + cmd_contents[0])
                        self.ATList.append(cmd_contents)
                        tmp_count += 1
            print()
            print("Successfully loaded " + str(tmp_count) + " AT commands from ---" + ATListFile + "---")
            print()
    # Aliyun test initialisation method
    def aliyun_init(self):
        if self.ser.is_open:
            if len(self.ATList) == 0:
                print("ATList is empty")
sys.exit(0)
for ATCmd in self.ATList:
self.ser.timeout = float(ATCmd[2])
tmp1 = (ATCmd[0] + "\r\n").encode("GB2312")
self.ser.write(tmp1)
self.log.logger.debug(f"发→◇ {ATCmd[0]}")
res = self.ser.read(1000)
tmp2 = res.decode(encoding="GB2312")
self.log.logger.debug(f"收←◆ {tmp2}")
else:
            print(self.ser.port + " failed to open")
    # Method to fetch the MQTT login credentials (iotId / iotToken) over HTTP
def get_mqtt_login_info(self):
self.ser.timeout = 2
cmd = f'AT+HTTPDATA={len(self.post_info)},20000\r\n'.encode('GB2312')
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
cmd = self.post_info.encode('GB2312')
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
cmd = b'AT+HTTPACTION=1\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
cmd = b'AT+HTTPREAD\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
res = self.ser.read(200).decode(encoding='GB2312')
self.log.logger.debug(f"收←◆ {res}")
pattern1 = re.compile(r'"iotId":"\w+"')
iotId = pattern1.findall(res)[0]
self.iotId = iotId.replace('"iotId":', '').replace('"', '')
self.log.logger.debug(f'iotId:{self.iotId}')
pattern2 = re.compile(r'"iotToken":"[\^\d\w]+"')
iotToken = pattern2.findall(res)[0]
self.iotToken = iotToken.replace('"iotToken":', '').replace('"', '')
self.log.logger.debug(f'iotToken:{self.iotToken}')
cmd = b'AT+HTTPTERM\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
    # Aliyun MQTT connection test method
def connect_mqtt_test(self):
self.ser.timeout = 1
cmd = b'1234567890123456789012345678901234567890123456789012345678901234567890'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
cmd = f'AT+MCONFIG="{device_name}","{self.iotId}","{self.iotToken}"\r\n'.encode('GB2312')
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
self.ser.timeout = 5
cmd = ('AT+SSLMIPSTART="' + product_key + '.iot-as-mqtt.cn-shanghai.aliyuncs.com",1883\r\n').encode()
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
cmd = b'AT+MCONNECT=1,300\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
self.DateList = self.ser.read(200).decode(encoding='GB2312').split("\r\n")
self.log.logger.debug(self.DateList[4])
if self.DateList[4] == "CONNECT OK":
self.log.logger.debug("阿里云MQTT连接成功")
else:
self.log.logger.debug("阿里云MQTT连接失败")
self.ser.timeout = 1
cmd = ('AT+MSUB="/' + product_key + '/' + device_name + '/user/Jeremy",0\r\n').encode()
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
self.MusbList = self.ser.read(200).decode(encoding='GB2312').split("\r\n")
self.log.logger.debug(self.MusbList[4])
if self.MusbList[4] == "SUBACK":
self.log.logger.debug("订阅成功")
else:
self.log.logger.debug("订阅失败,重新订阅")
cmd = ('AT+MSUB="/' + product_key + '/' + device_name + '/user/Jeremy",0\r\n').encode()
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
self.MusbList = self.ser.read(200).decode(encoding='GB2312').split("\r\n")
self.log.logger.debug(self.MusbList[4])
if self.MusbList[4] == "SUBACK":
self.log.logger.debug("订阅成功")
else:
self.log.logger.debug("订阅失败,程序退出")
cmd = b'AT+MQTTMSGSET=0\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
startime = int(time.time())
while True:
try:
cmd = ('AT+MPUB="/' + product_key + '/' + device_name + '/user/Jeremy",0,0,"test0"\r\n').encode()
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
endtime = int(time.time())
sub_time = endtime - startime
self.log.logger.debug("sub_time = " + str(sub_time))
if sub_time > 300:
self.frequency += 1
self.log.logger.debug("开始进行第" + str(self.frequency) + "次重启")
self.ser.write(b'AT+MIPCLOSE\r\n')
self.ser.write(b'AT+MDISCONNECT\r\n')
self.ser.write(b'AT+RESET\r\n')
self.ser.close()
for waitime in range(10):
self.log.logger.debug("正在重启中,请等待。。。")
time.sleep(3)
self.ser.open()
self.log.logger.debug("第" + str(self.frequency) + "次重启成功")
self.log.logger.debug("开始连接阿里云MQTT")
self.connect_mqtt_test()
except UnicodeError as e:
self.log.logger.error(e)
self.log.logger.error("---------------解码异常---------------")
self.log.logger.error(traceback.format_exc())
def AlicloudMachineTest():
global device_name, product_key, device_secret
test = aliyun_test(port, 115200)
    # One-device-one-secret ("一机一密") test
test.load_atList('INIT.txt')
test.load_atList('ALIYUN一机一密.txt')
test.aliyun_init()
with open('./cfg/one_device_one_secret.txt') as f:
lines = f.readlines()
product_key = lines[0].replace('\n', '')
device_name = lines[1].replace('\n', '')
device_secret = lines[2].replace('\n', '')
test.log.logger.debug(f'ProductKey:{product_key}')
test.log.logger.debug(f'DeviceName:{device_name}')
test.log.logger.debug(f'DeviceSecret:{device_secret}')
    # Concatenate the plaintext to be signed
message = b'clientId' + device_name.encode('GB2312') + b'deviceName' + device_name.encode(
'GB2312') + b'productKey' + product_key.encode('GB2312')
key = device_secret.encode('GB2312')
    # Sign the plaintext with the device secret using HMAC-MD5
sign = hmac.new(key, message, digestmod='MD5')
    # Build the HTTP POST body
test.post_info = f'productKey={product_key}&sign={sign.hexdigest()}&clientId={device_name}&deviceName={device_name}'
test.get_mqtt_login_info()
test.connect_mqtt_test()
try:
AlicloudMachineTest()
except KeyboardInterrupt as ke:
print("exit...")
sys.exit(0)
except Exception as e:
print(e)
print("---------------")
print(traceback.format_exc())
```
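The signing step inside `AlicloudMachineTest()` can be exercised on its own, as in the sketch below. The product key and device name are the example values that appear in the neighbouring Test.py; the device secret is a placeholder. The standalone Test.py that follows performs the same HMAC-MD5 check.

```python
# Standalone sketch of the one-device-one-secret signing; the secret is a placeholder.
import hmac

product_key = 'b0FMK1Ga5cp'
device_name = '862991419835241'
device_secret = 'DEVICE_SECRET_PLACEHOLDER'

message = (b'clientId' + device_name.encode('GB2312')
           + b'deviceName' + device_name.encode('GB2312')
           + b'productKey' + product_key.encode('GB2312'))
sign = hmac.new(device_secret.encode('GB2312'), message, digestmod='MD5')
post_info = ('productKey=%s&sign=%s&clientId=%s&deviceName=%s'
             % (product_key, sign.hexdigest(), device_name, device_name))
print(post_info)
```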
#### File: LuatPyComUtils/src/Test.py
```python
import hmac
message = b'clientId862991419835241deviceName862991419835241productKeyb0FMK1Ga5cp'
key = b'<KEY>'
h = hmac.new(key,message,digestmod='MD5')
print(h.hexdigest())
```
#### File: src/utils/Auto.py
```python
import os
class Auto:
def get_com(self, com_name):
port = os.popen(".././bin/dev_name " + com_name).read()
return port
if __name__ == '__main__':
    com_name = input('Enter the port to identify: ')
port = Auto().get_com(com_name)
    print('Port: ' + port)
```
#### File: src/utils/utils.py
```python
import binascii
# Convert a string to a hex string
def str_to_hexStr(string):
str_bin = string.encode('GB2312')
return binascii.hexlify(str_bin)
# Convert bytes to a space-separated hex string
def get_hex(bytes_data):
l = [hex(i) for i in bytes_data]
return " ".join(l)
# Convert a hex string back to a string
def hexStr_to_str(hex_str):
hex = hex_str.encode('GB2312')
str_bin = binascii.unhexlify(hex)
return str_bin.decode('GB2312')
if __name__ == '__main__':
print(hexStr_to_str('41542b43474154543f'))
print(get_hex(b'AT'))
print(str_to_hexStr('AT+CGATT?'))
for i in b'AT':
print(hex(i))
```
|
{
"source": "jeremyh/datacube-explorer",
"score": 2
}
|
#### File: cubedash/summary/_extents.py
```python
import functools
import json
import sys
import uuid
from dataclasses import dataclass
from datetime import date, datetime
from pathlib import Path
from typing import Dict, Generator, Iterable, List, Optional
import datacube.drivers.postgres._api as postgres_api
import fiona
import shapely.ops
import structlog
from datacube import Datacube
from datacube.drivers.postgres._fields import PgDocField, RangeDocField
from datacube.index import Index
from datacube.model import Dataset, DatasetType, Field, MetadataType, Range
from geoalchemy2 import Geometry, WKBElement
from geoalchemy2.shape import from_shape, to_shape
from psycopg2._range import Range as PgRange
from shapely.geometry import shape
from sqlalchemy import (
BigInteger,
Integer,
SmallInteger,
String,
and_,
bindparam,
case,
column,
func,
literal,
null,
select,
)
from sqlalchemy.dialects import postgresql as postgres
from sqlalchemy.engine import Engine
from sqlalchemy.sql.elements import ClauseElement, Label
from cubedash._utils import ODC_DATASET as DATASET, alchemy_engine, infer_crs
from cubedash.summary._schema import DATASET_SPATIAL, SPATIAL_REF_SYS
_LOG = structlog.get_logger()
_WRS_PATH_ROW = [
Path(__file__).parent.parent / "data" / "WRS2_descending" / "WRS2_descending.shp",
Path(__file__).parent.parent / "data" / "WRS2_ascending" / "WRS2_acsending.shp",
]
class UnsupportedWKTProductCRS(NotImplementedError):
"""We can't, within Postgis, support arbitrary WKT CRSes at the moment."""
def __init__(self, reason: str) -> None:
self.reason = reason
def get_dataset_extent_alchemy_expression(md: MetadataType, default_crs: str = None):
"""
Build an SQLAlchemy expression to get the extent for a dataset.
It's returned as a postgis geometry.
The logic here mirrors the extent() function of datacube.model.Dataset.
"""
doc = _jsonb_doc_expression(md)
if "grid_spatial" not in md.definition["dataset"]:
# Non-spatial product
return None
projection_offset = _projection_doc_offset(md)
if expects_eo3_metadata_type(md):
return func.ST_SetSRID(
case(
[
# If we have geometry, use it as the polygon.
(
doc[["geometry"]] != None,
func.ST_GeomFromGeoJSON(doc[["geometry"]], type_=Geometry),
)
],
# Otherwise construct a polygon from the computed bounds that ODC added on index.
else_=_bounds_polygon(doc, projection_offset),
),
get_dataset_srid_alchemy_expression(md, default_crs),
)
else:
valid_data_offset = projection_offset + ["valid_data"]
return func.ST_SetSRID(
case(
[
# If we have valid_data offset, use it as the polygon.
(
doc[valid_data_offset] != None,
func.ST_GeomFromGeoJSON(doc[valid_data_offset], type_=Geometry),
)
],
# Otherwise construct a polygon from the four corner points.
else_=_bounds_polygon(doc, projection_offset),
),
get_dataset_srid_alchemy_expression(md, default_crs),
type_=Geometry,
)
def expects_eo3_metadata_type(md: MetadataType) -> bool:
"""
Does the given metadata type expect EO3 datasets?
"""
# We don't have a clean way to say that a product expects EO3
measurements_offset = md.definition["dataset"]["measurements"]
    # In EO3, the measurements are in ['measurements'],
# In EO1, they are in ['image', 'bands'].
return measurements_offset == ["measurements"]
def _projection_doc_offset(md):
projection_offset = md.definition["dataset"]["grid_spatial"]
return projection_offset
def _jsonb_doc_expression(md):
doc = md.dataset_fields["metadata_doc"].alchemy_expression
return doc
def _bounds_polygon(doc, projection_offset):
geo_ref_points_offset = projection_offset + ["geo_ref_points"]
return func.ST_MakePolygon(
func.ST_MakeLine(
postgres.array(
tuple(
_gis_point(doc, geo_ref_points_offset + [key])
for key in ("ll", "ul", "ur", "lr", "ll")
)
)
),
type_=Geometry,
)
def _size_bytes_field(dt: DatasetType):
md_fields = dt.metadata_type.dataset_fields
if "size_bytes" in md_fields:
return md_fields["size_bytes"].alchemy_expression
return _jsonb_doc_expression(dt.metadata_type)["size_bytes"].astext.cast(BigInteger)
def get_dataset_srid_alchemy_expression(md: MetadataType, default_crs: str = None):
doc = md.dataset_fields["metadata_doc"].alchemy_expression
if "grid_spatial" not in md.definition["dataset"]:
# Non-spatial product
return None
projection_offset = md.definition["dataset"]["grid_spatial"]
if expects_eo3_metadata_type(md):
spatial_ref = doc[["crs"]].astext
else:
# Most have a spatial_reference field we can use directly.
spatial_ref = doc[projection_offset + ["spatial_reference"]].astext
# When datasets have no CRS, optionally use this as default.
default_crs_expression = None
if default_crs:
if not default_crs.lower().startswith(
"epsg:"
) and not default_crs.lower().startswith("esri:"):
# HACK: Change default CRS with inference
inferred_crs = infer_crs(default_crs)
if inferred_crs is None:
raise UnsupportedWKTProductCRS(
f"WKT Product CRSes are not currently well supported, and "
f"we can't infer this product's one. "
f"(Ideally use an auth-name format for CRS, such as 'EPSG:1234') "
f"Got: {default_crs!r}"
)
default_crs = inferred_crs
auth_name, auth_srid = default_crs.split(":")
default_crs_expression = (
select([SPATIAL_REF_SYS.c.srid])
.where(func.lower(SPATIAL_REF_SYS.c.auth_name) == auth_name.lower())
.where(SPATIAL_REF_SYS.c.auth_srid == int(auth_srid))
.as_scalar()
)
expression = func.coalesce(
case(
[
(
# If matches shorthand code: eg. "epsg:1234"
spatial_ref.op("~")(r"^[A-Za-z0-9]+:[0-9]+$"),
select([SPATIAL_REF_SYS.c.srid])
.where(
func.lower(SPATIAL_REF_SYS.c.auth_name)
== func.lower(func.split_part(spatial_ref, ":", 1))
)
.where(
SPATIAL_REF_SYS.c.auth_srid
== func.split_part(spatial_ref, ":", 2).cast(Integer)
)
.as_scalar(),
)
],
else_=None,
),
case(
[
(
# Plain WKT that ends in an authority code.
# Extract the authority name and code using regexp. Yuck!
# Eg: ".... AUTHORITY["EPSG","32756"]]"
spatial_ref.op("~")(r'AUTHORITY\["[a-zA-Z0-9]+", *"[0-9]+"\]\]$'),
select([SPATIAL_REF_SYS.c.srid])
.where(
func.lower(SPATIAL_REF_SYS.c.auth_name)
== func.lower(
func.substring(
spatial_ref,
r'AUTHORITY\["([a-zA-Z0-9]+)", *"[0-9]+"\]\]$',
)
)
)
.where(
SPATIAL_REF_SYS.c.auth_srid
== func.substring(
spatial_ref, r'AUTHORITY\["[a-zA-Z0-9]+", *"([0-9]+)"\]\]$'
).cast(Integer)
)
.as_scalar(),
)
],
else_=None,
),
# Some older datasets have datum/zone fields instead.
# The only remaining ones in DEA are 'GDA94'.
case(
[
(
doc[(projection_offset + ["datum"])].astext == "GDA94",
select([SPATIAL_REF_SYS.c.srid])
.where(func.lower(SPATIAL_REF_SYS.c.auth_name) == "epsg")
.where(
SPATIAL_REF_SYS.c.auth_srid
== (
"283"
+ func.abs(
doc[(projection_offset + ["zone"])].astext.cast(Integer)
)
).cast(Integer)
)
.as_scalar(),
)
],
else_=None,
),
default_crs_expression,
# TODO: Handle arbitrary WKT strings (?)
# 'GEOGCS[\\"GEOCENTRIC DATUM of AUSTRALIA\\",DATUM[\\"GDA94\\",SPHEROID[
# \\"GRS80\\",6378137,298.257222101]],PRIMEM[\\"Greenwich\\",0],UNIT[\\
# "degree\\",0.0174532925199433]]'
)
# print(as_sql(expression))
return expression
def _gis_point(doc, doc_offset):
return func.ST_MakePoint(
doc[doc_offset + ["x"]].astext.cast(postgres.DOUBLE_PRECISION),
doc[doc_offset + ["y"]].astext.cast(postgres.DOUBLE_PRECISION),
)
def refresh_spatial_extents(
index: Index,
product: DatasetType,
clean_up_deleted=False,
assume_after_date: datetime = None,
):
"""
Update the spatial extents to match any changes upstream in ODC.
:param assume_after_date: Only scan datasets that have changed after the given (db server) time.
If None, all datasets will be regenerated.
:param clean_up_deleted: Scan for any manually deleted rows too. Slow.
"""
engine: Engine = alchemy_engine(index)
log = _LOG.bind(product_name=product.name, after_date=assume_after_date)
# First, remove any archived datasets from our spatial table.
datasets_to_delete = (
select([DATASET.c.id])
.where(DATASET.c.archived.isnot(None))
.where(DATASET.c.dataset_type_ref == product.id)
)
if assume_after_date is not None:
# Note that we use "dataset_changed_expression" to scan the datasets,
# rather than "where archived > date", because the latter has no index!
# (.... and we're using dataset_changed_expression's index everywhere else,
# so it's probably still in memory and super fast!)
datasets_to_delete = datasets_to_delete.where(
dataset_changed_expression() > assume_after_date
)
log.info(
"spatial_archival",
)
changed = engine.execute(
DATASET_SPATIAL.delete().where(DATASET_SPATIAL.c.id.in_(datasets_to_delete))
).rowcount
log.info(
"spatial_archival.end",
change_count=changed,
)
# Forcing? Check every other dataset for removal, so we catch manually-deleted rows from the table.
if clean_up_deleted:
log.warning(
"spatial_deletion_full_scan",
)
changed += engine.execute(
DATASET_SPATIAL.delete().where(
DATASET_SPATIAL.c.dataset_type_ref == product.id,
)
# Where it doesn't exist in the ODC dataset table.
.where(
~DATASET_SPATIAL.c.id.in_(
select([DATASET.c.id]).where(
DATASET.c.dataset_type_ref == product.id,
)
)
)
).rowcount
log.info(
"spatial_deletion_scan.end",
change_count=changed,
)
# We'll update first, then insert new records.
# -> We do it in this order so that inserted records aren't immediately updated.
# (Note: why don't we do this in one upsert? Because we get our sqlalchemy expressions
# through ODC's APIs and can't choose alternative table aliases to make sub-queries.
# Maybe you can figure out a workaround, though?)
column_values = {c.name: c for c in _select_dataset_extent_columns(product)}
only_where = [
DATASET.c.dataset_type_ref
== bindparam("product_ref", product.id, type_=SmallInteger),
DATASET.c.archived.is_(None),
]
if assume_after_date is not None:
only_where.append(dataset_changed_expression() > assume_after_date)
else:
log.warning("spatial_update.recreating_everything")
# Update any changed datasets
log.info(
"spatial_update",
product_name=product.name,
after_date=assume_after_date,
)
changed += engine.execute(
DATASET_SPATIAL.update()
.values(**column_values)
.where(DATASET_SPATIAL.c.id == column_values["id"])
.where(and_(*only_where))
).rowcount
log.info("spatial_update.end", product_name=product.name, change_count=changed)
# ... and insert new ones.
log.info(
"spatial_insert",
product_name=product.name,
after_date=assume_after_date,
)
changed += engine.execute(
postgres.insert(DATASET_SPATIAL)
.from_select(
column_values.keys(),
select(column_values.values())
.where(and_(*only_where))
.order_by(column_values["center_time"]),
)
.on_conflict_do_nothing(index_elements=["id"])
).rowcount
log.info("spatial_insert.end", product_name=product.name, change_count=changed)
# If we changed data...
if changed:
# And it's a non-spatial product...
if get_dataset_extent_alchemy_expression(product.metadata_type) is None:
# And it has WRS path/rows...
if "sat_path" in product.metadata_type.dataset_fields:
# We can synthesize the polygons!
log.info(
"spatial_synthesizing",
)
shapes = _get_path_row_shapes()
rows = [
row
for row in index.datasets.search_returning(
("id", "sat_path", "sat_row"), product=product.name
)
if row.sat_path.lower is not None
]
if rows:
engine.execute(
DATASET_SPATIAL.update()
.where(DATASET_SPATIAL.c.id == bindparam("dataset_id"))
.values(footprint=bindparam("footprint")),
[
dict(
dataset_id=id_,
footprint=from_shape(
shapely.ops.unary_union(
[
shapes[(int(sat_path.lower), row)]
for row in range(
int(sat_row.lower),
int(sat_row.upper) + 1,
)
]
),
srid=4326,
extended=True,
),
)
for id_, sat_path, sat_row in rows
],
)
log.info(
"spatial_synthesizing.end",
)
return changed
def _select_dataset_extent_columns(dt: DatasetType) -> List[Label]:
"""
Get columns for all fields which go into the spatial table
for this DatasetType.
"""
md_type = dt.metadata_type
# If this product has lat/lon fields, we can take spatial bounds.
footprint_expression = get_dataset_extent_alchemy_expression(
md_type, default_crs=_default_crs(dt)
)
# Some time-series-derived products have seemingly-rectangular but *huge* footprints
# (because they union many almost-indistinguishable footprints)
# If they specify a resolution, we can simplify the geometry based on it.
if footprint_expression is not None and dt.grid_spec and dt.grid_spec.resolution:
resolution = min(abs(r) for r in dt.grid_spec.resolution)
footprint_expression = func.ST_SimplifyPreserveTopology(
footprint_expression, resolution / 4
)
return [
DATASET.c.id,
DATASET.c.dataset_type_ref,
center_time_expression(md_type),
(null() if footprint_expression is None else footprint_expression).label(
"footprint"
),
_region_code_field(dt).label("region_code"),
_size_bytes_field(dt).label("size_bytes"),
_dataset_creation_expression(md_type).label("creation_time"),
]
def center_time_expression(md_type: MetadataType):
"""
The center time for the given metadata doc.
(Matches the logic in ODC's Dataset.center_time)
"""
time = md_type.dataset_fields["time"].alchemy_expression
center_time = (func.lower(time) + (func.upper(time) - func.lower(time)) / 2).label(
"center_time"
)
return center_time
def dataset_changed_expression(dataset=DATASET):
"""Expression for the latest time a dataset was changed"""
# This expression matches our 'ix_dataset_type_changed' index, so we can scan it quickly.
dataset_changed = func.greatest(
dataset.c.added,
# The 'updated' column doesn't exist on ODC's definition as it's optional.
column("updated"),
dataset.c.archived,
)
return dataset_changed
def _default_crs(dt: DatasetType) -> Optional[str]:
storage = dt.definition.get("storage")
if not storage:
return None
return storage.get("crs")
def _dataset_creation_expression(md: MetadataType) -> ClauseElement:
"""SQLAlchemy expression for the creation (processing) time of a dataset"""
# Either there's a field called "created", or we fallback to the default "creation_dt' in metadata type.
created_field = md.dataset_fields.get("created")
if created_field is not None:
assert isinstance(created_field, PgDocField)
creation_expression = created_field.alchemy_expression
else:
doc = md.dataset_fields["metadata_doc"].alchemy_expression
creation_dt = md.definition["dataset"].get("creation_dt") or ["creation_dt"]
creation_expression = func.agdc.common_timestamp(doc[creation_dt].astext)
# If they're missing a dataset-creation time, fall back to the time it was indexed.
return func.coalesce(creation_expression, DATASET.c.added)
def get_dataset_bounds_query(md_type):
if "lat" not in md_type.dataset_fields:
# Not a spatial product
return None
lat, lon = md_type.dataset_fields["lat"], md_type.dataset_fields["lon"]
assert isinstance(lat, RangeDocField)
assert isinstance(lon, RangeDocField)
return func.ST_MakeBox2D(
func.ST_MakePoint(lat.lower.alchemy_expression, lon.lower.alchemy_expression),
func.ST_MakePoint(
lat.greater.alchemy_expression, lon.greater.alchemy_expression
),
type_=Geometry,
)
def as_sql(expression, **params):
"""Convert sqlalchemy expression to SQL string.
(primarily for debugging: to see what sqlalchemy is doing)
This has its literal values bound, so it's more readable than the engine's
query logging.
"""
if params:
expression = expression.params(**params)
return str(
expression.compile(
dialect=postgres.dialect(), compile_kwargs={"literal_binds": True}
)
)
def _as_json(obj):
def fallback(o, *args, **kwargs):
if isinstance(o, uuid.UUID):
return str(o)
if isinstance(o, WKBElement):
# Following the EWKT format: include srid
prefix = f"SRID={o.srid};" if o.srid else ""
return str(prefix + to_shape(o).wkt)
if isinstance(o, datetime):
return o.isoformat()
if isinstance(o, PgRange):
return ["∞" if o.lower_inf else o.lower, "∞" if o.upper_inf else o.upper]
return repr(o)
return json.dumps(obj, indent=4, default=fallback)
# This is tied to ODC's internal Dataset search implementation as there's no higher-level api to allow this.
# When region_code is integrated into core (as is being discussed) this can be replaced.
# pylint: disable=protected-access
def datasets_by_region(
engine: Engine,
index: Index,
product_name: str,
region_code: str,
time_range: Range,
limit: int,
offset: int = 0,
) -> Generator[Dataset, None, None]:
product = index.products.get_by_name(product_name)
query = (
select(postgres_api._DATASET_SELECT_FIELDS)
.select_from(
DATASET_SPATIAL.join(DATASET, DATASET_SPATIAL.c.id == DATASET.c.id)
)
.where(DATASET_SPATIAL.c.region_code == bindparam("region_code", region_code))
.where(
DATASET_SPATIAL.c.dataset_type_ref
== bindparam("dataset_type_ref", product.id)
)
)
if time_range:
query = query.where(
DATASET_SPATIAL.c.center_time > bindparam("from_time", time_range.begin)
).where(DATASET_SPATIAL.c.center_time < bindparam("to_time", time_range.end))
query = (
query.order_by(DATASET_SPATIAL.c.center_time)
.limit(bindparam("limit", limit))
.offset(bindparam("offset", offset))
)
return (
index.datasets._make(res, full_info=True)
for res in engine.execute(query).fetchall()
)
@dataclass
class RegionSummary:
product_name: str
region_code: str
count: int
generation_time: datetime
footprint_wgs84: Geometry
@property
def footprint_geojson(self):
extent = self.footprint_wgs84
if not extent:
return None
return {
"type": "Feature",
"geometry": extent.__geo_interface__,
"properties": {"region_code": self.region_code, "count": self.count},
}
@dataclass
class ProductArrival:
"""What arrived for a given product on a particular day?"""
product_name: str
day: date
# Count of datasets added on the given day.
dataset_count: int
# A few dataset ids among the arrivals
sample_dataset_ids: List[uuid.UUID]
class RegionInfo:
def __init__(
self, product: DatasetType, known_regions: Optional[Dict[str, RegionSummary]]
) -> None:
self.product = product
self._known_regions = known_regions
# Treated as an "id" in view code. What kind of region?
name: str = "region"
# A human-readable description displayed on a UI.
description: str = "Regions"
# Used when printing counts "1 region", "5 regions".
unit_label: str = "region"
units_label: str = "regions"
@classmethod
def for_product(
cls, dataset_type: DatasetType, known_regions: Dict[str, RegionSummary] = None
):
region_code_field: Field = dataset_type.metadata_type.dataset_fields.get(
"region_code"
)
grid_spec = dataset_type.grid_spec
# Ingested grids trump the "region_code" field because they've probably sliced it up smaller.
#
# hltc has a grid spec, but most attributes are missing, so grid_spec functions fail.
# Therefore: only assume there's a grid if tile_size is specified.
if grid_spec is not None and grid_spec.tile_size:
return GridRegionInfo(dataset_type, known_regions)
elif region_code_field is not None:
# Generic region info
return RegionInfo(dataset_type, known_regions)
elif "sat_path" in dataset_type.metadata_type.dataset_fields:
return SceneRegionInfo(dataset_type, known_regions)
return None
def region(self, region_code: str) -> Optional[RegionSummary]:
return self._known_regions.get(region_code)
def dataset_region_code(self, dataset: Dataset) -> Optional[str]:
"""
Get the region code for a dataset.
This should always give the same result as the alchemy_expression() function,
but is computed in pure python.
Classes that override alchemy_expression should override this to match.
"""
return dataset.metadata.region_code
def alchemy_expression(self):
"""
Get an alchemy expression that computes dataset's region code
Classes that override this should also override dataset_region_code to match.
"""
dt = self.product
region_code_field: Field = dt.metadata_type.dataset_fields.get("region_code")
# `alchemy_expression` is part of the postgres driver (PgDocField),
# not the base Field class.
if not hasattr(region_code_field, "alchemy_expression"):
raise NotImplementedError(
"ODC index driver doesn't support alchemy expressions"
)
return region_code_field.alchemy_expression
def region_label(self, region_code: str) -> str:
"""
Convert the region_code into something human-readable.
"""
# Default plain, un-prettified.
return region_code
class GridRegionInfo(RegionInfo):
"""Ingested datacube products have tiles"""
name = "tiled"
description = "Tiled product"
unit_label = "tile"
units_label = "tiles"
def region_label(self, region_code: str) -> str:
return "Tile {:+d}, {:+d}".format(*_from_xy_region_code(region_code))
def alchemy_expression(self):
"""
Get an sqlalchemy expression to calculate the region code (a string)
This is usually the 'region_code' field, if one exists, but there are
fallbacks for other native Satellites/Platforms.
Eg.
On Landsat scenes this is the path/row (separated by underscore)
On tiles this is the tile numbers (separated by underscore: possibly with negative)
On Sentinel this is MGRS number
"""
dt = self.product
grid_spec = dt.grid_spec
doc = _jsonb_doc_expression(dt.metadata_type)
projection_offset = _projection_doc_offset(dt.metadata_type)
# Calculate tile refs
geo_ref_points_offset = projection_offset + ["geo_ref_points"]
center_point = func.ST_Centroid(
func.ST_Collect(
_gis_point(doc, geo_ref_points_offset + ["ll"]),
_gis_point(doc, geo_ref_points_offset + ["ur"]),
)
)
# todo: look at grid_spec crs. Use it for defaults, conversion.
size_x, size_y = grid_spec.tile_size or (1000.0, 1000.0)
origin_x, origin_y = grid_spec.origin
return func.concat(
func.floor((func.ST_X(center_point) - origin_x) / size_x).cast(String),
"_",
func.floor((func.ST_Y(center_point) - origin_y) / size_y).cast(String),
)
def dataset_region_code(self, dataset: Dataset) -> Optional[str]:
tiles = [
tile
for tile, _ in dataset.type.grid_spec.tiles(
dataset.extent.centroid.boundingbox
)
]
        if len(tiles) != 1:
raise ValueError(
"Tiled dataset should only have one tile? "
f"Got {tiles!r} for {dataset!r}"
)
x, y = tiles[0]
return f"{x}_{y}"
def _from_xy_region_code(region_code: str):
"""
>>> _from_xy_region_code('95_3')
(95, 3)
>>> _from_xy_region_code('95_-3')
(95, -3)
"""
x, y = region_code.split("_")
return int(x), int(y)
class SceneRegionInfo(RegionInfo):
"""Landsat WRS2"""
name = "scenes"
description = "Landsat WRS2 scene-based product"
unit_label = "scene"
units_label = "scenes"
def region_label(self, region_code: str) -> str:
if "_" in region_code:
x, y = _from_xy_region_code(region_code)
return f"Path {x}, Row {y}"
else:
return f"Path {region_code}"
def alchemy_expression(self):
dt = self.product
# Generate region code for older sat_path/sat_row pairs.
md_fields = dt.metadata_type.dataset_fields
path_field: RangeDocField = md_fields["sat_path"]
row_field: RangeDocField = md_fields["sat_row"]
return case(
[
# Is this just one scene? Include it specifically
(
row_field.lower.alchemy_expression
== row_field.greater.alchemy_expression,
func.concat(
path_field.lower.alchemy_expression.cast(String),
"_",
row_field.greater.alchemy_expression.cast(String),
),
),
],
# Otherwise it's a range of rows, so our region-code is the whole path.
else_=path_field.lower.alchemy_expression.cast(String),
)
def dataset_region_code(self, dataset: Dataset) -> Optional[str]:
path_range = dataset.metadata.fields["sat_path"]
row_range = dataset.metadata.fields["sat_row"]
if row_range is None and path_range is None:
return None
# If it's just one scene? Include it specifically
if row_range[0] == row_range[1]:
return f"{path_range[0]}_{row_range[1]}"
# Otherwise it's a range of rows, so we say the whole path.
else:
return f"{path_range[0]}"
def _region_code_field(dt: DatasetType):
"""
Get an sqlalchemy expression to calculate the region code (a string)
"""
region_info = RegionInfo.for_product(
dt,
        # Passing None here is bad OO design. The class should probably be split in two for the different use-cases.
None,
)
if region_info is not None:
return region_info.alchemy_expression()
else:
_LOG.debug(
"no_region_code",
product_name=dt.name,
metadata_type_name=dt.metadata_type.name,
)
return null()
def get_sample_dataset(*product_names: str, index: Index = None) -> Iterable[Dict]:
with Datacube(index=index) as dc:
index = dc.index
for product_name in product_names:
product = index.products.get_by_name(product_name)
res = (
alchemy_engine(index)
.execute(
select(_select_dataset_extent_columns(product))
.where(
DATASET.c.dataset_type_ref
== bindparam("product_ref", product.id, type_=SmallInteger)
)
.where(DATASET.c.archived == None)
.limit(1)
)
.fetchone()
)
if res:
yield dict(res)
@functools.lru_cache()
def _get_path_row_shapes():
path_row_shapes = {}
for shape_file in _WRS_PATH_ROW:
with fiona.open(str(shape_file)) as f:
for _k, item in f.items():
prop = item["properties"]
key = prop["PATH"], prop["ROW"]
assert key not in path_row_shapes
path_row_shapes[key] = shape(item["geometry"])
return path_row_shapes
def get_mapped_crses(*product_names: str, index: Index = None) -> Iterable[Dict]:
with Datacube(index=index) as dc:
index = dc.index
for product_name in product_names:
product = index.products.get_by_name(product_name)
# SQLAlchemy queries require "column == None", not "column is None" due to operator overloading:
# pylint: disable=singleton-comparison
res = (
alchemy_engine(index)
.execute(
select(
[
literal(product.name).label("product"),
get_dataset_srid_alchemy_expression(
product.metadata_type
).label("crs"),
]
)
.where(DATASET.c.dataset_type_ref == product.id)
.where(DATASET.c.archived == None)
.limit(1)
)
.fetchone()
)
if res:
yield dict(res)
if __name__ == "__main__":
print(
_as_json(
list(
get_mapped_crses(
*(sys.argv[1:] or ["ls8_nbar_scene", "ls8_nbar_albers"])
)
)
)
)
print(
_as_json(
list(
get_sample_dataset(
*(sys.argv[1:] or ["ls8_nbar_scene", "ls8_nbar_albers"])
)
)
)
)
```
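A rough usage sketch (not part of the module) for `datasets_by_region()` above: it pages through datasets for one region of a product. The product name and region code are examples taken from elsewhere in this file, and a configured ODC index is assumed.

```python
# Hypothetical caller of datasets_by_region(); requires a configured ODC index.
from datetime import datetime

from datacube import Datacube
from datacube.model import Range

from cubedash._utils import alchemy_engine
from cubedash.summary._extents import datasets_by_region

with Datacube() as dc:
    datasets = datasets_by_region(
        engine=alchemy_engine(dc.index),
        index=dc.index,
        product_name="ls8_nbar_scene",  # example product
        region_code="95_3",             # example path/row region code
        time_range=Range(datetime(2019, 1, 1), datetime(2019, 2, 1)),
        limit=10,
    )
    for dataset in datasets:
        print(dataset.id)
```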
#### File: cubedash/summary/show.py
```python
import sys
import time
from textwrap import dedent
import click
import structlog
from click import echo, secho
from datacube.config import LocalConfig
from datacube.index import Index, index_connect
from datacube.ui.click import config_option, environment_option, pass_config
from cubedash._filters import sizeof_fmt
from cubedash.logs import init_logging
from cubedash.summary import SummaryStore
_LOG = structlog.get_logger()
def _get_store(config: LocalConfig, variant: str, log=_LOG) -> SummaryStore:
index: Index = index_connect(
config, application_name=f"cubedash.show.{variant}", validate_connection=False
)
return SummaryStore.create(index, log=log)
@click.command(help=__doc__)
@environment_option
@config_option
@pass_config
@click.option(
"--verbose",
"-v",
count=True,
help=dedent(
"""\
Enable all log messages, instead of just errors.
Logging goes to stdout unless `--event-log-file` is specified.
Logging is coloured plain-text if going to a tty, and jsonl format otherwise.
Use twice to enable debug logging too.
"""
),
)
@click.option(
"-l",
"--event-log-file",
help="Output jsonl logs to file",
type=click.Path(writable=True, dir_okay=True),
)
@click.argument("product_name")
@click.argument("year", type=int, required=False)
@click.argument("month", type=int, required=False)
@click.argument("day", type=int, required=False)
def cli(
config: LocalConfig,
product_name: str,
year: int,
month: int,
day: int,
event_log_file: str,
verbose: bool,
):
"""
Print the recorded summary information for the given product
"""
init_logging(
open(event_log_file, "a") if event_log_file else None, verbosity=verbose
)
store = _get_store(config, "setup")
t = time.time()
summary = store.get(product_name, year, month, day)
t_end = time.time()
if not store.index.products.get_by_name(product_name):
echo(f"Unknown product {product_name!r}", err=True)
sys.exit(-1)
product = store.get_product_summary(product_name)
if product is None:
echo(f"No info: product {product_name!r} has not been summarised", err=True)
sys.exit(-1)
secho(product_name, bold=True)
echo()
dataset_count = summary.dataset_count if summary else product.dataset_count
echo(f"{dataset_count} datasets")
if product.dataset_count:
echo(f"from {product.time_earliest.isoformat()} ")
echo(f" to {product.time_latest.isoformat()} ")
echo()
if store.needs_extent_refresh(product_name):
secho("Has changes", bold=True)
echo(f"Last extent refresh: {product.last_refresh_time}")
echo(f"Last summary completion: {product.last_successful_summary_time}")
if product.fixed_metadata:
echo()
secho("Metadata", fg="blue")
for k, v in product.fixed_metadata.items():
echo(f"\t{k}: {v}")
echo()
secho(
f"Period: {year or 'all-years'} {month or 'all-months'} {day or 'all-days'}",
fg="blue",
)
if summary:
if summary.size_bytes:
echo(f"\tStorage size: {sizeof_fmt(summary.size_bytes)}")
echo(f"\t{summary.dataset_count} datasets")
echo(f"\tSummarised: {summary.summary_gen_time}")
if summary.footprint_geometry:
secho(f"\tFootprint area: {summary.footprint_geometry.area}")
if not summary.footprint_geometry.is_valid:
secho("\tInvalid Geometry", fg="red")
else:
secho("\tNo footprint")
elif year or month or day:
echo("\tNo summary for chosen period.")
echo()
echo(f"(fetched in {round(t_end - t, 2)} seconds)")
if __name__ == "__main__":
cli()
```
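For an in-process smoke test of the command above, click's test runner can be used as sketched below (the product name is an example and a configured ODC index is assumed):

```python
# Hypothetical in-process invocation of the show CLI via click's test runner.
from click.testing import CliRunner

from cubedash.summary.show import cli

runner = CliRunner()
result = runner.invoke(cli, ["ls8_nbar_scene", "2019"])
print(result.exit_code)
print(result.output)
```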
|
{
"source": "jeremyh/eo-datasets",
"score": 3
}
|
#### File: eodatasets3/scripts/recompress.py
```python
import copy
import io
import socket
import stat
import sys
import tarfile
import tempfile
import traceback
from contextlib import suppress
from functools import partial
from itertools import chain
from pathlib import Path
from typing import IO, Callable, Dict, Iterable, List, Tuple
import click
import numpy
import rasterio
import structlog
from structlog.processors import (
JSONRenderer,
StackInfoRenderer,
TimeStamper,
format_exc_info,
)
from eodatasets3.ui import PathPath
from eodatasets3.verify import PackageChecksum
_PREDICTOR_TABLE = {
"int8": 2,
"uint8": 2,
"int16": 2,
"uint16": 2,
"int32": 2,
"uint32": 2,
"int64": 2,
"uint64": 2,
"float32": 3,
"float64": 3,
}
# The info of a file, and a method to open the file for reading.
ReadableMember = Tuple[tarfile.TarInfo, Callable[[], IO]]
_LOG = structlog.get_logger()
class RecompressFailure(Exception):
pass
def _create_tarinfo(path: Path, name=None) -> tarfile.TarInfo:
"""
Create a TarInfo ("tar member") based on the given filesystem path.
(these contain the information of a file, such as permissions, when writing to a tar file)
This code is based on TarFile.gettarinfo(), but doesn't need an existing tar file.
"""
# We're avoiding convenience methods like `path.is_file()`, to minimise repeated `stat()` calls on lustre.
s = path.stat()
info = tarfile.TarInfo(name or path.name)
if stat.S_ISREG(s.st_mode):
info.size = s.st_size
info.type = tarfile.REGTYPE
elif stat.S_ISDIR(s.st_mode):
info.type = tarfile.DIRTYPE
info.size = 0
else:
raise NotImplementedError(
f"Only regular files and directories are supported for extracted datasets. "
f"({path.name} in {path.absolute().parent})"
)
info.mode = s.st_mode
info.uid = s.st_uid
info.gid = s.st_gid
info.mtime = s.st_mtime
if tarfile.pwd:
try:
info.uname = tarfile.pwd.getpwuid(info.uid)[0]
except KeyError:
pass
if tarfile.grp:
try:
info.gname = tarfile.grp.getgrgid(info.gid)[0]
except KeyError:
pass
return info
def _tar_members(in_tar: tarfile.TarFile) -> Iterable[ReadableMember]:
"""Get readable files (members) from a tar"""
members: List[tarfile.TarInfo] = in_tar.getmembers()
for member in members:
# We return a lambda/callable so that the file isn't opened until it's needed.
yield member, partial(in_tar.extractfile, member)
def _folder_members(path: Path, base_path: Path = None) -> Iterable[ReadableMember]:
"""
Get readable files (presented as tar members) from a directory.
"""
if not base_path:
base_path = path
# Note that the members in input USGS tars are sorted alphabetically.
# We'll sort our own inputs to match.
# (The primary practical benefit is predictable outputs in tests)
for item in sorted(path.iterdir()):
member = _create_tarinfo(item, name=str(item.relative_to(base_path)))
if member.type == tarfile.DIRTYPE:
yield member, None
yield from _folder_members(item, base_path=path)
else:
# We return a lambda/callable so that the file isn't opened until it's needed.
yield member, partial(item.open, "rb")
def repackage_tar(
input_path: Path,
input_files: Iterable[ReadableMember],
output_tar_path: Path,
clean_inputs: bool,
**compress_args,
) -> bool:
log = _LOG.bind(
name=output_tar_path.stem,
in_path=str(input_path.absolute()),
out_path=str(output_tar_path.absolute()),
)
if output_tar_path.exists():
log.info("skip.exists")
return True
try:
members = list(input_files)
# Add the MTL file to the beginning of the output tar, so it can be accessed faster.
        # This slows the repackaging down a little, as we seek/decompress the input stream an extra time.
_reorder_tar_members(members, input_path.name)
_create_tar_with_files(input_path, members, output_tar_path, **compress_args)
log.info(
"complete",
in_size=sum(m.size for m, _ in members),
in_count=len(members),
# The user/group give us a hint as to whether this was repackaged outside of USGS.
in_users=list({(member.uname, member.gname) for member, _ in members}),
out_size=output_tar_path.stat().st_size,
)
result_exists = output_tar_path.exists()
if not result_exists:
# This should never happen, so it's an exception.
raise RuntimeError(f"No output after a success? Expected {output_tar_path}")
if clean_inputs:
log.info("input.cleanup")
please_remove(input_path, excluding=output_tar_path)
except Exception:
log.exception("error", exc_info=True)
return False
return True
def _create_tar_with_files(
input_path: Path,
members: List[ReadableMember],
output_tar_path: Path,
**compress_args,
) -> None:
"""
Package and compress the given input files to a new tar path.
The output tar path is written atomically, so on failure it will only exist if complete.
"""
out_dir: Path = output_tar_path.parent
out_dir.mkdir(parents=True, exist_ok=True)
verify = PackageChecksum()
# Use a temporary file so that we can move to the output path atomically.
with tempfile.TemporaryDirectory(prefix=".extract-", dir=str(out_dir)) as tmpdir:
tmpdir = Path(tmpdir).absolute()
tmp_out_tar = tmpdir.joinpath(output_tar_path.name)
with tarfile.open(tmp_out_tar, "w") as out_tar:
with click.progressbar(
label=input_path.name,
length=sum(member.size for member, _ in members),
file=sys.stderr,
) as progress:
file_number = 0
for readable_member in members:
file_number += 1
progress.label = (
f"{input_path.name} ({file_number:2d}/{len(members)})"
)
_recompress_tar_member(
readable_member, out_tar, compress_args, verify, tmpdir
)
member, _ = readable_member
progress.update(member.size)
# Append sha1 checksum file
checksum_path = tmpdir / "package.sha1"
verify.write(checksum_path)
checksum_path.chmod(0o664)
out_tar.add(checksum_path, checksum_path.name)
# Match the lower r/w permission bits to the output folder.
# (Temp directories default to 700 otherwise.)
tmp_out_tar.chmod(out_dir.stat().st_mode & 0o777)
# Our output tar is complete. Move it into place.
tmp_out_tar.rename(output_tar_path)
def _recompress_tar_member(
readable_member: ReadableMember,
out_tar: tarfile.TarFile,
compress_args: Dict,
verify: PackageChecksum,
tmpdir: Path,
):
member, open_member = readable_member
new_member = copy.copy(member)
# Copy with a minimum 664 permission, which is used by USGS tars.
# (some of our repacked datasets have only user read permission.)
new_member.mode = new_member.mode | 0o664
# If it's a tif, check whether it's compressed.
if member.name.lower().endswith(".tif"):
with open_member() as input_fp, rasterio.open(input_fp) as ds:
if not ds.profile.get("compress"):
# No compression: let's compress it
with rasterio.MemoryFile(filename=member.name) as memory_file:
try:
_recompress_image(ds, memory_file, **compress_args)
except Exception as e:
raise RecompressFailure(f"Error during {member.name}") from e
new_member.size = memory_file.getbuffer().nbytes
out_tar.addfile(new_member, memory_file)
# Image has been written. Seek to beginning to take a checksum.
memory_file.seek(0)
verify.add(memory_file, tmpdir / new_member.name)
return
else:
# It's already compressed, we'll fall through and copy it verbatim.
pass
if member.size == 0:
# Typically a directory entry.
out_tar.addfile(new_member)
return
# Copy unchanged into target (typically text/metadata files).
with open_member() as member:
file_contents = member.read()
out_tar.addfile(new_member, io.BytesIO(file_contents))
verify.add(io.BytesIO(file_contents), tmpdir / new_member.name)
del file_contents
def _reorder_tar_members(members: List[ReadableMember], identifier: str):
"""
Put the (tiny) MTL file at the beginning of the tar so that it's always quick to read.
"""
# Find MTL
for i, (member, _) in enumerate(members):
if "_MTL" in member.path:
mtl_index = i
break
else:
formatted_members = "\n\t".join(m.name for m, _ in members)
raise ValueError(
f"No MTL file found in package {identifier}. Have:\n\t{formatted_members}"
)
# Move to front
mtl_item = members.pop(mtl_index)
members.insert(0, mtl_item)
def _recompress_image(
input_image: rasterio.DatasetReader,
output_fp: rasterio.MemoryFile,
zlevel=9,
block_size=(512, 512),
):
"""
Read an image from given file pointer, and write as a compressed GeoTIFF.
"""
# noinspection PyUnusedLocal
block_size_y, block_size_x = block_size
if len(input_image.indexes) != 1:
raise ValueError(
f"Expecting one-band-per-tif input (USGS packages). "
f"Input has multiple layers {repr(input_image.indexes)}"
)
array: numpy.ndarray = input_image.read(1)
profile = input_image.profile
profile.update(
driver="GTiff",
predictor=_PREDICTOR_TABLE[array.dtype.name],
compress="deflate",
zlevel=zlevel,
blockxsize=block_size_x,
blockysize=block_size_y,
tiled=True,
)
with output_fp.open(**profile) as output_dataset:
output_dataset.write(array, 1)
# Copy gdal metadata
output_dataset.update_tags(**input_image.tags())
output_dataset.update_tags(1, **input_image.tags(1))
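# A hypothetical standalone use of _recompress_image (file names are made up; the code above
# always feeds it tar members via _recompress_tar_member):
#
#   with rasterio.open("input_band.tif") as src, \
#           rasterio.MemoryFile(filename="input_band.tif") as mem:
#       _recompress_image(src, mem, zlevel=5, block_size=(512, 512))
#       Path("compressed_band.tif").write_bytes(mem.getbuffer())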
@click.command(help=__doc__)
@click.option(
"--output-base",
type=PathPath(file_okay=False, writable=True),
help="The base output directory "
"(default to same dir as input if --clean-inputs).",
)
@click.option(
"--zlevel", type=click.IntRange(0, 9), default=5, help="Deflate compression level."
)
@click.option(
"--block-size", type=int, default=512, help="Compression block size (both x and y)"
)
@click.option(
"--clean-inputs/--no-clean-inputs",
default=False,
help="Delete originals after repackaging",
)
@click.option("-f", "input_file", help="Read paths from file", type=click.File("r"))
@click.argument("paths", nargs=-1, type=PathPath(exists=True, readable=True))
def main(
paths: List[Path],
input_file,
output_base: Path,
zlevel: int,
clean_inputs: bool,
block_size: int,
):
# Structured (json) logging goes to stdout
structlog.configure(
processors=[
StackInfoRenderer(),
format_exc_info,
TimeStamper(utc=False, fmt="iso"),
JSONRenderer(),
]
)
if (not output_base) and (not clean_inputs):
raise click.UsageError(
"Need to specify either a different output directory (--output-base) "
"or to clean inputs (--clean-inputs)"
)
if input_file:
paths = chain((Path(p.strip()) for p in input_file), paths)
with rasterio.Env():
total = failures = 0
for path in paths:
total += 1
# Input is either a tar.gz file, or a directory containing an MTL (already extracted)
if path.suffix.lower() == ".gz":
with tarfile.open(str(path), "r") as in_tar:
success = repackage_tar(
path,
_tar_members(in_tar),
_output_tar_path(output_base, path),
clean_inputs=clean_inputs,
zlevel=zlevel,
block_size=(block_size, block_size),
)
elif path.is_dir():
success = repackage_tar(
path,
_folder_members(path),
_output_tar_path_from_directory(output_base, path),
clean_inputs=clean_inputs,
zlevel=zlevel,
block_size=(block_size, block_size),
)
else:
raise ValueError(
f"Expected either tar.gz or a dataset folder. " f"Got: {repr(path)}"
)
if not success:
failures += 1
if total > 1:
_LOG.info(
"node.finish",
host=socket.getfqdn(),
total_count=total,
failure_count=failures,
)
sys.exit(failures)
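# Example invocation, assuming this module is installed as a console script (the command name
# and output directory below are placeholders, not taken from the source):
#
#   <repackage-command> --output-base /g/data/repacked --zlevel 5 \
#       /AODH/USGS/L1/Landsat/C1/092_091/LT50920911991126/LT05_L1GS_092091_19910506_20170126_01_T2.tar.gz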
def please_remove(path: Path, excluding: Path):
"""
Delete all of path, excluding the given path.
"""
if path.absolute() == excluding.absolute():
return
if path.is_dir():
for p in path.iterdir():
please_remove(p, excluding)
with suppress(OSError):
path.rmdir()
else:
path.unlink()
def _format_exception(e: BaseException):
"""
Shamelessly stolen from stdlib's logging module.
"""
with io.StringIO() as sio:
traceback.print_exception(e.__class__, e, e.__traceback__, None, sio)
return sio.getvalue().strip()
def _output_tar_path(base_output, input_path):
if base_output:
out_path = _calculate_out_base_path(base_output, input_path)
else:
out_path = input_path
# Remove .gz suffix
name = out_path.stem
if not name.endswith(".tar"):
raise RuntimeError(f"Expected path to end in .tar.gz, got: {out_path}")
return out_path.with_name(name)
def _output_tar_path_from_directory(base_output, input_path):
mtl_files = list(input_path.glob("*_MTL.txt"))
if not mtl_files:
raise ValueError(f"Dataset has no mtl: {input_path}")
if len(mtl_files) > 1:
_LOG.warn("multiple.mtl.files", in_path=input_path)
mtl_file = mtl_files[0]
dataset_name = mtl_file.name.replace("_MTL.txt", "")
if base_output:
return _calculate_out_base_path(base_output, input_path) / f"{dataset_name}.tar"
else:
return input_path / f"{dataset_name}.tar"
def _calculate_out_base_path(out_base: Path, path: Path) -> Path:
if "USGS" not in path.parts:
raise ValueError(
"Expected AODH input path structure, "
"eg: /AODH/USGS/L1/Landsat/C1/092_091/LT50920911991126/LT05_L1GS_092091_19910506_20170126_01_T2.tar.gz"
)
# The directory structure after the "USGS" folder is recreated onto the output base folder.
return out_base.joinpath(*path.parts[path.parts.index("USGS") + 1 : -1], path.name)
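# Worked example (the output base is illustrative only): with out_base=/repacked and
# path=/AODH/USGS/L1/Landsat/C1/092_091/LT50920911991126/LT05_L1GS_092091_19910506_20170126_01_T2.tar.gz,
# the parts after "USGS" (excluding the filename) are re-joined under out_base, giving
# /repacked/L1/Landsat/C1/092_091/LT50920911991126/LT05_L1GS_092091_19910506_20170126_01_T2.tar.gz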
if __name__ == "__main__":
main()
```
#### File: tests/integration/conftest.py
```python
import json
import shutil
from datetime import datetime
from pathlib import Path
from typing import Callable, Dict
import pytest
from eodatasets3 import serialise
from eodatasets3.model import DatasetDoc
from eodatasets3.prepare.landsat_l1_prepare import normalise_nci_symlinks
L71GT_TARBALL_PATH: Path = (
Path(__file__).parent / "data" / "LE07_L1TP_104078_20130429_20161124_01_T1.tar"
)
L5_TARBALL_PATH: Path = (
Path(__file__).parent / "data" / "LT05_L1TP_090085_19970406_20161231_01_T1.tar.gz"
)
L8_INPUT_PATH: Path = (
Path(__file__).parent / "data" / "LC08_L1TP_090084_20160121_20170405_01_T1"
)
L8_C2_INPUT_PATH: Path = (
Path(__file__).parent / "data" / "LC08_L1TP_090084_20160121_20200907_02_T1"
)
LS8_TELEMETRY_PATH: Path = (
Path(__file__).parent
/ "data"
/ "LS8_OLITIRS_STD-MD_P00_LC80840720742017365LGN00_084_072-074_20180101T004644Z20180101T004824_1"
)
# Manually converted from a S1 STAC document
S1_EO3_PATH: Path = (
Path(__file__).parent
/ "data"
/ "s1_rtc_021EE1_N00E001_2019_09_13_metadata_eo3.json"
)
WOFS_PATH: Path = Path(__file__).parent / "data" / "wofs"
def path_offset(base: Path, offset: str):
return str(normalise_nci_symlinks(base.absolute().joinpath(offset)))
def tar_offset(tar: Path, offset: str):
return "tar:" + str(normalise_nci_symlinks(tar.absolute())) + "!" + offset
def relative_offset(base, offset):
return offset
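# e.g. tar_offset(Path("/data/LS.tar"), "LS_B1.TIF") yields roughly
# "tar:/data/LS.tar!LS_B1.TIF" (subject to normalise_nci_symlinks rewriting NCI paths),
# while relative_offset simply returns the offset unchanged.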
@pytest.fixture
def sentinel1_eo3() -> Path:
with open(S1_EO3_PATH) as f:
return json.load(f)
@pytest.fixture
def l1_ls8_folder(tmp_path: Path) -> Path:
return _make_copy(L8_INPUT_PATH, tmp_path)
@pytest.fixture
def l1_c2_ls8_folder(tmp_path: Path) -> Path:
return _make_copy(L8_C2_INPUT_PATH, tmp_path)
@pytest.fixture
def l1_ls8_metadata_path(l1_ls8_folder: Path, l1_ls8_dataset: DatasetDoc) -> Path:
path = l1_ls8_folder / f"{l1_ls8_dataset.label}.odc-metadata.yaml"
serialise.to_path(path, l1_ls8_dataset)
return path
@pytest.fixture
def l1_ls8_dataset_path(l1_ls8_folder: Path, l1_ls8_metadata_path: Path) -> Path:
"""
A prepared L1 dataset with an EO3 metadata file.
"""
return l1_ls8_folder
@pytest.fixture
def l1_ls7_tarball(tmp_path: Path) -> Path:
return _make_copy(L71GT_TARBALL_PATH, tmp_path)
@pytest.fixture
def l1_ls5_tarball(tmp_path: Path) -> Path:
return _make_copy(L5_TARBALL_PATH, tmp_path)
def _make_copy(input_path, tmp_path):
our_input = tmp_path / input_path.name
if input_path.is_file():
shutil.copy(input_path, our_input)
else:
shutil.copytree(input_path, our_input)
return our_input
@pytest.fixture
def l1_ls8_dataset(l1_ls8_folder_md_expected: Dict) -> DatasetDoc:
return serialise.from_doc(l1_ls8_folder_md_expected)
@pytest.fixture
def l1_ls8_folder_md_expected(l1_ls8_folder) -> Dict:
return expected_l1_ls8_folder(l1_ls8_folder, relative_offset)
@pytest.fixture
def l1_ls8_ga_expected(l1_ls8_folder) -> Dict:
return expected_l1_ls8_folder(
l1_ls8_folder,
relative_offset,
organisation="ga.gov.au",
collection="3",
# the id in the ls8_telemetry_path fixture
lineage={"satellite_telemetry_data": ["30841328-89c2-4693-8802-a3560a6cf67a"]},
)
@pytest.fixture
def l1_c2_ls8_usgs_expected(l1_ls8_folder) -> Dict:
return expected_l1_ls8_folder(
l1_ls8_folder,
relative_offset,
organisation="usgs.gov",
collection="2",
l1_collection="2",
)
@pytest.fixture
def l1_ls8_folder_md_expected_absolute(l1_ls8_folder) -> Dict:
return expected_l1_ls8_folder(l1_ls8_folder, path_offset)
@pytest.fixture
def ls8_telemetry_path(tmp_path: Path) -> Path:
"""Telemetry data with old-style ODC metadata"""
return _make_copy(LS8_TELEMETRY_PATH, tmp_path)
@pytest.fixture(params=("ls5", "ls7", "ls8"))
def example_metadata(
request,
l1_ls5_tarball_md_expected: Dict,
l1_ls7_tarball_md_expected: Dict,
l1_ls8_folder_md_expected: Dict,
):
"""
Test against arbitrary valid eo3 documents.
"""
which = request.param
if which == "ls5":
return l1_ls5_tarball_md_expected
elif which == "ls7":
return l1_ls7_tarball_md_expected
elif which == "ls8":
return l1_ls8_folder_md_expected
raise AssertionError
def expected_l1_ls8_folder(
l1_ls8_folder: Path,
offset: Callable[[Path, str], str] = relative_offset,
organisation="usgs.gov",
collection="1",
l1_collection="1",
lineage=None,
):
"""
:param collection: The collection of the current scene
:param l1_collection: The collection of the original landsat l1 scene
:return:
"""
org_code = organisation.split(".")[0]
product_name = f"{org_code}_ls8c_level1_{collection}"
if collection == "2":
processing_datetime = datetime(2020, 9, 7, 19, 30, 5)
cloud_cover = 93.28
points_model = 125
points_version = 5
rmse_model_x = 4.525
rmse_model_y = 5.917
software_version = "LPGS_15.3.1c"
uuid = "d9221c40-24c3-5356-ab22-4dcac2bf2d70"
quality_tag = "QA_PIXEL"
else:
processing_datetime = datetime(2017, 4, 5, 11, 17, 36)
cloud_cover = 93.22
points_model = 66
points_version = 4
rmse_model_x = 4.593
rmse_model_y = 5.817
software_version = "LPGS_2.7.0"
uuid = "a780754e-a884-58a7-9ac0-df518a67f59d"
quality_tag = "BQA"
processing_date = processing_datetime.strftime("%Y%m%d")
return {
"$schema": "https://schemas.opendatacube.org/dataset",
"id": uuid,
"label": f"{product_name}-0-{processing_date}_090084_2016-01-21",
"product": {
"name": product_name,
"href": f"https://collections.dea.ga.gov.au/product/{product_name}",
},
"properties": {
"datetime": datetime(2016, 1, 21, 23, 50, 23, 54435),
# The minor version comes from the processing date (as used in filenames to distinguish reprocesses).
"odc:dataset_version": f"{collection}.0.{processing_date}",
"odc:file_format": "GeoTIFF",
"odc:processing_datetime": processing_datetime,
"odc:producer": organisation,
"odc:product_family": "level1",
"odc:region_code": "090084",
"eo:cloud_cover": cloud_cover,
"eo:gsd": 15.0,
"eo:instrument": "OLI_TIRS",
"eo:platform": "landsat-8",
"eo:sun_azimuth": 74.007_443_8,
"eo:sun_elevation": 55.486_483,
"landsat:collection_category": "T1",
"landsat:collection_number": int(l1_collection),
"landsat:data_type": "L1TP",
"landsat:geometric_rmse_model_x": rmse_model_x,
"landsat:geometric_rmse_model_y": rmse_model_y,
"landsat:ground_control_points_model": points_model,
"landsat:ground_control_points_version": points_version,
"landsat:landsat_product_id": f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1",
"landsat:landsat_scene_id": "LC80900842016021LGN02",
"landsat:processing_software_version": software_version,
"landsat:station_id": "LGN",
"landsat:wrs_path": 90,
"landsat:wrs_row": 84,
},
"crs": "epsg:32655",
"geometry": {
"coordinates": [
[
[879307.5, -3776885.4340469087],
[879307.5, -3778240.713151076],
[839623.3108524992, -3938223.736900397],
[832105.7835592609, -3953107.5],
[831455.8296215904, -3953107.5],
[831453.7930575205, -3953115.0],
[819969.5411349908, -3953115.0],
[641985.0, -3906446.160824098],
[641985.0, -3889797.3351159613],
[685647.6920251067, -3717468.346156044],
[688909.3673333039, -3714585.0],
[708011.4230769231, -3714585.0],
[879315.0, -3761214.3020833335],
[879315.0, -3776857.8139976147],
[879307.5, -3776885.4340469087],
]
],
"type": "Polygon",
},
"grids": {
"default": {
"shape": (60, 60),
"transform": (
3955.5,
0.0,
641_985.0,
0.0,
-3975.500_000_000_000_5,
-3_714_585.0,
0.0,
0.0,
1.0,
),
},
"panchromatic": {
"shape": (60, 60),
"transform": (
3955.25,
0.0,
641_992.5,
0.0,
-3975.25,
-3_714_592.5,
0.0,
0.0,
1.0,
),
},
},
"measurements": {
"coastal_aerosol": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B1.TIF",
)
},
"blue": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B2.TIF",
)
},
"green": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B3.TIF",
)
},
"red": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B4.TIF",
)
},
"nir": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B5.TIF",
)
},
"swir_1": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B6.TIF",
)
},
"swir_2": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B7.TIF",
)
},
"panchromatic": {
"grid": "panchromatic",
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B8.TIF",
),
},
"cirrus": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B9.TIF",
)
},
"lwir_1": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B10.TIF",
)
},
"lwir_2": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B11.TIF",
)
},
"quality": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_{quality_tag}.TIF",
)
},
},
"accessories": {
"metadata:landsat_mtl": {
"path": f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_MTL.txt"
}
},
"lineage": lineage or {},
}
@pytest.fixture
def l1_ls7_tarball_md_expected(
l1_ls7_tarball, offset: Callable[[Path, str], str] = relative_offset
) -> Dict:
return {
"$schema": "https://schemas.opendatacube.org/dataset",
"id": "f23c5fa2-3321-5be9-9872-2be73fee12a6",
"label": "usgs_ls7e_level1_1-0-20161124_104078_2013-04-29",
"product": {
"name": "usgs_ls7e_level1_1",
"href": "https://collections.dea.ga.gov.au/product/usgs_ls7e_level1_1",
},
"crs": "epsg:32652",
"properties": {
"datetime": datetime(2013, 4, 29, 1, 10, 20, 336_104),
"odc:dataset_version": "1.0.20161124",
"odc:file_format": "GeoTIFF",
"odc:processing_datetime": datetime(2016, 11, 24, 8, 26, 33),
"odc:producer": "usgs.gov",
"odc:product_family": "level1",
"odc:region_code": "104078",
"eo:cloud_cover": 0.0,
"eo:gsd": 15.0,
"eo:instrument": "ETM",
"eo:platform": "landsat-7",
"eo:sun_azimuth": 40.562_981_98,
"eo:sun_elevation": 39.374_408_72,
"landsat:collection_category": "T1",
"landsat:collection_number": 1,
"landsat:data_type": "L1TP",
"landsat:ephemeris_type": "DEFINITIVE",
"landsat:geometric_rmse_model_x": 2.752,
"landsat:geometric_rmse_model_y": 3.115,
"landsat:ground_control_points_model": 179,
"landsat:ground_control_points_version": 4,
"landsat:landsat_product_id": "LE07_L1TP_104078_20130429_20161124_01_T1",
"landsat:landsat_scene_id": "LE71040782013119ASA00",
"landsat:processing_software_version": "LPGS_12.8.2",
"landsat:station_id": "ASA",
"landsat:scan_gap_interpolation": 2.0,
"landsat:wrs_path": 104,
"landsat:wrs_row": 78,
},
"geometry": {
"coordinates": [
[
[563899.8055973209, -2773901.091688032],
[565174.4110839436, -2768992.5],
[570160.9334151996, -2768992.5],
[570170.5, -2768985.0],
[570223.8384207273, -2768992.5],
[588295.85185022, -2768992.5],
[721491.3496511441, -2790262.4648538055],
[721949.2518894314, -2790326.8512143376],
[758758.6847331291, -2797433.428778118],
[770115.0, -2803848.537800015],
[770115.0, -2819232.438669062],
[745548.1744650088, -2936338.4604302375],
[730486.820264895, -2981715.0],
[707958.3856497289, -2981715.0],
[707665.2711912903, -2981666.327459585],
[695862.7971396025, -2981638.6536404933],
[593801.8189357058, -2963902.554008508],
[537007.2875722996, -2953328.054250119],
[536671.5165534337, -2953272.2984591112],
[526480.1507793682, -2945221.547092697],
[525364.2405528432, -2927837.1702428674],
[529047.5112406499, -2911836.1482165447],
[529451.9856980122, -2906561.9692719015],
[536583.4124976253, -2879098.363725102],
[545784.6687009194, -2839125.873061804],
[562106.6687009194, -2775306.873061804],
[563899.8055973209, -2773901.091688032],
]
],
"type": "Polygon",
},
"grids": {
"default": {
"shape": (60, 60),
"transform": (
4080.500_000_000_000_5,
0.0,
525_285.0,
0.0,
-3545.5,
-2_768_985.0,
0.0,
0.0,
1.0,
),
},
"panchromatic": {
"shape": [60, 60],
"transform": [
4080.25,
0.0,
525_292.5,
0.0,
-3545.25,
-2_768_992.5,
0.0,
0.0,
1.0,
],
},
},
"measurements": {
"blue": {
"path": offset(
l1_ls7_tarball, "LE07_L1TP_104078_20130429_20161124_01_T1_B1.TIF"
)
},
"green": {
"path": offset(
l1_ls7_tarball, "LE07_L1TP_104078_20130429_20161124_01_T1_B2.TIF"
)
},
"nir": {
"path": offset(
l1_ls7_tarball, "LE07_L1TP_104078_20130429_20161124_01_T1_B4.TIF"
)
},
"quality": {
"path": offset(
l1_ls7_tarball, "LE07_L1TP_104078_20130429_20161124_01_T1_BQA.TIF"
)
},
"red": {
"path": offset(
l1_ls7_tarball, "LE07_L1TP_104078_20130429_20161124_01_T1_B3.TIF"
)
},
"swir_1": {
"path": offset(
l1_ls7_tarball, "LE07_L1TP_104078_20130429_20161124_01_T1_B5.TIF"
)
},
"swir_2": {
"path": offset(
l1_ls7_tarball, "LE07_L1TP_104078_20130429_20161124_01_T1_B7.TIF"
)
},
"tir_1": {"path": "LE07_L1TP_104078_20130429_20161124_01_T1_B6_VCID_1.TIF"},
"tir_2": {"path": "LE07_L1TP_104078_20130429_20161124_01_T1_B6_VCID_2.TIF"},
"panchromatic": {
"path": offset(
l1_ls7_tarball, "LE07_L1TP_104078_20130429_20161124_01_T1_B8.TIF"
),
"grid": "panchromatic",
},
},
"accessories": {
"metadata:landsat_mtl": {
"path": "LE07_L1TP_104078_20130429_20161124_01_T1_MTL.txt"
}
},
"lineage": {},
}
@pytest.fixture
def l1_ls5_tarball_md_expected(
l1_ls5_tarball, offset: Callable[[Path, str], str] = relative_offset
) -> Dict:
return {
"$schema": "https://schemas.opendatacube.org/dataset",
"id": "b0d31709-dda4-5a67-9fdf-3ae026a99a72",
"label": "usgs_ls5t_level1_1-0-20161231_090085_1997-04-06",
"product": {
"name": "usgs_ls5t_level1_1",
"href": "https://collections.dea.ga.gov.au/product/usgs_ls5t_level1_1",
},
"crs": "epsg:32655",
"properties": {
"datetime": datetime(1997, 4, 6, 23, 17, 43, 102_000),
"odc:dataset_version": "1.0.20161231",
"odc:file_format": "GeoTIFF",
"odc:processing_datetime": datetime(2016, 12, 31, 15, 54, 58),
"odc:producer": "usgs.gov",
"odc:product_family": "level1",
"odc:region_code": "090085",
"eo:cloud_cover": 27.0,
"eo:gsd": 30.0,
"eo:instrument": "TM",
"eo:platform": "landsat-5",
"eo:sun_azimuth": 51.254_542_23,
"eo:sun_elevation": 31.987_632_19,
"landsat:collection_category": "T1",
"landsat:collection_number": 1,
"landsat:data_type": "L1TP",
"landsat:ephemeris_type": "PREDICTIVE",
"landsat:geometric_rmse_model_x": 3.036,
"landsat:geometric_rmse_model_y": 3.025,
"landsat:geometric_rmse_verify": 0.163,
"landsat:ground_control_points_model": 161,
"landsat:ground_control_points_verify": 1679,
"landsat:ground_control_points_version": 4,
"landsat:landsat_product_id": "LT05_L1TP_090085_19970406_20161231_01_T1",
"landsat:landsat_scene_id": "LT50900851997096ASA00",
"landsat:processing_software_version": "LPGS_12.8.2",
"landsat:station_id": "ASA",
"landsat:wrs_path": 90,
"landsat:wrs_row": 85,
},
"geometry": {
"coordinates": [
[
[636860.78, -3881685.0],
[593385.0, -4045573.25],
[593385.0, -4062038.8525309917],
[636850.5348070407, -4075317.3559092814],
[768589.1325495897, -4101015.0],
[790215.8408397632, -4101015.0],
[795289.3607718372, -4094590.58897732],
[835815.0, -3937552.555313688],
[835815.0, -3920661.1474690083],
[792349.4651929593, -3907382.6440907186],
[657073.5519714399, -3881685.0],
[636860.78, -3881685.0],
]
],
"type": "Polygon",
},
"grids": {
"default": {
"shape": (60, 60),
"transform": (
4040.5,
0.0,
593_385.0,
0.0,
-3655.5,
-3_881_685.0,
0.0,
0.0,
1.0,
),
}
},
"measurements": {
"blue": {
"path": offset(
l1_ls5_tarball, "LT05_L1TP_090085_19970406_20161231_01_T1_B1.TIF"
)
},
"green": {
"path": offset(
l1_ls5_tarball, "LT05_L1TP_090085_19970406_20161231_01_T1_B2.TIF"
)
},
"red": {
"path": offset(
l1_ls5_tarball, "LT05_L1TP_090085_19970406_20161231_01_T1_B3.TIF"
)
},
"nir": {
"path": offset(
l1_ls5_tarball, "LT05_L1TP_090085_19970406_20161231_01_T1_B4.TIF"
)
},
"swir_1": {
"path": offset(
l1_ls5_tarball, "LT05_L1TP_090085_19970406_20161231_01_T1_B5.TIF"
)
},
"swir_2": {
"path": offset(
l1_ls5_tarball, "LT05_L1TP_090085_19970406_20161231_01_T1_B7.TIF"
)
},
"tir": {
"path": offset(
l1_ls5_tarball, "LT05_L1TP_090085_19970406_20161231_01_T1_B6.TIF"
)
},
"quality": {
"path": offset(
l1_ls5_tarball, "LT05_L1TP_090085_19970406_20161231_01_T1_BQA.TIF"
)
},
},
"accessories": {
"metadata:landsat_mtl": {
"path": "LT05_L1TP_090085_19970406_20161231_01_T1_MTL.txt"
}
},
"lineage": {},
}
@pytest.fixture
def input_uint8_tif() -> Path:
return Path(WOFS_PATH / "ga_ls_wofs_3_099081_2020-07-26_interim_water_clipped.tif")
@pytest.fixture
def input_uint8_tif_2() -> Path:
return Path(WOFS_PATH / "ga_ls_wofs_3_090081_1993_01_05_interim_water_clipped.tif")
```
#### File: integration/prepare/test_prepare_landsat_l1.py
```python
import shutil
from datetime import datetime
from pathlib import Path
from typing import Callable, Dict
import pytest
from eodatasets3.prepare import landsat_l1_prepare
from tests.common import check_prepare_outputs, run_prepare_cli
LC08_L2_C2_POST_20210507_INPUT_PATH: Path = (
Path(__file__).parent.parent / "data" / "LC08_L2SP_098084_20210503_20210508_02_T1"
)
LT05_L2_C2_INPUT_PATH: Path = (
Path(__file__).parent.parent / "data" / "LT05_L2SP_090084_19980308_20200909_02_T1"
)
LE07_L2_C2_INPUT_PATH: Path = (
Path(__file__).parent.parent / "data" / "LE07_L2SP_090084_20210331_20210426_02_T1"
)
@pytest.fixture
def lc08_l2_c2_post_20210507_folder(tmp_path: Path) -> Path:
return _make_copy(LC08_L2_C2_POST_20210507_INPUT_PATH, tmp_path)
@pytest.fixture
def lt05_l2_c2_folder(tmp_path: Path) -> Path:
return _make_copy(LT05_L2_C2_INPUT_PATH, tmp_path)
@pytest.fixture
def le07_l2_c2_folder(tmp_path: Path) -> Path:
return _make_copy(LE07_L2_C2_INPUT_PATH, tmp_path)
def relative_offset(base, offset):
return offset
def _make_copy(input_path, tmp_path):
our_input = tmp_path / input_path.name
if input_path.is_file():
shutil.copy(input_path, our_input)
else:
shutil.copytree(input_path, our_input)
return our_input
def test_prepare_l5_l1_usgs_tarball(
tmp_path: Path, l1_ls5_tarball_md_expected: Dict, l1_ls5_tarball: Path
):
assert l1_ls5_tarball.exists(), "Test data missing(?)"
output_path: Path = tmp_path / "out"
output_path.mkdir()
# When specifying an output base path it will create path/row subfolders within it.
expected_metadata_path = (
output_path
/ "090"
/ "085"
/ "LT05_L1TP_090085_19970406_20161231_01_T1.odc-metadata.yaml"
)
check_prepare_outputs(
invoke_script=landsat_l1_prepare.main,
run_args=["--output-base", str(output_path), str(l1_ls5_tarball)],
expected_doc=l1_ls5_tarball_md_expected,
expected_metadata_path=expected_metadata_path,
)
def test_prepare_l8_l1_usgs_tarball(l1_ls8_folder, l1_ls8_folder_md_expected):
assert l1_ls8_folder.exists(), "Test data missing(?)"
    # No output path defined, so it will default to being a sibling to the input.
expected_metadata_path = (
l1_ls8_folder.parent
/ "LC08_L1TP_090084_20160121_20170405_01_T1.odc-metadata.yaml"
)
assert not expected_metadata_path.exists()
check_prepare_outputs(
invoke_script=landsat_l1_prepare.main,
run_args=[str(l1_ls8_folder)],
expected_doc=l1_ls8_folder_md_expected,
expected_metadata_path=expected_metadata_path,
)
def test_prepare_l8_l1_c2(
tmp_path: Path, l1_c2_ls8_folder: Path, l1_c2_ls8_usgs_expected: Dict
):
"""Run prepare script with a source telemetry data and unique producer."""
assert l1_c2_ls8_folder.exists(), "Test data missing(?)"
output_path = tmp_path
expected_metadata_path = (
output_path
/ "090"
/ "084"
/ "LC08_L1TP_090084_20160121_20200907_02_T1.odc-metadata.yaml"
)
check_prepare_outputs(
invoke_script=landsat_l1_prepare.main,
run_args=[
"--output-base",
output_path,
"--producer",
"usgs.gov",
l1_c2_ls8_folder,
],
expected_doc=l1_c2_ls8_usgs_expected,
expected_metadata_path=expected_metadata_path,
)
def test_prepare_lc08_l2_c2_post_20210507(
tmp_path: Path,
lc08_l2_c2_post_20210507_folder: Path,
):
"""Support a functionality baseline for the enhancements to expand landsat
prepare (YAML) logic to support USGS level 2 - PR#159:
LC08 C2 L2 post 7th May 2021."""
assert lc08_l2_c2_post_20210507_folder.exists(), "Test data missing(?)"
output_path = tmp_path
expected_metadata_path = (
output_path
/ "098"
/ "084"
/ "LC08_L2SP_098084_20210503_20210508_02_T1.odc-metadata.yaml"
)
check_prepare_outputs(
invoke_script=landsat_l1_prepare.main,
run_args=[
"--output-base",
output_path,
"--producer",
"usgs.gov",
lc08_l2_c2_post_20210507_folder,
],
expected_doc=expected_lc08_l2_c2_post_20210507_folder(),
expected_metadata_path=expected_metadata_path,
)
def test_prepare_lt05_l2_c2(
tmp_path: Path,
lt05_l2_c2_folder: Path,
):
"""Support a functionality baseline for the enhancements to expand landsat
prepare (YAML) logic to support USGS level 2 - PR#159:
LT05 C2 L2."""
assert lt05_l2_c2_folder.exists(), "Test data missing(?)"
output_path = tmp_path
expected_metadata_path = (
output_path
/ "090"
/ "084"
/ "LT05_L2SP_090084_19980308_20200909_02_T1.odc-metadata.yaml"
)
check_prepare_outputs(
invoke_script=landsat_l1_prepare.main,
run_args=[
"--output-base",
output_path,
"--producer",
"usgs.gov",
lt05_l2_c2_folder,
],
expected_doc=expected_lt05_l2_c2_folder(),
expected_metadata_path=expected_metadata_path,
)
def test_prepare_le07_l2_c2(
tmp_path: Path,
le07_l2_c2_folder: Path,
):
"""Support a functionality baseline for the enhancements to expand landsat
prepare (YAML) logic to support USGS level 2 - PR#159:
LE07 C2 L2."""
assert le07_l2_c2_folder.exists(), "Test data missing(?)"
output_path = tmp_path
expected_metadata_path = (
output_path
/ "090"
/ "084"
/ "LE07_L2SP_090084_20210331_20210426_02_T1.odc-metadata.yaml"
)
check_prepare_outputs(
invoke_script=landsat_l1_prepare.main,
run_args=[
"--output-base",
output_path,
"--producer",
"usgs.gov",
le07_l2_c2_folder,
],
expected_doc=expected_le07_l2_c2_folder(),
expected_metadata_path=expected_metadata_path,
)
def test_prepare_l8_l1_tarball_with_source(
tmp_path: Path, l1_ls8_folder: Path, ls8_telemetry_path, l1_ls8_ga_expected: Dict
):
"""Run prepare script with a source telemetry data and unique producer."""
assert l1_ls8_folder.exists(), "Test data missing(?)"
output_path = tmp_path
expected_metadata_path = (
output_path
/ "090"
/ "084"
/ "LC08_L1TP_090084_20160121_20170405_01_T1.odc-metadata.yaml"
)
check_prepare_outputs(
invoke_script=landsat_l1_prepare.main,
run_args=[
"--output-base",
output_path,
"--producer",
"ga.gov.au",
"--source",
ls8_telemetry_path,
l1_ls8_folder,
],
expected_doc=l1_ls8_ga_expected,
expected_metadata_path=expected_metadata_path,
)
def test_prepare_l7_l1_usgs_tarball(
l1_ls7_tarball: Path, l1_ls7_tarball_md_expected: Dict
):
assert l1_ls7_tarball.exists(), "Test data missing(?)"
expected_metadata_path = (
l1_ls7_tarball.parent
/ "LE07_L1TP_104078_20130429_20161124_01_T1.odc-metadata.yaml"
)
check_prepare_outputs(
invoke_script=landsat_l1_prepare.main,
run_args=[str(l1_ls7_tarball)],
expected_doc=l1_ls7_tarball_md_expected,
expected_metadata_path=expected_metadata_path,
)
def test_skips_old_datasets(l1_ls7_tarball):
"""Prepare should skip datasets older than the given date"""
expected_metadata_path = (
l1_ls7_tarball.parent
/ "LE07_L1TP_104078_20130429_20161124_01_T1.odc-metadata.yaml"
)
run_prepare_cli(
landsat_l1_prepare.main,
# Can't be newer than right now.
"--newer-than",
datetime.now().isoformat(),
str(l1_ls7_tarball),
)
assert (
not expected_metadata_path.exists()
), "Dataset should have been skipped due to age"
# It should work with an old date.
run_prepare_cli(
landsat_l1_prepare.main,
# Some old date, from before the test data was created.
"--newer-than",
"2014-05-04",
str(l1_ls7_tarball),
)
assert (
expected_metadata_path.exists()
), "Dataset should have been packaged when using an ancient date cutoff"
def expected_lc08_l2_c2_post_20210507_folder(
l2_c2_ls8_folder: Path = None,
offset: Callable[[Path, str], str] = relative_offset,
organisation="usgs.gov",
collection="2",
leveln_collection="2",
lineage=None,
):
""" """
org_code = organisation.split(".")[0]
product_name = f"{org_code}_ls8c_level{leveln_collection}_{collection}"
processing_datetime = datetime(2021, 5, 8, 11, 5, 47)
cloud_cover = 72.57
points_model = 35
points_version = 5
rmse_model_x = 5.974
rmse_model_y = 4.276
software_version = "LPGS_15.4.0"
uuid = "d4ff459f-67e0-57df-af73-dfef14ad9c47"
quality_tag = "QA_PIXEL"
processing_date = processing_datetime.strftime("%Y%m%d")
return {
"$schema": "https://schemas.opendatacube.org/dataset",
"id": uuid,
"label": f"{product_name}-0-{processing_date}_098084_2021-05-03",
"product": {
"name": product_name,
"href": f"https://collections.dea.ga.gov.au/product/{product_name}",
},
"properties": {
"datetime": datetime(2021, 5, 3, 0, 39, 15, 718295),
# The minor version comes from the processing date,
# as used in filenames to distinguish reprocesses.
"odc:dataset_version": f"{collection}.0.{processing_date}",
"odc:file_format": "GeoTIFF",
"odc:processing_datetime": processing_datetime,
"odc:producer": organisation,
"odc:product_family": "level2",
"odc:region_code": "098084",
"eo:cloud_cover": cloud_cover,
"eo:gsd": 30.0,
"eo:instrument": "OLI_TIRS",
"eo:platform": "landsat-8",
"eo:sun_azimuth": 36.555_149_01,
"eo:sun_elevation": 31.263_730_68,
"landsat:algorithm_source_surface_reflectance": "LaSRC_1.5.0",
"landsat:collection_category": "T1",
"landsat:collection_number": int(leveln_collection),
"landsat:data_type": "L2SP",
"landsat:geometric_rmse_model_x": rmse_model_x,
"landsat:geometric_rmse_model_y": rmse_model_y,
"landsat:ground_control_points_model": points_model,
"landsat:ground_control_points_version": points_version,
"landsat:landsat_product_id": f"LC08_L2SP_098084_20210503_{processing_date}_0{leveln_collection}_T1",
"landsat:landsat_scene_id": "LC80980842021123LGN00",
"landsat:processing_software_version": software_version,
"landsat:station_id": "LGN",
"landsat:wrs_path": 98,
"landsat:wrs_row": 84,
},
"crs": "epsg:32653",
"geometry": {
"coordinates": [
[
[656390.7366486033, -3713985.0],
[653137.3072645832, -3716864.7197616836],
[609585.0, -3888976.9699604893],
[609585.0, -3905841.19690461],
[691402.3702754473, -3932222.5947522246],
[777854.1421397077, -3952215.0],
[796350.2575455576, -3952215.0],
[818563.2799541968, -3885739.54955405],
[846315.0, -3761631.0],
[823680.1297245529, -3749859.4052477754],
[677099.698619789, -3713985.0],
[656390.7366486033, -3713985.0],
],
],
"type": "Polygon",
},
"grids": {
"default": {
"shape": (60, 60),
"transform": (
3945.5000000000005,
0.0,
609585.0,
0.0,
-3970.5,
-3713985.0,
0.0,
0.0,
1.0,
),
},
},
"measurements": {
"blue": {
"path": offset(
l2_c2_ls8_folder,
f"LC08_L2SP_098084_20210503_{processing_date}_0{leveln_collection}_T1_SR_B2.TIF",
)
},
"coastal_aerosol": {
"path": offset(
l2_c2_ls8_folder,
f"LC08_L2SP_098084_20210503_{processing_date}_0{leveln_collection}_T1_SR_B1.TIF",
)
},
"green": {
"path": offset(
l2_c2_ls8_folder,
f"LC08_L2SP_098084_20210503_{processing_date}_0{leveln_collection}_T1_SR_B3.TIF",
)
},
"lwir": {
"path": offset(
l2_c2_ls8_folder,
f"LC08_L2SP_098084_20210503_{processing_date}_0{leveln_collection}_T1_ST_B10.TIF",
)
},
"nir": {
"path": offset(
l2_c2_ls8_folder,
f"LC08_L2SP_098084_20210503_{processing_date}_0{leveln_collection}_T1_SR_B5.TIF",
)
},
"qa_aerosol": {
"path": offset(
l2_c2_ls8_folder,
f"LC08_L2SP_098084_20210503_{processing_date}_0{leveln_collection}_T1_SR_QA_AEROSOL.TIF",
)
},
"quality": {
"path": offset(
l2_c2_ls8_folder,
f"LC08_L2SP_098084_20210503_{processing_date}_0{leveln_collection}_T1_{quality_tag}.TIF",
)
},
"red": {
"path": offset(
l2_c2_ls8_folder,
f"LC08_L2SP_098084_20210503_{processing_date}_0{leveln_collection}_T1_SR_B4.TIF",
)
},
"swir_1": {
"path": offset(
l2_c2_ls8_folder,
f"LC08_L2SP_098084_20210503_{processing_date}_0{leveln_collection}_T1_SR_B6.TIF",
)
},
"swir_2": {
"path": offset(
l2_c2_ls8_folder,
f"LC08_L2SP_098084_20210503_{processing_date}_0{leveln_collection}_T1_SR_B7.TIF",
)
},
},
"accessories": {
"metadata:landsat_mtl": {
"path": f"LC08_L2SP_098084_20210503_{processing_date}_0{leveln_collection}_T1_MTL.txt"
}
},
"lineage": lineage or {},
}
def expected_lt05_l2_c2_folder(
l2_c2_ls8_folder: Path = None,
offset: Callable[[Path, str], str] = relative_offset,
organisation="usgs.gov",
collection="2",
leveln_collection="2",
lineage=None,
):
""" """
org_code = organisation.split(".")[0]
product_name = f"{org_code}_ls5t_level{leveln_collection}_{collection}"
processing_datetime = datetime(2020, 9, 9, 10, 36, 59)
cloud_cover = 12.0
points_model = 965
points_version = 5
rmse_model_x = 3.085
rmse_model_y = 2.977
software_version = "LPGS_15.3.1c"
uuid = "b08b3f9e-b00c-5a67-88d8-a889f0e79d00"
quality_tag = "QA_PIXEL"
processing_date = processing_datetime.strftime("%Y%m%d")
return {
"$schema": "https://schemas.opendatacube.org/dataset",
"id": uuid,
"label": f"{product_name}-0-{processing_date}_090084_1998-03-08",
"product": {
"name": product_name,
"href": f"https://collections.dea.ga.gov.au/product/{product_name}",
},
"properties": {
"datetime": datetime(1998, 3, 8, 23, 26, 47, 294081),
# The minor version comes from the processing date,
# as used in filenames to distinguish reprocesses.
"odc:dataset_version": f"{collection}.0.{processing_date}",
"odc:file_format": "GeoTIFF",
"odc:processing_datetime": processing_datetime,
"odc:producer": organisation,
"odc:product_family": "level2",
"odc:region_code": "090084",
"eo:cloud_cover": cloud_cover,
"eo:gsd": 30.0,
"eo:instrument": "TM",
"eo:platform": "landsat-5",
"eo:sun_azimuth": 61.298_799_16,
"eo:sun_elevation": 41.583_263_99,
"landsat:algorithm_source_surface_reflectance": "LEDAPS_3.4.0",
"landsat:collection_category": "T1",
"landsat:collection_number": int(leveln_collection),
"landsat:data_type": "L2SP",
"landsat:geometric_rmse_model_x": rmse_model_x,
"landsat:geometric_rmse_model_y": rmse_model_y,
"landsat:ground_control_points_model": points_model,
"landsat:ground_control_points_version": points_version,
"landsat:landsat_product_id": f"LT05_L2SP_090084_19980308_{processing_date}_0{leveln_collection}_T1",
"landsat:landsat_scene_id": "LT50900841998067ASA00",
"landsat:processing_software_version": software_version,
"landsat:station_id": "ASA",
"landsat:wrs_path": 90,
"landsat:wrs_row": 84,
},
"crs": "epsg:32655",
"geometry": {
"coordinates": [
[
[686022.6472422444, -3724785.0],
[682595.9899406636, -3727553.91102268],
[638085.0, -3907560.0],
[790642.5733366499, -3940388.5126599884],
[830607.0294154783, -3943044.3288386273],
[880215.0, -3761340.0],
[707789.0909090909, -3724785.0],
[686022.6472422444, -3724785.0],
],
],
"type": "Polygon",
},
"grids": {
"default": {
"shape": (60, 60),
"transform": (
4035.5000000000005,
0.0,
638085.0,
0.0,
-3655.5,
-3724785.0,
0.0,
0.0,
1.0,
),
},
},
"measurements": {
"atmos_opacity": {
"path": offset(
l2_c2_ls8_folder,
f"LT05_L2SP_090084_19980308_{processing_date}_0{leveln_collection}_T1_SR_ATMOS_OPACITY.TIF",
)
},
"blue": {
"path": offset(
l2_c2_ls8_folder,
f"LT05_L2SP_090084_19980308_{processing_date}_0{leveln_collection}_T1_SR_B1.TIF",
)
},
"green": {
"path": offset(
l2_c2_ls8_folder,
f"LT05_L2SP_090084_19980308_{processing_date}_0{leveln_collection}_T1_SR_B2.TIF",
)
},
"lwir": {
"path": offset(
l2_c2_ls8_folder,
f"LT05_L2SP_090084_19980308_{processing_date}_0{leveln_collection}_T1_ST_B6.TIF",
)
},
"nir": {
"path": offset(
l2_c2_ls8_folder,
f"LT05_L2SP_090084_19980308_{processing_date}_0{leveln_collection}_T1_SR_B4.TIF",
)
},
"qa_cloud": {
"path": offset(
l2_c2_ls8_folder,
f"LT05_L2SP_090084_19980308_{processing_date}_0{leveln_collection}_T1_SR_CLOUD_QA.TIF",
)
},
"quality": {
"path": offset(
l2_c2_ls8_folder,
f"LT05_L2SP_090084_19980308_{processing_date}_0{leveln_collection}_T1_{quality_tag}.TIF",
)
},
"red": {
"path": offset(
l2_c2_ls8_folder,
f"LT05_L2SP_090084_19980308_{processing_date}_0{leveln_collection}_T1_SR_B3.TIF",
)
},
"swir_1": {
"path": offset(
l2_c2_ls8_folder,
f"LT05_L2SP_090084_19980308_{processing_date}_0{leveln_collection}_T1_SR_B5.TIF",
)
},
"swir_2": {
"path": offset(
l2_c2_ls8_folder,
f"LT05_L2SP_090084_19980308_{processing_date}_0{leveln_collection}_T1_SR_B7.TIF",
)
},
},
"accessories": {
"metadata:landsat_mtl": {
"path": f"LT05_L2SP_090084_19980308_{processing_date}_0{leveln_collection}_T1_MTL.txt"
}
},
"lineage": lineage or {},
}
def expected_le07_l2_c2_folder(
l2_c2_ls8_folder: Path = None,
offset: Callable[[Path, str], str] = relative_offset,
organisation="usgs.gov",
collection="2",
leveln_collection="2",
lineage=None,
):
""" """
org_code = organisation.split(".")[0]
product_name = f"{org_code}_ls7e_level{leveln_collection}_{collection}"
processing_datetime = datetime(2021, 4, 26, 10, 52, 29)
cloud_cover = 6.0
points_model = 1240
points_version = 5
rmse_model_x = 3.08
rmse_model_y = 3.663
software_version = "LPGS_15.4.0"
uuid = "2184a390-3bfc-5393-91fa-e9dae7c3fe39"
quality_tag = "QA_PIXEL"
processing_date = processing_datetime.strftime("%Y%m%d")
return {
"$schema": "https://schemas.opendatacube.org/dataset",
"id": uuid,
"label": f"{product_name}-0-{processing_date}_090084_2021-03-31",
"product": {
"name": product_name,
"href": f"https://collections.dea.ga.gov.au/product/{product_name}",
},
"properties": {
"datetime": datetime(2021, 3, 31, 23, 1, 59, 738020),
# The minor version comes from the processing date,
# as used in filenames to distinguish reprocesses.
"odc:dataset_version": f"{collection}.0.{processing_date}",
"odc:file_format": "GeoTIFF",
"odc:processing_datetime": processing_datetime,
"odc:producer": organisation,
"odc:product_family": "level2",
"odc:region_code": "090084",
"eo:cloud_cover": cloud_cover,
"eo:gsd": 30.0,
"eo:instrument": "ETM",
"eo:platform": "landsat-7",
"eo:sun_azimuth": 57.028_331_7,
"eo:sun_elevation": 31.970_873_9,
"landsat:algorithm_source_surface_reflectance": "LEDAPS_3.4.0",
"landsat:collection_category": "T1",
"landsat:collection_number": int(leveln_collection),
"landsat:data_type": "L2SP",
"landsat:geometric_rmse_model_x": rmse_model_x,
"landsat:geometric_rmse_model_y": rmse_model_y,
"landsat:ground_control_points_model": points_model,
"landsat:ground_control_points_version": points_version,
"landsat:landsat_product_id": f"LE07_L2SP_090084_20210331_{processing_date}_0{leveln_collection}_T1",
"landsat:landsat_scene_id": "LE70900842021090ASA00",
"landsat:processing_software_version": software_version,
"landsat:station_id": "ASA",
"landsat:wrs_path": 90,
"landsat:wrs_row": 84,
},
"crs": "epsg:32655",
"geometry": {
"coordinates": [
[
[691984.3611711152, -3725685.0],
[681093.954439248, -3750395.5802668664],
[643185.0, -3904314.5],
[671569.7291070349, -3915142.15448428],
[825623.838894661, -3944415.0],
[846655.2670137166, -3944415.0],
[857285.0835718605, -3923426.16362107],
[895215.0, -3765785.5],
[870923.2163880672, -3754935.8100720993],
[713242.5838228005, -3725685.0],
[691984.3611711152, -3725685.0],
],
],
"type": "Polygon",
},
"grids": {
"default": {
"shape": (60, 60),
"transform": (
4200.5,
0.0,
643185.0,
0.0,
-3645.5,
-3725685.0,
0.0,
0.0,
1.0,
),
},
},
"measurements": {
"atmos_opacity": {
"path": offset(
l2_c2_ls8_folder,
f"LE07_L2SP_090084_20210331_{processing_date}_0{leveln_collection}_T1_SR_ATMOS_OPACITY.TIF",
)
},
"blue": {
"path": offset(
l2_c2_ls8_folder,
f"LE07_L2SP_090084_20210331_{processing_date}_0{leveln_collection}_T1_SR_B1.TIF",
)
},
"green": {
"path": offset(
l2_c2_ls8_folder,
f"LE07_L2SP_090084_20210331_{processing_date}_0{leveln_collection}_T1_SR_B2.TIF",
)
},
"lwir": {
"path": offset(
l2_c2_ls8_folder,
f"LE07_L2SP_090084_20210331_{processing_date}_0{leveln_collection}_T1_ST_B6.TIF",
)
},
"nir": {
"path": offset(
l2_c2_ls8_folder,
f"LE07_L2SP_090084_20210331_{processing_date}_0{leveln_collection}_T1_SR_B4.TIF",
)
},
"qa_cloud": {
"path": offset(
l2_c2_ls8_folder,
f"LE07_L2SP_090084_20210331_{processing_date}_0{leveln_collection}_T1_SR_CLOUD_QA.TIF",
)
},
"quality": {
"path": offset(
l2_c2_ls8_folder,
f"LE07_L2SP_090084_20210331_{processing_date}_0{leveln_collection}_T1_{quality_tag}.TIF",
)
},
"red": {
"path": offset(
l2_c2_ls8_folder,
f"LE07_L2SP_090084_20210331_{processing_date}_0{leveln_collection}_T1_SR_B3.TIF",
)
},
"swir_1": {
"path": offset(
l2_c2_ls8_folder,
f"LE07_L2SP_090084_20210331_{processing_date}_0{leveln_collection}_T1_SR_B5.TIF",
)
},
"swir_2": {
"path": offset(
l2_c2_ls8_folder,
f"LE07_L2SP_090084_20210331_{processing_date}_0{leveln_collection}_T1_SR_B7.TIF",
)
},
},
"accessories": {
"metadata:landsat_mtl": {
"path": f"LE07_L2SP_090084_20210331_{processing_date}_0{leveln_collection}_T1_MTL.txt"
}
},
"lineage": lineage or {},
}
```
|
{
"source": "jeremyherbert/barbutils",
"score": 3
}
|
#### File: jeremyherbert/barbutils/barbutils.py
```python
import struct
import numpy as np
from typing import Union
def compute_barb_checksum(header: bytes) -> bytes:
tmp = sum(struct.unpack("b" * 56, header[:56]))
return struct.pack("<i", tmp)
def load_barb(payload: bytes):
if len(payload) < 61:
raise ValueError("payload is too short")
header = payload[:56]
checksum = payload[56:60]
waveform_data = payload[60:]
if compute_barb_checksum(header) != checksum:
raise ValueError("incorrect checksum")
sample_rate = struct.unpack("<d", header[0x08:0x08+8])[0]
maximum_voltage = struct.unpack("<f", header[0x14:0x14+4])[0]
minimum_voltage = struct.unpack("<f", header[0x18:0x18+4])[0]
scale_voltage = np.max([np.abs(maximum_voltage), np.abs(minimum_voltage)])
normalised_waveform_data = scale_voltage * np.array([x[0] for x in struct.iter_unpack("<h", waveform_data)]) / 2**15
return sample_rate, normalised_waveform_data
def generate_barb(data: Union[np.array, list], sample_rate: float) -> bytes:
if sample_rate < 10 or sample_rate > 1e9:
raise ValueError("sample_rate must be between 10Hz and 1GHz")
if np.max(data) > 10 or np.min(data) < -10:
raise ValueError("all elements of data must be between -10 and 10")
if len(data) == 0:
raise ValueError("data is empty")
header = b"\x01\x00\x00\x00"
header += b"\x04\x00\x00\x00"
header += struct.pack("<d", sample_rate)
header += b"\xCD\xCC\x8C\x3F"
header += struct.pack("<ff", np.max(data), np.min(data))
header += b"\x00\x00\x00\x00"
header += b"\x16\x00\x00\x00"
header += b"\x00\x00\x00\x00"
header += b"\x00\x00\x00\x00"
header += b"\x00\x00\x00\x00"
header += struct.pack("<I", len(data))
header += b"\x00\x00\x00\x00"
header += compute_barb_checksum(header)
tmp_data = np.array(data, dtype=np.float64)
tmp_data /= np.max(np.abs(tmp_data)*2)
normalised_data = tmp_data
scaled_data = np.array(normalised_data * (2**16 - 1), dtype=np.int16)
payload = b""
for i in scaled_data:
payload += struct.pack("<h", i)
return header + payload
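# A minimal round-trip sketch, not part of the original module: generate a +/-5 V, 1 kHz sine
# sampled at 1 MS/s, serialise it with generate_barb, then parse it back with load_barb.
# The output filename is arbitrary.
if __name__ == "__main__":
    t = np.linspace(0, 1e-3, 1000, endpoint=False)
    waveform = 5.0 * np.sin(2 * np.pi * 1e3 * t)
    payload = generate_barb(waveform, 1e6)
    with open("sine.barb", "wb") as f:
        f.write(payload)
    with open("sine.barb", "rb") as f:
        sample_rate, data = load_barb(f.read())
    print(sample_rate, len(data))  # expect 1000000.0 and 1000 samples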
```
|
{
"source": "jeremyherbert/jeremyherbert",
"score": 2
}
|
#### File: jeremyherbert/jeremyherbert/fusion360_python_debug_addin.py
```python
import adsk.core
import adsk.fusion
import importlib
import traceback
import os
import sys
import tempfile
import threading
from http.server import HTTPServer, BaseHTTPRequestHandler
import json
# add pydevd to path
this_script_dir = os.path.dirname(os.path.realpath(__file__))
if sys.platform == "darwin":
# mac
sys.path.append(os.path.join(this_script_dir, "pydevd_lib.macosx-10.9-x86_64-3.7"))
sys.path.append(os.path.join(this_script_dir, "pydevd_lib.macosx-10.9-x86_64-3.7", "pydevd_attach_to_process"))
else:
# windows
pass
# run log path
script_run_log_path = os.path.join(tempfile.gettempdir(), "fusion360_python_debug_addin_log.txt")
# name of the asynchronous event which will be used to launch a script inside fusion 360
custom_event_name = 'fusion360_python_debug_addin_run_script'
class ThreadEventHandler(adsk.core.CustomEventHandler):
"""
An event handler which can run a python script in the main thread of fusion 360.
"""
def notify(self, args):
try:
args = json.loads(args.additionalInfo)
script_path = os.path.abspath(args['script'])
detach = args['detach']
if os.path.isfile(script_path):
script_name = os.path.splitext(os.path.basename(script_path))[0]
script_dir = os.path.dirname(script_path)
sys.path.append(script_dir)
try:
import attach_script
attach_script.attach(args['debug_port'], 'localhost')
module = importlib.import_module(script_name)
importlib.reload(module)
module.run({'isApplicationStartup': False})
finally:
del sys.path[-1]
if detach:
try:
import pydevd
pydevd.stoptrace()
except:
pass
except:
with open(script_run_log_path, 'w') as f:
f.write(traceback.format_exc())
class FusionInjectorHTTPRequestHandler(BaseHTTPRequestHandler):
"""
A HTTP request handler which queues an event in the main thread of fusion 360 (event is to run a script)
"""
def do_POST(self):
content_length = int(self.headers['Content-Length'])
body = self.rfile.read(content_length)
try:
request_data = json.loads(body.decode())
assert "script" in request_data
assert "detach" in request_data
assert "debug_port" in request_data
adsk.core.Application.get().fireCustomEvent(custom_event_name, json.dumps(request_data))
self.send_response(200)
self.end_headers()
self.wfile.write(b"done")
except:
self.send_response(500)
self.end_headers()
self.wfile.write(traceback.format_exc().encode())
def run_server():
server_address = ('localhost', 8181)
httpd = HTTPServer(server_address, FusionInjectorHTTPRequestHandler)
httpd.serve_forever()
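# Example client request (not part of the add-in): an external process can ask Fusion 360 to
# run a script by POSTing JSON to the server started below, e.g.
#
#   import json, urllib.request
#   body = json.dumps({"script": "/path/to/my_script.py", "detach": True, "debug_port": 5678}).encode()
#   urllib.request.urlopen("http://localhost:8181", data=body)
#
# The "script"/"detach"/"debug_port" keys and port 8181 mirror FusionInjectorHTTPRequestHandler
# and run_server above; the script path and debug port are placeholders.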
handlers = []
def run(context):
    ui = None
    try:
app = adsk.core.Application.get()
ui = app.userInterface
if sys.platform != "darwin":
raise NotImplementedError("Windows support not implemented")
try:
app.unregisterCustomEvent(custom_event_name)
except:
pass
# set up fusion 360 callback
custom_event = app.registerCustomEvent(custom_event_name)
event_handler = ThreadEventHandler()
custom_event.add(event_handler)
handlers.append(event_handler) # prevent instance from being garbage collected
http_server_thread = threading.Thread(target=run_server, daemon=True)
http_server_thread.start()
ui.messageBox('addin started')
except:
if ui:
ui.messageBox(('AddIn Start Failed: {}').format(traceback.format_exc()))
def stop(context):
ui = None
try:
app = adsk.core.Application.get()
ui = app.userInterface
except:
if ui:
ui.messageBox(('AddIn Stop Failed: {}').format(traceback.format_exc()))
```
|
{
"source": "jeremyherbert/polyglot-turtle-xiao",
"score": 3
}
|
#### File: polyglot-turtle-xiao/misc/baud_calculator.py
```python
def calculate_register_from_frequency_arith(refclk, oversampling, baud_freq):
assert oversampling in [16, 8, 3]
return 65536 * (1 - oversampling * (baud_freq/refclk))
def calculate_frequency_from_register_arith(refclk, oversampling, register):
return (refclk/oversampling) * (1 - (register/65536))
def calculate_error_at_frequency_arith(refclk, oversampling, baud_freq):
register = calculate_register_from_frequency_arith(refclk, oversampling, baud_freq)
actual_frequency = calculate_frequency_from_register_arith(refclk, oversampling, int(register))
return 1 - (baud_freq / actual_frequency)
def calculate_register_from_frequency_frac(refclk, oversampling, fp, baud_freq):
assert oversampling in [16, 8, 3]
assert 0 <= fp <= 7
return (refclk / (oversampling * baud_freq)) - fp/8
def calculate_frequency_from_register_frac(refclk, oversampling, fp, register):
assert oversampling in [16, 8, 3]
assert 0 <= fp <= 7
    return refclk / (oversampling * (register + fp / 8))
def calculate_error_at_frequency_frac(refclk, oversampling, fp, baud_freq):
register = calculate_register_from_frequency_frac(refclk, oversampling, fp, baud_freq)
actual_frequency = calculate_frequency_from_register_frac(refclk, oversampling, fp, int(register))
return 1 - (baud_freq / actual_frequency)
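# A small helper sketch (not in the original script): pick the fractional part (fp) that
# minimises the baud error magnitude, using the functions above. The 48 MHz reference clock
# default matches the value used in __main__ below.
def best_fractional_part(baud_freq, refclk=48e6, oversampling=16):
    return min(
        range(8),
        key=lambda fp: abs(calculate_error_at_frequency_frac(refclk, oversampling, fp, baud_freq)),
    )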
if __name__ == "__main__":
    for freq in [1200, 2400, 4800, 9600, 19200, 38400, 57600, 115200, 128000, 230400, 256000, 460800, 500000, 675000, 1000000]:
refclk = 48e6
fp = 0
if 1200 <= freq <= 10000:
oversampling = 16
elif 10000 < freq <= 500000:
oversampling = 8
elif 500000 < freq:
oversampling = 3
else:
raise ValueError
print("baud:", freq)
print("\toversampling:", oversampling)
print("\tarith",
"; register:", int(calculate_register_from_frequency_arith(refclk, oversampling, freq)),
"; error:", calculate_error_at_frequency_arith(refclk, oversampling, freq) * 100, "%")
if oversampling >= 8:
for fp in range(8):
print("\tfp:", fp,
"; register:", int(calculate_register_from_frequency_frac(refclk, oversampling, fp, freq)),
"; error:", calculate_error_at_frequency_frac(refclk, oversampling, fp, freq) * 100, "%")
print()
```
|
{
"source": "jeremyherbert/real_time_stdev",
"score": 3
}
|
#### File: variance/tests/test_variance.py
```python
import cocotb
from cocotb.triggers import Timer
from cocotb.result import TestFailure
import random
import math
import numpy as np
TICK = 1
def integer_variance(integers):
raw_style = int(np.sum(integers**2)>>7) - int((np.sum(integers)>>7)**2)
return raw_style
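# integer_variance mirrors the fixed-point arithmetic expected of the DUT: with a 128-sample
# window, >>7 is a truncating divide by 128, so the result approximates
# Var(X) = E[X^2] - E[X]^2 with the same truncation (the test compares dut.data_out to it directly).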
@cocotb.test()
def variance_basic_test(dut):
integers = np.random.randint(0, (2**14)-1, 128)
# reset
dut.reset = 0;
dut.data_in = 0;
dut.clk = 0
yield Timer(TICK)
dut.clk = 1
yield Timer(TICK)
dut.reset = 1;
    # feed in 128 integers
for i in range(128):
dut.data_in = int(integers[i])
dut.clk = 0
yield Timer(TICK)
dut.clk = 1
yield Timer(TICK)
for i in range(7):
dut.clk = 0
yield Timer(TICK)
dut.clk = 1
yield Timer(TICK)
integer_var = integer_variance(integers)
# np_var = int(np.mean(integers**2)) - int(np.mean(integers)**2)
# print("first integer was", hex(integers[0]))
# print("integer sum was", hex(np.sum(integers)))
# print("integer square sum was", hex(np.sum(integers**2)))
# print("mean was", hex(int(np.mean(integers))))
# print("mean of squares was", hex(int(np.mean(integers**2))))
# print("integer variance was", integer_var, " (hex: %s)" % hex(integer_var))
# print("numpy variance was", np_var)
# print("variance needs", np.log(integer_variance(integers))/np.log(2), "bits")
if int(dut.data_out) != integer_var:
raise TestFailure("variance output was wrong; got %i, expected %i" % (int(dut.data_out), integer_variance(integers)))
# one more clock
dut.clk = 0
yield Timer(TICK)
dut.clk = 1
yield Timer(TICK)
integer_var = integer_variance(np.append(integers[1:], integers[-1]))
if int(dut.data_out) != integer_var:
raise TestFailure("variance output was wrong; got %i, expected %i" % (int(dut.data_out), integer_variance(integers)))
```
|
{
"source": "jeremy-hernandez/MyDSA",
"score": 3
}
|
#### File: dss/collections/linkedlist_double.py
```python
import colorama
class DllNode:
def __init__(self, _val=0, _next=None, _prev=None):
self.value = _val
self.next = _next
self.prev = _prev
def __str__(self) -> str:
return f"Value: {self.value} | Prev == None: {self.prev == None} | Next === None: {self.next == None}"
class DoublyLinkedList:
def __init__(self):
self.head = None
self.tail = None
self.__size = 0
def __len__(self):
return self.__size
def __str__(self):
curr = self.head
node_values = []
while curr:
node_values.append(colorama.Fore.GREEN + str(curr.value))
node_values.append(colorama.Style.RESET_ALL + "<->")
curr = curr.next
return "".join(node_values[:len(node_values)-1])
### Access ###
def get(self, index):
if index < 0 or index >= self.__size:
raise IndexError("Index out of range")
curr = self.head
while index > 0:
curr = curr.next
index -= 1
return curr
### Insertions ###
def append(self, value):
node = DllNode(value, None, self.tail)
if not self.head:
# no elements in list
self.tail = node
self.head = self.tail
else:
self.tail.next = node
self.tail = self.tail.next
self.__size += 1
return True
def append_front(self, value):
node = DllNode(value, self.head)
if not self.head:
# no elements in list
self.head = node
self.tail = self.head
else:
            self.head.prev = node
            self.head = node
self.__size += 1
return True
def insert(self, index, value):
if index == self.__size:
self.append(value)
elif index == 0:
self.append_front(value)
else:
prev = self.get(index - 1)
node = DllNode(value, prev.next, prev)
prev.next.prev = node
prev.next = node
self.__size += 1
return True
    ### Deletions ###
def pop(self):
if not self.head:
return None
node = self.tail
if self.__size == 1:
self.head = self.tail = None
else:
self.tail = self.tail.prev
self.tail.next = None
self.__size -= 1
return node
def pop_front(self):
if not self.head:
return None
node = self.head
if self.__size == 1:
self.head = self.tail = None
else:
self.head = node.next
self.head.prev = None
self.__size -= 1
return node
def remove(self, index):
        if index == self.__size - 1:
return self.pop()
if index == 0:
return self.pop_front()
node = self.get(index)
node.prev.next = node.next
node.next.prev = node.prev
self.__size -= 1
return node
### Utils ###
### Private Functions ###
```
#### File: dss/collections/stack.py
```python
class Stack:
def __init__(self) -> None:
self.__stack = []
def __len__(self):
return len(self.__stack)
def peek(self):
return self.__stack[len(self.__stack) - 1]
def pop(self):
return self.__stack.pop() # this is cheating
def push(self, value):
self.__stack.append(value) # also cheating
```
#### File: tests/datastructures/test_dynamic_array.py
```python
import pytest
from dss.collections.array_dynamic import DynamicArray
class TestDynamicArrayOnCreation:
def test_should_not_be_none(self):
arr = DynamicArray()
assert(arr) is not None
def test_should_have_capacity_10(self):
arr = DynamicArray()
assert(arr.get_capacity()) == 10
def test_should_have_capacity_5(self):
arr = DynamicArray(5)
assert(arr.get_capacity()) == 5
def test_should_raise_exception_for_invalid_initial_capacity(self):
with pytest.raises(ValueError):
arr = DynamicArray(0)
class TestDynamicArrayInserts:
def test_get_invalid_index_greater_size(self):
arr = DynamicArray()
with pytest.raises(IndexError):
arr.get(10)
def test_get_invalid_index_less_than_zero(self):
arr = DynamicArray()
with pytest.raises(IndexError):
arr.get(-1)
class TestDynamicArrayDeletes:
def test_get_invalid_index_greater_size(self):
arr = DynamicArray()
with pytest.raises(IndexError):
arr.get(10)
def test_get_invalid_index_less_than_zero(self):
arr = DynamicArray()
with pytest.raises(IndexError):
arr.get(-1)
class TestDynamicArrayIndexError:
def test_get_invalid_index_greater_size(self):
arr = DynamicArray()
with pytest.raises(IndexError):
arr.get(10)
def test_get_invalid_index_less_than_zero(self):
arr = DynamicArray()
with pytest.raises(IndexError):
arr.get(-1)
```
#### File: tests/datastructures/test_singly_linked_list.py
```python
import pytest
from dss.collections.linkedlist_single import SinglyLinkedList
class TestOnCreation:
def test_should_not_be_none(self):
arr = SinglyLinkedList()
assert(arr) is not None
```
|
{
"source": "JeremyHi/402-CryptoSym",
"score": 3
}
|
#### File: Malmo-0.30.0/homework-3/Environment.py
```python
import MalmoPython
import os
import sys
import time
from MazeAgent import MazeAgent
# Environment Configuration Constants:
FLOOR_LEVEL = 5
MISSION_TIME = 120000
MAX_RETRIES = 3
TICK_LENGTH = 1 # seconds
class Environment:
# Problem-specific constants
GOAL_BLOCK = "diamond_block"
WARN_BLOCK = "obsidian"
SAFE_BLOCK = "cobblestone"
def __init__ (self, maze):
self.maze = maze
self.agMaze = [r.replace('P', '.') for r in maze]
self.mission_xml = self.getMissionXML(maze)
self.mission = MalmoPython.MissionSpec(self.mission_xml, True)
self.mission_record = MalmoPython.MissionRecordSpec()
self.malmo = MalmoPython.AgentHost()
self.agent = MazeAgent(self)
self.attempt = 1
self.goalReached = False
def __translateCoord (self, c, r):
        return (len(self.maze[0]) - 1 - c, len(self.maze) - 1 - r)
def __generatePit (self, c, r):
maze = self.maze
# Generate pit
result = '<DrawBlock x="{}" y="5" z="{}" type="lava"/>'.format(c, r)
# Generate obsidian warning blocks, as long as there isn't another tile already
# there, and the tile would not be generated outside of the maze's bounds
# (probably a better way to do this, but meh)
obsidians = list(filter(lambda b: b[0] != 0 and b[1] != 0 and b[0] < len(maze[0]) and b[1] < len(maze) and maze[len(maze) - b[1] - 1][len(maze[0]) - b[0] - 1] == ".",\
[(c-1, r), (c+1, r), (c, r-1), (c, r+1)]))
for coord in obsidians:
result = result + '<DrawBlock x="{}" y="{}" z="{}" type="{}"/>'.format(coord[0], FLOOR_LEVEL, coord[1], Environment.WARN_BLOCK)
return result
def __generateMaze (self):
# maze grid is symmetrical
maze = self.maze
rows = len(maze)
cols = len(maze[0])
# Base grid at 0,0
result = '<DrawCuboid x1="0" y1="{}" z1="0" x2="{}" y2="{}" z2="{}" type="{}"/>'.format(FLOOR_LEVEL, cols-1, FLOOR_LEVEL, rows-1, Environment.SAFE_BLOCK)
# Parse special cell contents
for r, row in enumerate(maze):
for c, cell in enumerate(row):
tcoord = self.__translateCoord(c, r)
if cell == "*":
self.playerPos = tcoord
elif cell == "X":
result = result + '<DrawBlock x="{}" y="{}" z="{}" type="{}"/>'.format(tcoord[0], FLOOR_LEVEL + 1, tcoord[1], Environment.SAFE_BLOCK)
elif cell == "G":
result = result + '<DrawBlock x="{}" y="{}" z="{}" type="{}"/>'.format(tcoord[0], FLOOR_LEVEL, tcoord[1], Environment.GOAL_BLOCK)
elif cell == "P":
result = result + self.__generatePit(tcoord[0], tcoord[1])
return result
def getMissionXML (self, maze):
mazeXML = self.__generateMaze()
return '''<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<Mission xmlns="http://ProjectMalmo.microsoft.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<About>
<Summary>Maze Pitfalls!</Summary>
</About>
<ServerSection>
<ServerInitialConditions>
<Time>
<StartTime>1000</StartTime>
<AllowPassageOfTime>false</AllowPassageOfTime>
</Time>
<Weather>clear</Weather>
</ServerInitialConditions>
<ServerHandlers>
<FlatWorldGenerator generatorString="3;7,2*3,11;8;village"/>
<DrawingDecorator>
<DrawCuboid x1="-50" y1="''' + str(FLOOR_LEVEL) + '''" z1="-50" x2="50" y2="''' + str(FLOOR_LEVEL) + '''" z2="50" type="air"/>
''' + mazeXML + '''
</DrawingDecorator>
<ServerQuitFromTimeUp timeLimitMs="''' + str(MISSION_TIME) + '''"/>
<ServerQuitWhenAnyAgentFinishes/>
</ServerHandlers>
</ServerSection>
<AgentSection mode="Survival">
<Name>BlindBot</Name>
<AgentStart>
<Placement x="''' + str(self.playerPos[0] + 0.5) + '''" y="''' + str(FLOOR_LEVEL + 1.0) + '''" z="''' + str(self.playerPos[1] + 0.5) + '''"/>
</AgentStart>
<AgentHandlers>
<DiscreteMovementCommands/>
<AgentQuitFromTouchingBlockType>
<Block type="lava"/>
</AgentQuitFromTouchingBlockType>
<ObservationFromGrid>
<Grid name="floor3x3">
<min x="-1" y="-1" z="-1"/>
<max x="1" y="-1" z="1"/>
</Grid>
</ObservationFromGrid>
</AgentHandlers>
</AgentSection>
</Mission>'''
def goalTest (self, block):
if block == Environment.GOAL_BLOCK:
self.goalReached = True
print("[$] Mission Successful! Deaths: " + str(self.attempt - 1))
def startMission (self):
if (self.attempt <= MAX_RETRIES):
print("[~] Starting mission - Attempt #" + str(self.attempt))
self.malmo.startMission( self.mission, self.mission_record )
world_state = self.malmo.getWorldState()
while not world_state.has_mission_begun:
sys.stdout.write(".")
time.sleep(0.1)
world_state = self.malmo.getWorldState()
print("[!] Mission started!")
while world_state.is_mission_running and not self.goalReached:
time.sleep(TICK_LENGTH)
self.agent.act()
world_state = self.malmo.getWorldState()
if not self.goalReached:
self.agent.die()
self.attempt += 1
self.startMission()
else:
print("[X] GAME OVER!")
```
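The index flip in `__translateCoord` is the easiest part of the maze generation to get backwards, so here is the same arithmetic restated on its own; the 5x5 maze and the `translate_coord` helper are illustrative stand-ins, not the real mission data.
```python
# Hypothetical 5x5 maze: column/row indices are mirrored into world x/z.
maze = ["XXXXX",
        "X..GX",
        "X...X",
        "X*..X",
        "XXXXX"]

def translate_coord(maze, c, r):
    # Same formula as Environment.__translateCoord.
    return (len(maze[0]) - 1 - c, len(maze) - 1 - r)

assert translate_coord(maze, 0, 0) == (4, 4)   # top-left cell -> far corner
assert translate_coord(maze, 4, 4) == (0, 0)   # bottom-right cell -> origin
```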
#### File: Malmo-0.30.0/Python_Examples/default_world_test.py
```python
import MalmoPython
import os
import random
import sys
import time
import datetime
import json
import random
def GetMissionXML():
''' Build an XML mission string that uses the DefaultWorldGenerator.'''
return '''<?xml version="1.0" encoding="UTF-8" ?>
<Mission xmlns="http://ProjectMalmo.microsoft.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<About>
<Summary>Normal life</Summary>
</About>
<ServerSection>
<ServerHandlers>
<DefaultWorldGenerator />
</ServerHandlers>
</ServerSection>
<AgentSection mode="Survival">
<Name>Rover</Name>
<AgentStart>
<Inventory>
<InventoryBlock slot="0" type="glowstone" quantity="63"/>
</Inventory>
</AgentStart>
<AgentHandlers>
<ContinuousMovementCommands/>
<ObservationFromFullStats/>
</AgentHandlers>
</AgentSection>
</Mission>'''
# Variety of strategies for dealing with loss of motion:
commandSequences=[
"jump 1; move 1; wait 1; jump 0; move 1; wait 2", # attempt to jump over obstacle
"turn 0.5; wait 1; turn 0; move 1; wait 2", # turn right a little
"turn -0.5; wait 1; turn 0; move 1; wait 2", # turn left a little
"move 0; attack 1; wait 5; pitch 0.5; wait 1; pitch 0; attack 1; wait 5; pitch -0.5; wait 1; pitch 0; attack 0; move 1; wait 2", # attempt to destroy some obstacles
"move 0; pitch 1; wait 2; pitch 0; use 1; jump 1; wait 6; use 0; jump 0; pitch -1; wait 1; pitch 0; wait 2; move 1; wait 2" # attempt to build tower under our feet
]
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # flush print output immediately
my_mission = MalmoPython.MissionSpec(GetMissionXML(), True)
agent_host = MalmoPython.AgentHost()
try:
agent_host.parse( sys.argv )
except RuntimeError as e:
print 'ERROR:',e
print agent_host.getUsage()
exit(1)
if agent_host.receivedArgument("help"):
print agent_host.getUsage()
exit(0)
if agent_host.receivedArgument("test"):
my_mission.timeLimitInSeconds(20) # else mission runs forever
# Attempt to start the mission:
max_retries = 3
for retry in range(max_retries):
try:
agent_host.startMission( my_mission, MalmoPython.MissionRecordSpec() )
break
except RuntimeError as e:
if retry == max_retries - 1:
print "Error starting mission",e
print "Is the game running?"
exit(1)
else:
time.sleep(2)
# Wait for the mission to start:
world_state = agent_host.getWorldState()
while not world_state.has_mission_begun:
time.sleep(0.1)
world_state = agent_host.getWorldState()
currentSequence="move 1; wait 4" # start off by moving
currentSpeed = 0.0
distTravelledAtLastCheck = 0.0
timeStampAtLastCheck = datetime.datetime.now()
cyclesPerCheck = 10 # controls how quickly the agent responds to getting stuck, and the amount of time it waits for on a "wait" command.
currentCycle = 0
waitCycles = 0
# Main loop:
while world_state.is_mission_running:
world_state = agent_host.getWorldState()
if world_state.number_of_observations_since_last_state > 0:
obvsText = world_state.observations[-1].text
currentCycle += 1
if currentCycle == cyclesPerCheck: # Time to check our speed and decrement our wait counter (if set):
currentCycle = 0
if waitCycles > 0:
waitCycles -= 1
# Now use the latest observation to calculate our approximate speed:
data = json.loads(obvsText) # observation comes in as a JSON string...
dist = data.get(u'DistanceTravelled', 0) #... containing a "DistanceTravelled" field (amongst other things).
timestamp = world_state.observations[-1].timestamp # timestamp arrives as a python DateTime object
delta_dist = dist - distTravelledAtLastCheck
delta_time = timestamp - timeStampAtLastCheck
currentSpeed = 1000000.0 * delta_dist / float(delta_time.microseconds) # "centimetres" per second?
distTravelledAtLastCheck = dist
timeStampAtLastCheck = timestamp
if waitCycles == 0:
# Time to execute the next command, if we have one:
if currentSequence != "":
commands = currentSequence.split(";", 1)
command = commands[0].strip()
if len(commands) > 1:
currentSequence = commands[1]
else:
currentSequence = ""
print command
verb,sep,param = command.partition(" ")
if verb == "wait": # "wait" isn't a Malmo command - it's just used here to pause execution of our "programme".
waitCycles = int(param.strip())
else:
agent_host.sendCommand(command) # Send the command to Minecraft.
if currentSequence == "" and currentSpeed < 50 and waitCycles == 0: # Are we stuck?
currentSequence = random.choice(commandSequences) # Choose a random action (or insert your own logic here for choosing more sensibly...)
print "Stuck! Chosen programme: " + currentSequence
# Mission has ended.
```
#### File: Malmo-0.30.0/Python_Examples/teleport_test.py
```python
import MalmoPython
import json
import math
import os
import random
import sys
import time
import errno
from timeit import default_timer as timer
# Test that AbsoluteMovementCommand teleportation works for long distances.
WIDTH=860
HEIGHT=480
def genItems():
items = ""
for x in xrange(10):
for z in xrange(10):
items += '<DrawBlock x="' + str(x * 1000) + '" y="3" z="' + str(z * 1000) + '" type="redstone_block"/>'
items += '<DrawItem x="' + str(x * 1000) + '" y="10" z="' + str(z * 1000) + '" type="emerald"/>'
return items
def startMission(agent_host, xml):
my_mission = MalmoPython.MissionSpec(xml, True)
my_mission_record = MalmoPython.MissionRecordSpec()
max_retries = 3
for retry in range(max_retries):
try:
agent_host.startMission( my_mission, my_mission_record )
break
except RuntimeError as e:
if retry == max_retries - 1:
print "Error starting mission",e
print "Is the game running?"
exit(1)
else:
time.sleep(2)
world_state = agent_host.peekWorldState()
while not world_state.has_mission_begun:
time.sleep(0.1)
world_state = agent_host.peekWorldState()
for error in world_state.errors:
print "Error:",error.text
if len(world_state.errors) > 0:
exit(1)
def processFrame(frame):
'''Label each pixel as either red, green or blue, and return the percentages of each.'''
red_total = 0
green_total = 0
blue_total = 0
num_pixels = WIDTH * HEIGHT
for pixel in xrange(0, num_pixels*3, 3):
r = frame[pixel]
g = frame[pixel+1]
b = frame[pixel+2]
# We're not worrying about cases where r==g, g==b, etc, since
# the pixels are very obviously either red, green or blue.
if r > g:
if r > b:
red_total += 1
else:
blue_total += 1
else:
if g > b:
green_total += 1
else:
blue_total += 1
red_total = int(100 * red_total / num_pixels)
blue_total = int(100 * blue_total / num_pixels)
green_total = int(100 * green_total / num_pixels)
return red_total, green_total, blue_total
worldXML = '''<?xml version="1.0" encoding="UTF-8" ?>
<Mission xmlns="http://ProjectMalmo.microsoft.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<About>
<Summary>Teleportastic</Summary>
</About>
<ServerSection>
<ServerInitialConditions>
<Time>
<StartTime>1000</StartTime>
<AllowPassageOfTime>false</AllowPassageOfTime> <!-- Keep steady daylight to make image parsing simple -->
</Time>
<Weather>clear</Weather> <!-- Keep steady weather to make image parsing simple -->
</ServerInitialConditions>
<ServerHandlers>
<FlatWorldGenerator generatorString="3;;1;" forceReset="true" destroyAfterUse="true"/>
<DrawingDecorator>''' + genItems() + '''</DrawingDecorator>
<ServerQuitWhenAnyAgentFinishes />
</ServerHandlers>
</ServerSection>
<AgentSection mode="Survival">
<Name>Brundlefly</Name>
<AgentStart>
<Placement x="-100.5" y="4" z="400.5" yaw="0" pitch="90"/> <!-- Look down at the ground -->
<Inventory/>
</AgentStart>
<AgentHandlers>
<ObservationFromFullInventory/>
<AbsoluteMovementCommands/>
<MissionQuitCommands/>
<RewardForCollectingItem>
<Item type="emerald" reward="1"/>
</RewardForCollectingItem>
<VideoProducer>
<Width>''' + str(WIDTH) + '''</Width>
<Height>''' + str(HEIGHT) + '''</Height>
</VideoProducer>
</AgentHandlers>
</AgentSection>
</Mission>'''
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # flush print output immediately
agent_host = MalmoPython.AgentHost()
try:
agent_host.parse( sys.argv )
except RuntimeError as e:
print 'ERROR:',e
print agent_host.getUsage()
exit(1)
if agent_host.receivedArgument("help"):
print agent_host.getUsage()
exit(0)
startMission(agent_host, worldXML)
world_state = agent_host.peekWorldState()
# Teleport to each location in turn, see if we collect the right number of emeralds,
# and check we get the right image for each location.
total_reward = 0
for x in xrange(10):
for z in xrange(10):
teleport_x = x * 1000 + 0.5
teleport_z = z * 1000 + 0.5
tp_command = "tp " + str(teleport_x)+ " 4 " + str(teleport_z)
print "Sending command: " + tp_command
agent_host.sendCommand(tp_command)
# Hang around until the image stabilises and the reward is collected.
# While the chunk is loading, everything will be blue.
# Once the frame is rendering correctly, we should be looking at a field of grass
# with a single redstone square in the middle.
good_frame = False
collected_reward = False
start = timer()
end_reward = None
end_frame = None
while not good_frame or not collected_reward:
world_state = agent_host.getWorldState()
if not world_state.is_mission_running:
print "Mission ended prematurely - error."
exit(1)
if not collected_reward and world_state.number_of_rewards_since_last_state > 0:
total_reward += world_state.rewards[-1].getValue()
print "Total reward: " + str(total_reward)
collected_reward = True
end_reward = timer()
if not good_frame and world_state.number_of_video_frames_since_last_state > 0:
frame_x = world_state.video_frames[-1].xPos
frame_z = world_state.video_frames[-1].zPos
if math.fabs(frame_x - teleport_x) < 0.001 and math.fabs(frame_z - teleport_z) < 0.001:
r,g,b = processFrame(world_state.video_frames[-1].pixels)
if b == 0:
good_frame = True
end_frame = timer()
print "Took " + "{0:.2f}".format((end_frame - start) * 1000) + "ms to stabilise frame; " + "{0:.2f}".format((end_reward - start) * 1000) + "ms to collect reward."
# Visited all the locations - quit the mission.
agent_host.sendCommand("quit")
while world_state.is_mission_running:
world_state = agent_host.peekWorldState()
print "Teleport mission over."
world_state = agent_host.getWorldState()
if world_state.number_of_rewards_since_last_state > 0:
total_reward += world_state.rewards[-1].getValue()
if total_reward != 100:
print "Got incorrect reward (" + str(total_reward) + ") - should have received 100 for collecting 100 emeralds."
exit(1)
print "Test successful"
exit(0)
```
#### File: 402-CryptoSym/pathfinder/Pathfinder.py
```python
import unittest
from MazeProblem import MazeProblem
from SearchTreeNode import SearchTreeNode
class Pathfinder:
# solve is parameterized by a maze pathfinding problem
# (see MazeProblem.py and unit tests below), and will
# return a list of actions that solves that problem. An
# example returned list might look like:
# ["U", "R", "R", "U"]
def solve(self, problem):
already_visited = set()
nodes = [[SearchTreeNode(state=problem.initial, action=None, parent=None)]]
already_visited.add(nodes[0][0].state)
counter = 0
while nodes:
current_level = nodes.pop()
for node in current_level:
if problem.goalTest(node.state):
solution = []
while node.parent is not None:
solution.append(node.action)
node = node.parent
print(counter)
return list(reversed(solution))
else:
already_visited.add((node.state))
next_level = []
for node in current_level:
transitions = problem.transitions(node.state)
for transition in transitions:
if transition[1] not in already_visited:
new_node = SearchTreeNode(transition[1], transition[0], node)
counter += 1
next_level.append(new_node)
nodes.append(next_level)
class PathfinderTests(unittest.TestCase):
def test_maze1(self):
maze = [
"XXXXX",
"X..GX",
"X...X",
"X*..X",
"XXXXX"]
problem = MazeProblem(maze)
soln = Pathfinder.solve(self, problem)
soln_test = problem.solnTest(soln)
self.assertTrue(soln_test[1])
self.assertEqual(soln_test[0], 4)
def test_maze2(self):
maze = [
"XXXXX",
"XG..X",
"XX..X",
"X*..X",
"XXXXX"]
problem = MazeProblem(maze)
soln = Pathfinder.solve(self, problem)
soln_test = problem.solnTest(soln)
self.assertTrue(soln_test[1])
self.assertEqual(soln_test[0], 4)
def test_maze3(self):
maze = [
"XXXXXXX",
"XG....X",
"XX...XX",
"X*..XXX",
"XXXXXXX"]
problem = MazeProblem(maze)
soln = Pathfinder.solve(self, problem)
soln_test = problem.solnTest(soln)
self.assertTrue(soln_test[1])
self.assertEqual(soln_test[0], 4)
def test_maze4(self):
maze = [
"XXXXXXXXX",
"XXXG..XXX",
"XXXXX.XXX",
"XX*...XXX",
"XXXXXXXXX"]
problem = MazeProblem(maze)
soln = Pathfinder.solve(self, problem)
soln_test = problem.solnTest(soln)
self.assertTrue(soln_test[1])
self.assertEqual(soln_test[0], 7)
def test_maze5(self):
maze = ["XXXXXXXXXX", "X..X.....X", "X*.....XXX",
"XXX..X.XGX", "X...XX.X.X", "X.X.X.X...X",
"X...X.XX.X", "X..G.XXX.X", "X........X",
"XXXXXXXXXX"]
problem = MazeProblem(maze)
soln = Pathfinder.solve(self, problem)
soln_test = problem.solnTest(soln)
self.assertTrue(soln_test[1])
self.assertEqual(soln_test[0], 7)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jeremyhu/llvm",
"score": 3
}
|
#### File: gn/build/sync_source_lists_from_cmake.py
```python
from __future__ import print_function
import os
import re
import subprocess
import sys
def sync_source_lists():
# Use shell=True on Windows in case git is a bat file.
gn_files = subprocess.check_output(['git', 'ls-files', '*BUILD.gn'],
shell=os.name == 'nt').splitlines()
    # Matches e.g. | "foo.cpp",|, captures |foo.cpp| in group 1.
gn_cpp_re = re.compile(r'^\s*"([^"]+\.(?:cpp|c|h|S))",$', re.MULTILINE)
    # Matches e.g. | foo.cpp|, captures |foo.cpp| in group 1.
cmake_cpp_re = re.compile(r'^\s*([A-Za-z_0-9./-]+\.(?:cpp|c|h|S))$',
re.MULTILINE)
changed = False
for gn_file in gn_files:
# The CMakeLists.txt for llvm/utils/gn/secondary/foo/BUILD.gn is
# directly at foo/CMakeLists.txt.
strip_prefix = 'llvm/utils/gn/secondary/'
if not gn_file.startswith(strip_prefix):
continue
cmake_file = os.path.join(
os.path.dirname(gn_file[len(strip_prefix):]), 'CMakeLists.txt')
if not os.path.exists(cmake_file):
continue
def get_sources(source_re, text):
return set([m.group(1) for m in source_re.finditer(text)])
gn_cpp = get_sources(gn_cpp_re, open(gn_file).read())
cmake_cpp = get_sources(cmake_cpp_re, open(cmake_file).read())
if gn_cpp == cmake_cpp:
continue
changed = True
print(gn_file)
add = cmake_cpp - gn_cpp
if add:
print('add:\n' + '\n'.join(' "%s",' % a for a in add))
remove = gn_cpp - cmake_cpp
if remove:
print('remove:\n' + '\n'.join(remove))
print()
return changed
def sync_unittests():
# Matches e.g. |add_llvm_unittest_with_input_files|.
unittest_re = re.compile(r'^add_\S+_unittest', re.MULTILINE)
checked = [ 'clang', 'clang-tools-extra', 'lld', 'llvm' ]
changed = False
for c in checked:
for root, _, _ in os.walk(os.path.join(c, 'unittests')):
cmake_file = os.path.join(root, 'CMakeLists.txt')
if not os.path.exists(cmake_file):
continue
if not unittest_re.search(open(cmake_file).read()):
continue # Skip CMake files that just add subdirectories.
gn_file = os.path.join('llvm/utils/gn/secondary', root, 'BUILD.gn')
if not os.path.exists(gn_file):
changed = True
print('missing GN file %s for unittest CMake file %s' %
(gn_file, cmake_file))
return changed
def main():
src = sync_source_lists()
tests = sync_unittests()
if src or tests:
sys.exit(1)
if __name__ == '__main__':
main()
```
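To make the two regular expressions above easier to check at a glance, here is what they capture on small hand-made snippets; the GN and CMake fragments are invented for illustration only.
```python
import re

gn_cpp_re = re.compile(r'^\s*"([^"]+\.(?:cpp|c|h|S))",$', re.MULTILINE)
cmake_cpp_re = re.compile(r'^\s*([A-Za-z_0-9./-]+\.(?:cpp|c|h|S))$', re.MULTILINE)

gn_sample = 'sources = [\n  "Foo.cpp",\n  "Bar.h",\n]\n'
cmake_sample = 'add_llvm_library(Example\n  Foo.cpp\n  Bar.h\n)\n'

# Both capture the full file name (extension included) in group 1.
assert {m.group(1) for m in gn_cpp_re.finditer(gn_sample)} == {'Foo.cpp', 'Bar.h'}
assert {m.group(1) for m in cmake_cpp_re.finditer(cmake_sample)} == {'Foo.cpp', 'Bar.h'}
```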
|
{
"source": "JeremyJacquemont/weaverbird",
"score": 2
}
|
#### File: mongo_translator/steps/filter.py
```python
from typing import Dict, List
from weaverbird.backends.mongo_translator.steps.types import MongoStep
from weaverbird.pipeline.conditions import (
ComparisonCondition,
Condition,
ConditionComboAnd,
ConditionComboOr,
DateBoundCondition,
InclusionCondition,
MatchCondition,
NullCondition,
)
from weaverbird.pipeline.dates import RelativeDate
from weaverbird.pipeline.steps import FilterStep
operator_mapping = {
'eq': '$eq',
'ne': '$ne',
'lt': '$lt',
'le': '$lte',
'gt': '$gt',
'ge': '$gte',
'in': '$in',
'nin': '$nin',
'isnull': '$eq',
'notnull': '$ne',
'from': '$gte',
'until': '$lte',
}
RELATIVE_DATE_OPERATORS: Dict[str, int] = {'until': -1, 'from': +1}
def translate_relative_date(value: RelativeDate):
sign = RELATIVE_DATE_OPERATORS[value.operator]
return {
'$dateAdd': {
'startDate': truncate_to_day({'$toDate': value.date}),
'amount': sign * abs(int(value.quantity)),
'unit': value.duration,
},
}
def truncate_to_day(value):
return {
'$dateTrunc': {
'date': value,
'unit': 'day',
}
}
def build_match_tree(condition: Condition, parent_operator='and') -> dict:
    # compound conditions
if isinstance(condition, ConditionComboAnd):
return {'$and': [build_match_tree(c, 'and') for c in condition.and_]}
elif isinstance(condition, ConditionComboOr):
return {'$or': [build_match_tree(c, 'or') for c in condition.or_]}
# simple conditions
elif isinstance(condition, ComparisonCondition) or isinstance(condition, InclusionCondition):
return {condition.column: {operator_mapping[condition.operator]: condition.value}}
elif isinstance(condition, NullCondition):
return {condition.column: {operator_mapping[condition.operator]: None}}
elif isinstance(condition, MatchCondition):
if condition.operator == 'matches':
return {condition.column: {'$regex': condition.value}}
elif condition.operator == 'notmatches':
return {condition.column: {'$not': {'$regex': condition.value}}}
# dates
elif isinstance(condition, DateBoundCondition):
if isinstance(condition.value, RelativeDate):
target_date = translate_relative_date(condition.value)
else:
target_date = {'$toDate': condition.value}
return {
'$expr': {
operator_mapping[condition.operator]: [
truncate_to_day(f'${condition.column}'),
truncate_to_day(target_date),
]
}
}
def translate_filter(step: FilterStep) -> List[MongoStep]:
return [{'$match': build_match_tree(step.condition)}]
```
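For orientation, this is the shape of the single `$match` stage that `translate_filter` returns for a small condition tree. The condition is described informally in the comment and the document below is written out by hand to mirror `build_match_tree`; it is not produced by constructing the real pydantic models.
```python
# Condition (informally): amount >= 100 AND (status == 'ok' OR status is null)
expected_stage = {
    '$match': {
        '$and': [
            {'amount': {'$gte': 100}},       # ComparisonCondition, operator 'ge'
            {'$or': [
                {'status': {'$eq': 'ok'}},   # ComparisonCondition, operator 'eq'
                {'status': {'$eq': None}},   # NullCondition, operator 'isnull'
            ]},
        ]
    }
}
```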
#### File: backends/mongo_translator/utils.py
```python
import datetime
from re import sub
from typing import Any, Dict, List, Union
from weaverbird.backends.mongo_translator.steps.filter import (
translate_relative_date,
truncate_to_day,
)
from weaverbird.backends.mongo_translator.steps.types import MongoStep
from weaverbird.pipeline.conditions import (
ConditionComboAnd,
ConditionComboOr,
MatchCondition,
SimpleCondition,
)
from weaverbird.pipeline.dates import RelativeDate
def column_to_user_variable(col_name: str) -> str:
"""User variable names can contain the ascii characters [_a-zA-Z0-9] and any non-ascii character."""
    col_name_without_invalid_chars = sub(r'[^_a-zA-Z0-9]', '_', col_name)  # replace anything outside the safe ASCII set
return f'vqb_{col_name_without_invalid_chars}'
class UnsupportedOperatorError(Exception):
"""Raised when condition uses an unsupported operator"""
def build_cond_expression(
cond: Union[SimpleCondition, ConditionComboOr, ConditionComboAnd]
) -> MongoStep:
operator_mapping = {
'eq': '$eq',
'ne': '$ne',
'lt': '$lt',
'le': '$lte',
'gt': '$gt',
'ge': '$gte',
'in': '$in',
'nin': '$in',
'isnull': '$eq',
'notnull': '$ne',
'matches': '$regexMatch',
'notmatches': '$regexMatch',
'from': '$gte',
'until': '$lte',
}
unsupported_operators: List = []
if isinstance(cond, ConditionComboAnd):
return build_and_expression(cond)
if isinstance(cond, ConditionComboOr):
        return {'$or': [build_cond_expression(elem) for elem in cond.or_]}
if cond.operator in unsupported_operators:
raise UnsupportedOperatorError(f'Unsupported operator ${cond.operator} in conditions')
if cond.operator == 'matches' or cond.operator == 'notmatches':
cond_expression = build_matches_expression(cond)
return cond_expression
else:
if cond.operator == 'notnull' or cond.operator == 'isnull':
return {operator_mapping[cond.operator]: [f'${cond.column}', None]}
else:
cond_expression = {operator_mapping[cond.operator]: [f'${cond.column}', cond.value]}
if cond.operator == 'nin':
cond_expression = {'$not': cond_expression}
cond_expression = build_dates_expressions(cond, cond_expression, operator_mapping)
return cond_expression
def build_dates_expressions(
cond: SimpleCondition, cond_expression: Dict[str, Any], operator_mapping: Dict[str, str]
):
if cond.operator == 'until':
if isinstance(cond.value, datetime.datetime):
cond_expression[operator_mapping[cond.operator]][1] = [
datetime.datetime(
                    day=cond.value.day, month=cond.value.month, year=cond.value.year
).replace(hour=23, minute=59, second=59, microsecond=999999)
]
if cond.operator == 'from' or cond.operator == 'until':
cond_expression = {
'$expr': {
operator_mapping[cond.operator]: [
truncate_to_day(f'${cond.column}'),
truncate_to_day(
translate_relative_date(cond.value)
if isinstance(cond.value, RelativeDate)
else cond.value
),
]
}
}
return cond_expression
def build_matches_expression(cond: MatchCondition):
cond_expression: MongoStep = {'$regexMatch': {'input': f'${cond.column}', 'regex': cond.value}}
if cond.operator == 'notmatches':
cond_expression = {'$not': cond_expression}
return cond_expression
def build_and_expression(cond: ConditionComboAnd):
if len(cond.and_) == 1:
return build_cond_expression(cond.and_[0])
else:
return {'$and': [build_cond_expression(elem) for elem in cond.and_]}
```
#### File: pandas_executor/steps/argmin.py
```python
from pandas import DataFrame
from weaverbird.backends.pandas_executor.types import DomainRetriever, PipelineExecutor
from weaverbird.pipeline.steps import ArgminStep
_TMP_GROUP_COL_NAME = '__TMP_COL_NAME'
def execute_argmin(
step: ArgminStep,
df: DataFrame,
domain_retriever: DomainRetriever = None,
execute_pipeline: PipelineExecutor = None,
) -> DataFrame:
group = step.groups
if len(step.groups) == 0:
# if no groups, we create a temp column with a constant in it.
# Grouping on it should yield a single result
df[_TMP_GROUP_COL_NAME] = 1
group = [_TMP_GROUP_COL_NAME]
aggregated_df = df.groupby(group, as_index=False, dropna=False).agg({step.column: 'min'})
if len(step.groups) == 0:
# we now remove the ugly temp column that we grouped on
del df[_TMP_GROUP_COL_NAME]
del aggregated_df[_TMP_GROUP_COL_NAME]
return df.merge(aggregated_df, on=[step.column] + step.groups)
```
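The argmin step keeps whole rows rather than just the minima: it computes per-group minimums and then merges them back, so every row that ties for the minimum survives. A standalone pandas sketch of the same pattern, with invented column names:
```python
import pandas as pd

df = pd.DataFrame({'group': ['a', 'a', 'b', 'b'],
                   'value': [3, 1, 2, 2]})

# Per-group minimum, then an inner merge on (group, value): only rows whose
# value equals their group's minimum remain, ties included.
mins = df.groupby('group', as_index=False).agg({'value': 'min'})
print(df.merge(mins, on=['value', 'group']))
# group 'a' keeps only the row with value 1; group 'b' keeps both tied rows.
```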
#### File: pandas_executor/steps/concatenate.py
```python
from pandas import DataFrame
from weaverbird.backends.pandas_executor.types import DomainRetriever, PipelineExecutor
from weaverbird.pipeline.steps import ConcatenateStep
def execute_concatenate(
step: ConcatenateStep,
df: DataFrame,
domain_retriever: DomainRetriever = None,
execute_pipeline: PipelineExecutor = None,
) -> DataFrame:
new_col = df[step.columns[0]].astype(str)
for col_name in step.columns[1:]:
new_col = new_col.str.cat(df[col_name].astype(str), sep=step.separator)
return df.assign(**{step.new_column_name: new_col})
```
#### File: pandas_executor/steps/convert.py
```python
from pandas import DataFrame
from weaverbird.backends.pandas_executor.types import DomainRetriever, PipelineExecutor
from weaverbird.pipeline.steps import ConvertStep
from .utils.cast import cast_to_bool, cast_to_datetime, cast_to_float, cast_to_int, cast_to_str
CAST_FUNCTIONS = {
'integer': cast_to_int,
'float': cast_to_float,
'text': cast_to_str,
'date': cast_to_datetime,
'boolean': cast_to_bool,
}
def execute_convert(
step: ConvertStep,
df: DataFrame,
domain_retriever: DomainRetriever = None,
execute_pipeline: PipelineExecutor = None,
) -> DataFrame:
cast_function = CAST_FUNCTIONS[step.data_type]
transformed_columns = {col_name: cast_function(df[col_name]) for col_name in step.columns}
return df.assign(**transformed_columns)
```
#### File: pandas_executor/steps/formula.py
```python
from pandas import DataFrame
from weaverbird.backends.pandas_executor.steps.utils.formula import clean_formula, eval_formula
from weaverbird.backends.pandas_executor.types import DomainRetriever, PipelineExecutor
from weaverbird.pipeline.steps import FormulaStep
def execute_formula(
step: FormulaStep,
df: DataFrame,
domain_retriever: DomainRetriever = None,
execute_pipeline: PipelineExecutor = None,
) -> DataFrame:
serie = eval_formula(df, clean_formula(step.formula))
return df.assign(**{step.new_column: serie})
```
#### File: pandas_executor/steps/ifthenelse.py
```python
from typing import Any
import numpy as np
from pandas import DataFrame
from weaverbird.backends.pandas_executor.steps.utils.formula import clean_formula, eval_formula
from weaverbird.backends.pandas_executor.types import DomainRetriever, PipelineExecutor
from weaverbird.pipeline.steps.ifthenelse import IfThenElse, IfthenelseStep
from .utils.condition import apply_condition
# ifthenelse can take as a parameter either a formula, or a value
def clean_if_formula(formula_or_value: Any) -> Any:
if isinstance(formula_or_value, str):
return clean_formula(formula_or_value)
else:
return formula_or_value
def _execute_ifthenelse(ifthenelse: IfThenElse, df: DataFrame, new_column) -> DataFrame:
if isinstance(ifthenelse.else_value, IfThenElse):
else_branch = _execute_ifthenelse(ifthenelse.else_value, df, new_column)[new_column]
else:
else_branch = eval_formula(df, clean_if_formula(ifthenelse.else_value))
then_branch = eval_formula(df, clean_if_formula(ifthenelse.then))
return df.assign(
**{
new_column: np.where(
apply_condition(ifthenelse.condition, df), then_branch, else_branch
)
}
)
def execute_ifthenelse(
step: IfthenelseStep,
df: DataFrame,
domain_retriever: DomainRetriever = None,
execute_pipeline: PipelineExecutor = None,
) -> DataFrame:
return _execute_ifthenelse(step, df, step.new_column)
```
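When `else_value` is itself an `IfThenElse`, the recursion above unrolls into nested `np.where` calls. A small standalone illustration of that shape (the `score` column and the grading thresholds are made up for the example):
```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'score': [95, 72, 40]})

# Equivalent of: if score > 90 then 'A' elif score > 60 then 'B' else 'C'.
# The inner np.where plays the role of the nested else-branch IfThenElse.
grade = np.where(df['score'] > 90, 'A',
                 np.where(df['score'] > 60, 'B', 'C'))
print(df.assign(grade=grade))  # rows get 'A', 'B', 'C' respectively
```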
#### File: pandas_executor/steps/percentage.py
```python
from pandas import DataFrame
from weaverbird.backends.pandas_executor.types import DomainRetriever, PipelineExecutor
from weaverbird.pipeline.steps import PercentageStep
def execute_percentage(
step: PercentageStep,
df: DataFrame,
domain_retriever: DomainRetriever = None,
execute_pipeline: PipelineExecutor = None,
) -> DataFrame:
new_column_name = step.new_column_name or f'{step.column}_PCT'
if len(step.group) > 0:
sums = df.groupby(step.group, dropna=False)[step.column].transform('sum')
else:
sums = df[step.column].sum()
return df.assign(**{new_column_name: df[step.column] / sums})
```
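The grouped branch of the percentage step relies on `transform('sum')`, which returns one value per row (its own group's total) so the division can stay row-wise. A minimal sketch with invented columns:
```python
import pandas as pd

df = pd.DataFrame({'city': ['x', 'x', 'y'], 'sales': [10, 30, 20]})

# transform('sum') broadcasts each group's total back onto its rows,
# so dividing gives each row's share of its own group (0.25, 0.75, 1.0 here).
group_totals = df.groupby('city')['sales'].transform('sum')
print(df.assign(sales_PCT=df['sales'] / group_totals))
```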
#### File: pandas_executor/steps/sort.py
```python
from pandas import DataFrame
from weaverbird.backends.pandas_executor.types import DomainRetriever, PipelineExecutor
from weaverbird.pipeline.steps import SortStep
def execute_sort(
step: SortStep,
df: DataFrame,
domain_retriever: DomainRetriever = None,
execute_pipeline: PipelineExecutor = None,
) -> DataFrame:
return df.sort_values(
by=[sort.column for sort in step.columns],
ascending=[sort.order == 'asc' for sort in step.columns],
)
```
#### File: pandas_executor/steps/unpivot.py
```python
from pandas import DataFrame
from weaverbird.backends.pandas_executor.types import DomainRetriever, PipelineExecutor
from weaverbird.pipeline.steps import UnpivotStep
def execute_unpivot(
step: UnpivotStep,
df: DataFrame,
domain_retriever: DomainRetriever = None,
execute_pipeline: PipelineExecutor = None,
) -> DataFrame:
df_melted = df.melt(
id_vars=step.keep,
value_vars=step.unpivot,
var_name=step.unpivot_column_name,
value_name=step.value_column_name,
)
return df_melted.dropna(subset=[step.value_column_name]) if step.dropna else df_melted
```
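The unpivot step is a thin wrapper around `DataFrame.melt` plus an optional `dropna`. A small sketch of the same call with invented columns, showing how the `keep` columns stay fixed while the `unpivot` columns become name/value pairs:
```python
import pandas as pd

df = pd.DataFrame({'company': ['acme', 'acme'],
                   'currency': [1.0, None],
                   'provider': [2.0, 3.0]})

melted = df.melt(id_vars=['company'],                  # step.keep
                 value_vars=['currency', 'provider'],  # step.unpivot
                 var_name='KPI', value_name='VALUE')
# With dropna=True the row where 'currency' was missing is dropped.
print(melted.dropna(subset=['VALUE']))
```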
#### File: steps/utils/condition.py
```python
from numpy.ma import logical_and, logical_or
from pandas import DataFrame, Series
from pandas.tseries.offsets import DateOffset
from weaverbird.backends.pandas_executor.steps.utils.dates import evaluate_relative_date
from weaverbird.pipeline.conditions import (
ComparisonCondition,
Condition,
ConditionComboAnd,
ConditionComboOr,
DateBoundCondition,
InclusionCondition,
MatchCondition,
NullCondition,
)
from weaverbird.pipeline.dates import RelativeDate
def apply_condition(condition: Condition, df: DataFrame) -> Series:
"""
Returns a boolean Series, that will be used to filter a DataFrame.
Example:
filter = apply_condition(condition, df)
df[filter] # The filtered DataFrame
"""
if isinstance(condition, ComparisonCondition):
return getattr(df[condition.column], condition.operator)(condition.value)
elif isinstance(condition, InclusionCondition):
f = df[condition.column].isin(condition.value)
if condition.operator.startswith('n'):
return ~f
else:
return f
elif isinstance(condition, NullCondition):
f = df[condition.column].isnull()
if condition.operator.startswith('not'):
return ~f
else:
return f
elif isinstance(condition, MatchCondition):
f = df[condition.column].str.contains(condition.value)
if condition.operator.startswith('not'):
return ~f
else:
return f
elif isinstance(condition, DateBoundCondition):
if condition.operator == 'until':
comparison_method = 'le'
elif condition.operator == 'from':
comparison_method = 'ge'
else:
raise NotImplementedError
value = condition.value
if isinstance(value, RelativeDate):
value = evaluate_relative_date(value)
# Remove time info from the column to filter on
column_without_time = df[condition.column] - DateOffset(
hour=0, minute=0, second=0, microsecond=0, nanosecond=0
)
# Do the same with the value to compare it to
value_without_time = value - DateOffset(
hour=0, minute=0, second=0, microsecond=0, nanosecond=0
)
return getattr(column_without_time, comparison_method)(value_without_time)
elif isinstance(condition, ConditionComboAnd):
return logical_and.reduce([apply_condition(c, df) for c in condition.and_])
elif isinstance(condition, ConditionComboOr):
return logical_or.reduce([apply_condition(c, df) for c in condition.or_])
else:
raise NotImplementedError
```
#### File: sql_translator/steps/aggregate.py
```python
from weaverbird.backends.sql_translator.steps.utils.aggregation import (
build_first_or_last_aggregation,
prepare_aggregation_query,
)
from weaverbird.backends.sql_translator.steps.utils.query_transformation import (
build_selection_query,
)
from weaverbird.backends.sql_translator.types import (
SQLDialect,
SQLPipelineTranslator,
SQLQuery,
SQLQueryDescriber,
SQLQueryExecutor,
SQLQueryRetriever,
)
from weaverbird.pipeline.steps import AggregateStep
def translate_aggregate(
step: AggregateStep,
query: SQLQuery,
index: int,
sql_query_retriever: SQLQueryRetriever = None,
sql_query_describer: SQLQueryDescriber = None,
sql_query_executor: SQLQueryExecutor = None,
sql_translate_pipeline: SQLPipelineTranslator = None,
subcall_from_other_pipeline_count: int = None,
sql_dialect: SQLDialect = None,
) -> SQLQuery:
query_name = f'AGGREGATE_STEP_{index}'
aggregated_cols = []
aggregated_string = ''
first_last_string = ''
query, aggregated_string, new_as_columns = prepare_aggregation_query(
query_name, aggregated_cols, aggregated_string, query, step
)
query, query_string = build_first_or_last_aggregation(
aggregated_string, first_last_string, query, step, query_name, new_as_columns
)
new_query = SQLQuery(
query_name=query_name,
transformed_query=f'{query.transformed_query}, {query_name} AS ({query_string})',
)
new_query.metadata_manager = query.metadata_manager
new_query.selection_query = build_selection_query(
query.metadata_manager.retrieve_query_metadata_columns(), query_name
)
return new_query
```
#### File: pipeline/steps/aggregate.py
```python
from typing import List, Literal, Optional, Sequence
from pydantic import Field, root_validator, validator
from pydantic.main import BaseModel
from weaverbird.pipeline.steps.utils.base import BaseStep
from weaverbird.pipeline.steps.utils.render_variables import StepWithVariablesMixin
from weaverbird.pipeline.steps.utils.validation import validate_unique_columns
from weaverbird.pipeline.types import ColumnName, PopulatedWithFieldnames, TemplatedVariable
AggregateFn = Literal[
'avg',
'sum',
'min',
'max',
'count',
'count distinct',
'first',
'last',
'count distinct including empty',
]
class Aggregation(BaseModel):
class Config(PopulatedWithFieldnames):
...
new_columns: List[ColumnName] = Field(alias='newcolumns')
agg_function: AggregateFn = Field(alias='aggfunction')
columns: List[ColumnName]
@validator('columns', pre=True)
def validate_unique_columns(cls, value):
return validate_unique_columns(value)
@root_validator(pre=True)
def handle_legacy_syntax(cls, values):
if 'column' in values:
values['columns'] = [values.pop('column')]
if 'newcolumn' in values:
values['new_columns'] = [values.pop('newcolumn')]
return values
class AggregateStep(BaseStep):
name = Field('aggregate', const=True)
on: List[ColumnName] = []
aggregations: Sequence[Aggregation]
keep_original_granularity: Optional[bool] = Field(
default=False, alias='keepOriginalGranularity'
)
class Config(PopulatedWithFieldnames):
...
class AggregationWithVariables(Aggregation):
class Config(PopulatedWithFieldnames):
...
new_columns: List[TemplatedVariable] = Field(alias='newcolumns')
agg_function: TemplatedVariable = Field(alias='aggfunction')
columns: List[TemplatedVariable]
class AggregateStepWithVariables(AggregateStep, StepWithVariablesMixin):
aggregations: Sequence[AggregationWithVariables]
```
#### File: pipeline/steps/cumsum.py
```python
from typing import List, Optional, Tuple, Union
from pydantic import Field, root_validator
from weaverbird.pipeline.steps.utils.base import BaseStep
from weaverbird.pipeline.steps.utils.render_variables import StepWithVariablesMixin
from weaverbird.pipeline.types import ColumnName, TemplatedVariable
class CumSumStep(BaseStep):
name = Field('cumsum', const=True)
to_cumsum: List[Tuple[str, Optional[str]]] = Field(..., alias='toCumSum')
reference_column: ColumnName = Field(..., alias='referenceColumn')
groupby: Optional[List[ColumnName]]
@root_validator(pre=True)
def handle_legacy_syntax(cls, values):
if 'value_column' in values:
values['to_cumsum'] = [(values.pop('value_column'), values.pop('new_column', None))]
return values
class CumSumStepWithVariable(CumSumStep, StepWithVariablesMixin):
to_cumsum: Union[TemplatedVariable, List[Tuple[TemplatedVariable, TemplatedVariable]]] = Field(
..., alias='toCumSum'
)
```
#### File: steps/utils/base.py
```python
from typing import Dict
from pydantic.main import BaseModel
from weaverbird.pipeline.types import PopulatedWithFieldnames
class BaseStep(BaseModel):
name: str
class Config(PopulatedWithFieldnames):
extra = 'forbid'
# None values are excluded, to avoid triggering validations error in front-ends
def dict(self, *, exclude_none: bool = True, **kwargs) -> Dict:
return super().dict(exclude_none=True, **kwargs)
```
#### File: steps/utils/validation.py
```python
from typing import Sequence
from weaverbird.exceptions import DuplicateColumnError
def validate_unique_columns(columns: Sequence[str]) -> Sequence[str]:
if len(set(columns)) < len(columns):
raise DuplicateColumnError
else:
return columns
```
#### File: steps/sql_translator/test_concatenate.py
```python
from weaverbird.backends.sql_translator.metadata import ColumnMetadata
from weaverbird.backends.sql_translator.steps import translate_concatenate
from weaverbird.pipeline.steps import ConcatenateStep
def test_translate_simple_concatenate(query):
step = ConcatenateStep(
name='concatenate', columns=['TOTO', 'AGE'], separator=',', new_column_name='TO_AGE'
)
query = translate_concatenate(
step,
query,
index=1,
)
expected_transformed_query = (
"WITH SELECT_STEP_0 AS (SELECT * FROM products), CONCATENATE_STEP_1 AS (SELECT TOTO, RAICHU, FLORIZARRE, "
"CONCAT_WS(',', TOTO, AGE) AS TO_AGE FROM SELECT_STEP_0)"
)
assert query.transformed_query == expected_transformed_query
assert (
query.selection_query == 'SELECT TOTO, RAICHU, FLORIZARRE, TO_AGE FROM CONCATENATE_STEP_1'
)
assert query.query_name == 'CONCATENATE_STEP_1'
# assert on metadatas
assert query.metadata_manager.retrieve_query_metadata_columns() == {
'FLORIZARRE': ColumnMetadata(
name='FLORIZARRE',
original_name='FLORIZARRE',
type='TEXT',
original_type='text',
alias=None,
delete=False,
),
'RAICHU': ColumnMetadata(
name='RAICHU',
original_name='RAICHU',
type='INT',
original_type='int',
alias=None,
delete=False,
),
'TOTO': ColumnMetadata(
name='TOTO',
original_name='TOTO',
type='TEXT',
original_type='text',
alias=None,
delete=False,
),
'TO_AGE': ColumnMetadata(
name='TO_AGE',
original_name='TO_AGE',
type='STRING',
original_type='string',
alias=None,
delete=False,
),
}
```
#### File: steps/sql_translator/test_duration.py
```python
import pytest
from weaverbird.backends.sql_translator.metadata import ColumnMetadata, SqlQueryMetadataManager
from weaverbird.backends.sql_translator.steps import translate_duration
from weaverbird.backends.sql_translator.types import SQLQuery
from weaverbird.pipeline.steps import DurationStep
@pytest.fixture
def query_date_duration():
return SQLQuery(
query_name='SELECT_STEP_0',
transformed_query='WITH SELECT_STEP_0 AS (SELECT * FROM products)',
selection_query='SELECT TOTO, RAICHU, FLORIZARRE FROM SELECT_STEP_0',
metadata_manager=SqlQueryMetadataManager(
tables_metadata={
'TABLE1': {
'TOTO': 'text',
'RAICHU': 'int',
'FLORIZARRE': 'text',
'FROM_DATE': 'date',
'TO_DATE': 'date',
}
},
),
)
def test_translate_simple_duration(query_date_duration):
step = DurationStep(
name='duration',
newColumnName='DURATION',
startDateColumn='FROM_DATE',
endDateColumn='TO_DATE',
durationIn='days',
)
query = translate_duration(
step,
query_date_duration,
index=1,
)
expected_transformed_query = (
'WITH SELECT_STEP_0 AS (SELECT * FROM products), DURATION_STEP_1 AS (SELECT TOTO, RAICHU, FLORIZARRE, '
'FROM_DATE, TO_DATE, DATEDIFF(days, to_timestamp(FROM_DATE), to_timestamp(TO_DATE)) AS DURATION FROM '
'SELECT_STEP_0)'
)
assert query.transformed_query == expected_transformed_query
assert (
query.selection_query
== 'SELECT TOTO, RAICHU, FLORIZARRE, FROM_DATE, TO_DATE, DURATION FROM DURATION_STEP_1'
)
assert query.query_name == 'DURATION_STEP_1'
# we test metadatas
assert query.metadata_manager.retrieve_query_metadata_columns() == {
'DURATION': ColumnMetadata(
name='DURATION',
original_name='DURATION',
type='FLOAT',
original_type='FLOAT',
alias=None,
delete=False,
),
'FLORIZARRE': ColumnMetadata(
name='FLORIZARRE',
original_name='FLORIZARRE',
type='TEXT',
original_type='text',
alias=None,
delete=False,
),
'FROM_DATE': ColumnMetadata(
name='FROM_DATE',
original_name='FROM_DATE',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
'RAICHU': ColumnMetadata(
name='RAICHU',
original_name='RAICHU',
type='INT',
original_type='int',
alias=None,
delete=False,
),
'TOTO': ColumnMetadata(
name='TOTO',
original_name='TOTO',
type='TEXT',
original_type='text',
alias=None,
delete=False,
),
'TO_DATE': ColumnMetadata(
name='TO_DATE',
original_name='TO_DATE',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
}
def test_translate_simple_duration_postgres(query_date_duration):
step = DurationStep(
name='duration',
newColumnName='DURATION',
startDateColumn='FROM_DATE',
endDateColumn='TO_DATE',
durationIn='days',
)
query = translate_duration(step, query_date_duration, index=1, sql_dialect='postgres')
expected_transformed_query = (
'WITH SELECT_STEP_0 AS (SELECT * FROM products), DURATION_STEP_1 AS (SELECT TOTO, RAICHU, FLORIZARRE, '
'FROM_DATE, TO_DATE, EXTRACT(days FROM (TO_DATE-FROM_DATE)) AS DURATION FROM '
'SELECT_STEP_0)'
)
assert query.transformed_query == expected_transformed_query
assert (
query.selection_query
== 'SELECT TOTO, RAICHU, FLORIZARRE, FROM_DATE, TO_DATE, DURATION FROM DURATION_STEP_1'
)
assert query.query_name == 'DURATION_STEP_1'
# we test metadatas
assert query.metadata_manager.retrieve_query_metadata_columns() == {
'DURATION': ColumnMetadata(
name='DURATION',
original_name='DURATION',
type='FLOAT',
original_type='FLOAT',
alias=None,
delete=False,
),
'FLORIZARRE': ColumnMetadata(
name='FLORIZARRE',
original_name='FLORIZARRE',
type='TEXT',
original_type='text',
alias=None,
delete=False,
),
'FROM_DATE': ColumnMetadata(
name='FROM_DATE',
original_name='FROM_DATE',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
'RAICHU': ColumnMetadata(
name='RAICHU',
original_name='RAICHU',
type='INT',
original_type='int',
alias=None,
delete=False,
),
'TOTO': ColumnMetadata(
name='TOTO',
original_name='TOTO',
type='TEXT',
original_type='text',
alias=None,
delete=False,
),
'TO_DATE': ColumnMetadata(
name='TO_DATE',
original_name='TO_DATE',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
}
def test_translate_with_old_column_name_duration(query_date_duration):
step = DurationStep(
name='duration',
newColumnName='TOTO',
startDateColumn='FROM_DATE',
endDateColumn='TO_DATE',
durationIn='days',
)
query = translate_duration(
step,
query_date_duration,
index=1,
)
expected_transformed_query = (
'WITH SELECT_STEP_0 AS (SELECT * FROM products), DURATION_STEP_1 AS (SELECT RAICHU, FLORIZARRE, FROM_DATE, '
'TO_DATE, DATEDIFF(days, to_timestamp(FROM_DATE), to_timestamp(TO_DATE)) AS TOTO FROM SELECT_STEP_0)'
)
assert query.transformed_query == expected_transformed_query
assert (
query.selection_query
== 'SELECT TOTO, RAICHU, FLORIZARRE, FROM_DATE, TO_DATE FROM DURATION_STEP_1'
)
assert query.query_name == 'DURATION_STEP_1'
# we test metadatas
assert query.metadata_manager.retrieve_query_metadata_columns() == {
'FLORIZARRE': ColumnMetadata(
name='FLORIZARRE',
original_name='FLORIZARRE',
type='TEXT',
original_type='text',
alias=None,
delete=False,
),
'FROM_DATE': ColumnMetadata(
name='FROM_DATE',
original_name='FROM_DATE',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
'RAICHU': ColumnMetadata(
name='RAICHU',
original_name='RAICHU',
type='INT',
original_type='int',
alias=None,
delete=False,
),
'TOTO': ColumnMetadata(
name='TOTO',
original_name='TOTO',
type='FLOAT',
original_type='FLOAT',
alias=None,
delete=False,
),
'TO_DATE': ColumnMetadata(
name='TO_DATE',
original_name='TO_DATE',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
}
```
#### File: steps/sql_translator/test_lowercase.py
```python
from weaverbird.backends.sql_translator.steps import translate_lowercase
from weaverbird.pipeline.steps import LowercaseStep
def test_translate_simple_lowercase(query):
step = LowercaseStep(name='lowercase', column='raichu')
query = translate_lowercase(
step,
query,
index=1,
)
expected_transformed_query = (
'WITH SELECT_STEP_0 AS (SELECT * FROM products), LOWERCASE_STEP_1 AS (SELECT TOTO, FLORIZARRE, LOWER(RAICHU) '
'AS RAICHU FROM SELECT_STEP_0)'
)
assert query.transformed_query == expected_transformed_query
assert query.selection_query == 'SELECT TOTO, RAICHU, FLORIZARRE FROM LOWERCASE_STEP_1'
assert query.query_name == 'LOWERCASE_STEP_1'
```
#### File: steps/sql_translator/test_rename.py
```python
import pytest
from weaverbird.backends.sql_translator.metadata import SqlQueryMetadataManager
from weaverbird.backends.sql_translator.steps import translate_rename
from weaverbird.backends.sql_translator.types import SQLQuery
from weaverbird.pipeline.steps import RenameStep
def test_translate_simple_rename(query):
step = RenameStep(name='rename', to_rename=[['TOTO', 'toto_name']])
query = translate_rename(
step,
query,
index=1,
)
expected_transformed_query = (
'WITH SELECT_STEP_0 AS (SELECT * FROM products), RENAME_STEP_1 AS (SELECT RAICHU, FLORIZARRE, '
'TOTO AS TOTO_NAME FROM SELECT_STEP_0)'
)
assert query.transformed_query == expected_transformed_query
assert query.selection_query == 'SELECT RAICHU, FLORIZARRE, TOTO_NAME FROM RENAME_STEP_1'
assert query.query_name == 'RENAME_STEP_1'
def test_translate_simple_rename_only_one():
q = SQLQuery(
query_name='SELECT_STEP_0',
transformed_query='WITH SELECT_STEP_0 AS (SELECT * FROM products)',
selection_query='SELECT TOTO FROM SELECT_STEP_0',
metadata_manager=SqlQueryMetadataManager(tables_metadata={'table1': {'TOTO': 'int'}}),
)
step = RenameStep(name='rename', to_rename=[['TOTO', 'toto_name']])
query = translate_rename(
step,
q,
index=1,
)
expected_transformed_query = (
'WITH SELECT_STEP_0 AS (SELECT * FROM products), RENAME_STEP_1 AS (SELECT TOTO AS TOTO_NAME FROM '
'SELECT_STEP_0)'
)
assert query.transformed_query == expected_transformed_query
assert query.selection_query == 'SELECT TOTO_NAME FROM RENAME_STEP_1'
assert query.query_name == 'RENAME_STEP_1'
def test_translate_multiple_rename(query):
step = RenameStep(
name='rename', to_rename=[['TOTO', 'toto_name'], ['RAICHU', 'raichu_renamed']]
)
query = translate_rename(
step,
query,
index=2,
)
expected_transformed_query = (
'WITH SELECT_STEP_0 AS (SELECT * FROM products), RENAME_STEP_2 AS (SELECT FLORIZARRE, TOTO AS TOTO_NAME, '
'RAICHU AS RAICHU_RENAMED FROM SELECT_STEP_0)'
)
assert query.transformed_query == expected_transformed_query
assert (
query.selection_query == 'SELECT FLORIZARRE, TOTO_NAME, RAICHU_RENAMED FROM RENAME_STEP_2'
)
assert query.query_name == 'RENAME_STEP_2'
def test_translate_rename_error(query):
step = RenameStep(
name='rename', to_rename=[['TOTO', 'toto_name'], ['RAICHU', 'raichu_renamed']]
)
mocker = SQLQuery(
query_name='SELECT_STEP_0',
transformed_query='WITH SELECT_STEP_0 AS (SELECT * FROM products)',
selection_query='SELECT TOTO, RAICHU, FLORIZARE FROM SELECT_STEP_0',
metadata_manager=SqlQueryMetadataManager(
tables_metadata={'table2': {'TOTO': 'text', 'RAICHU': 'int', 'FLORIZARRE': 'text'}}
),
)
with pytest.raises(AssertionError):
assert translate_rename(step, mocker, index=1) == translate_rename(step, query, index=1)
```
#### File: steps/sql_translator/test_rollup.py
```python
from unittest.mock import Mock
import pytest
from weaverbird.backends.sql_translator.metadata import ColumnMetadata
from weaverbird.backends.sql_translator.steps.rollup import translate_rollup
from weaverbird.pipeline.steps import RollupStep
@pytest.fixture
def sql_query_describer():
return Mock(return_value={'toto': 'int', 'raichu': 'text'})
def test_translate_rollup(query, sql_query_describer):
step = RollupStep(
name='rollup',
hierarchy=['CONTINENT', 'COUNTRY', 'CITY'],
aggregations=[
{'newcolumns': ['VALUE'], 'aggfunction': 'sum', 'columns': ['VALUE']},
],
)
result = translate_rollup(step, query, index=1, sql_query_describer=sql_query_describer)
assert (
result.transformed_query
== '''WITH SELECT_STEP_0 AS (SELECT * FROM products), \
ROLLUP_STEP_1 AS (SELECT CONTINENT, COUNTRY, CITY, \
COALESCE(CITY, COUNTRY, CONTINENT) AS LABEL, \
CASE WHEN CITY IS NOT NULL THEN 'CITY' WHEN COUNTRY IS NOT NULL \
THEN 'COUNTRY' WHEN CONTINENT IS NOT NULL THEN 'CONTINENT' ELSE '' END AS LEVEL, \
CASE WHEN LEVEL = 'COUNTRY' THEN "CONTINENT" WHEN LEVEL = 'CITY' \
THEN "COUNTRY" ELSE NULL END AS PARENT, SUM(VALUE) AS VALUE FROM SELECT_STEP_0 \
GROUP BY ROLLUP(CONTINENT, COUNTRY, CITY) HAVING CONTINENT IS NOT NULL)'''
)
assert result.query_name == 'ROLLUP_STEP_1'
assert result.selection_query == 'SELECT TOTO, RAICHU FROM ROLLUP_STEP_1'
assert result.metadata_manager.retrieve_query_metadata_columns() == {
'TOTO': ColumnMetadata(
name='TOTO',
original_name='toto',
type='INT',
original_type='int',
alias=None,
delete=False,
),
'RAICHU': ColumnMetadata(
name='RAICHU',
original_name='raichu',
type='TEXT',
original_type='text',
alias=None,
delete=False,
),
}
def test_translate_rollup_groupby(query, sql_query_describer):
step = RollupStep(
name='rollup',
hierarchy=['CONTINENT', 'COUNTRY', 'CITY'],
aggregations=[
{
'newcolumns': ['VALUE-sum', 'COUNT'],
'aggfunction': 'sum',
'columns': ['VALUE', 'COUNT'],
},
{'newcolumns': ['VALUE-avg'], 'aggfunction': 'avg', 'columns': ['VALUE']},
],
groupby=['YEAR'],
labelCol='MY_LABEL',
levelCol='MY_LEVEL',
parentLabelCol='MY_PARENT',
)
result = translate_rollup(step, query, index=1, sql_query_describer=sql_query_describer)
assert (
result.transformed_query
== """WITH SELECT_STEP_0 AS (SELECT * FROM products), \
ROLLUP_STEP_1 AS (SELECT CONTINENT, COUNTRY, CITY, YEAR, COALESCE(CITY, COUNTRY, CONTINENT) AS MY_LABEL, \
CASE WHEN CITY IS NOT NULL THEN 'CITY' WHEN COUNTRY IS NOT NULL THEN 'COUNTRY' WHEN CONTINENT IS NOT NULL \
THEN 'CONTINENT' ELSE '' END AS MY_LEVEL, CASE WHEN MY_LEVEL = 'COUNTRY' \
THEN "CONTINENT" WHEN MY_LEVEL = 'CITY' THEN "COUNTRY" ELSE NULL END AS MY_PARENT, SUM(VALUE) AS VALUE-sum, \
SUM(COUNT) AS COUNT, AVG(VALUE) AS VALUE-avg FROM SELECT_STEP_0 \
GROUP BY YEAR, ROLLUP(CONTINENT, COUNTRY, CITY) HAVING CONTINENT IS NOT NULL)"""
)
assert result.selection_query == 'SELECT TOTO, RAICHU FROM ROLLUP_STEP_1'
```
#### File: steps/sql_translator/test_select.py
```python
from weaverbird.backends.sql_translator.steps import translate_select
from weaverbird.pipeline.steps import SelectStep
def test_translate_select(query):
step = SelectStep(name='select', columns=['RAICHU', 'FLORIZARRE'])
query = translate_select(
step,
query,
index=1,
)
assert (
query.transformed_query
== 'WITH SELECT_STEP_0 AS (SELECT * FROM products), KEEPCOLS_STEP_1 AS (SELECT RAICHU, FLORIZARRE FROM '
'SELECT_STEP_0)'
)
assert query.selection_query == 'SELECT RAICHU, FLORIZARRE FROM KEEPCOLS_STEP_1'
assert query.query_name == 'KEEPCOLS_STEP_1'
```
#### File: steps/sql_translator/test_text.py
```python
from weaverbird.backends.sql_translator.steps.text import translate_text
from weaverbird.backends.sql_translator.types import SQLQuery, SqlQueryMetadataManager
from weaverbird.pipeline.steps import TextStep
def test_text(query):
step = TextStep(name='text', new_column='BEST_SINGER_EVER', text='jean-jacques-goldman')
query_result = translate_text(step, query, index=1)
expected_transformed_query = (
"""WITH SELECT_STEP_0 AS (SELECT * FROM products), TEXT_STEP_1 AS (SELECT TOTO, RAICHU, FLORIZARRE, """
"""'jean-jacques-goldman' AS BEST_SINGER_EVER FROM """
"""SELECT_STEP_0)"""
)
assert query_result.transformed_query == expected_transformed_query
assert (
query_result.selection_query
== """SELECT TOTO, RAICHU, FLORIZARRE, BEST_SINGER_EVER FROM TEXT_STEP_1"""
)
assert query_result.query_name == 'TEXT_STEP_1'
def test_text_only_one():
step = TextStep(name='text', new_column='BEST_SINGER_EVER', text='jean-jacques-goldman')
q = SQLQuery(
query_name='SELECT_STEP_0',
transformed_query='WITH SELECT_STEP_0 AS (SELECT * FROM products)',
selection_query='SELECT RAICHU FROM SELECT_STEP_0',
metadata_manager=SqlQueryMetadataManager(tables_metadata={'table1': {}}, query_metadata={}),
)
query_result = translate_text(step, q, index=1)
expected_transformed_query = (
"""WITH SELECT_STEP_0 AS (SELECT * FROM products), TEXT_STEP_1 AS (SELECT """
"""'jean-jacques-goldman' AS BEST_SINGER_EVER FROM """
"""SELECT_STEP_0)"""
)
assert query_result.transformed_query == expected_transformed_query
assert query_result.selection_query == """SELECT BEST_SINGER_EVER FROM TEXT_STEP_1"""
assert query_result.query_name == 'TEXT_STEP_1'
```
#### File: steps/sql_translator/test_todate.py
```python
from weaverbird.backends.sql_translator.steps import translate_todate
from weaverbird.pipeline.steps import ToDateStep
def test_translate_simple_todate(query):
step = ToDateStep(name='todate', column='RAICHU', format='%d/%m/%Y')
query = translate_todate(
step,
query,
index=1,
)
expected_transformed_query = (
'WITH SELECT_STEP_0 AS (SELECT * FROM products), TODATE_STEP_1 AS (SELECT TOTO, FLORIZARRE, TO_DATE('
"RAICHU, 'DD/MM/YYYY') AS RAICHU FROM SELECT_STEP_0)"
)
assert query.transformed_query == expected_transformed_query
assert query.selection_query == 'SELECT TOTO, RAICHU, FLORIZARRE FROM TODATE_STEP_1'
assert query.query_name == 'TODATE_STEP_1'
def test_translate_automatic_guess_todate(query):
step = ToDateStep(name='todate', column='RAICHU')
query = translate_todate(
step,
query,
index=1,
)
expected_transformed_query = (
'WITH SELECT_STEP_0 AS (SELECT * FROM products), TODATE_STEP_1 AS (SELECT TOTO, FLORIZARRE, TO_DATE('
'RAICHU) AS RAICHU FROM SELECT_STEP_0)'
)
assert query.transformed_query == expected_transformed_query
assert query.selection_query == 'SELECT TOTO, RAICHU, FLORIZARRE FROM TODATE_STEP_1'
assert query.query_name == 'TODATE_STEP_1'
def test_translate_hard_todate(query):
step = ToDateStep(name='todate', column='RAICHU', format="mm/dd/yyyy, 'hh24:mi hours'")
query = translate_todate(
step,
query,
index=1,
)
expected_transformed_query = (
'WITH SELECT_STEP_0 AS (SELECT * FROM products), TODATE_STEP_1 AS (SELECT TOTO, FLORIZARRE, TO_DATE('
"RAICHU, 'mm/dd/yyyy, hh24:mi hours') AS RAICHU FROM SELECT_STEP_0)"
)
assert query.transformed_query == expected_transformed_query
assert query.selection_query == 'SELECT TOTO, RAICHU, FLORIZARRE FROM TODATE_STEP_1'
assert query.query_name == 'TODATE_STEP_1'
```
#### File: steps/sql_translator/test_uniquegroups.py
```python
import pytest
from weaverbird.backends.sql_translator.steps import translate_uniquegroups
from weaverbird.pipeline.steps import UniqueGroupsStep
def test_translate_uniquegroups_empty(query):
step = UniqueGroupsStep(name='uniquegroups', on=[])
query = translate_uniquegroups(
step,
query,
index=1,
)
expected_transformed_query = (
'WITH SELECT_STEP_0 AS (SELECT * FROM products), UNIQUEGROUPS_STEP_1 AS (SELECT TOTO, RAICHU, FLORIZARRE FROM '
'SELECT_STEP_0)'
)
assert query.transformed_query == expected_transformed_query
assert query.selection_query == 'SELECT TOTO, RAICHU, FLORIZARRE FROM UNIQUEGROUPS_STEP_1'
assert query.query_name == 'UNIQUEGROUPS_STEP_1'
def test_translate_uniquegroups(query):
step = UniqueGroupsStep(name='uniquegroups', on=['TOTO', 'FLORIZARRE'])
query = translate_uniquegroups(
step,
query,
index=1,
)
expected_transformed_query = (
'WITH SELECT_STEP_0 AS (SELECT * FROM products), UNIQUEGROUPS_STEP_1 AS (SELECT TOTO, FLORIZARRE FROM '
'SELECT_STEP_0 GROUP BY TOTO, FLORIZARRE)'
)
assert query.transformed_query == expected_transformed_query
assert query.selection_query == 'SELECT TOTO, FLORIZARRE FROM UNIQUEGROUPS_STEP_1'
assert query.query_name == 'UNIQUEGROUPS_STEP_1'
def test_translate_uniquegroups_error(query):
step = UniqueGroupsStep(name='uniquegroups', on=['TOTO'])
query = translate_uniquegroups(
step,
query,
index=1,
)
expected_transformed_query = (
'WITH SELECT_STEP_0 AS (SELECT * FROM products), UNIQUEGROUPS_STEP_1 AS (SELECT TOTO, RAICHU, FLORIZARRE FROM '
'SELECT_STEP_0 GROUP BY TOTO)'
)
with pytest.raises(AssertionError):
assert query.transformed_query == expected_transformed_query
```
#### File: steps/sql_translator/test_unpivot.py
```python
import pytest
from weaverbird.backends.sql_translator.metadata import ColumnMetadata, SqlQueryMetadataManager
from weaverbird.backends.sql_translator.steps import translate_unpivot
from weaverbird.backends.sql_translator.types import SQLQuery
from weaverbird.pipeline.steps import UnpivotStep
def test_translate_unpivot(mocker):
step = UnpivotStep(
name='unpivot',
keep=['COMPANY', 'COUNTRY'],
unpivot=['CURRENCY', 'PROVIDER'],
unpivot_column_name='KPI',
value_column_name='VALUE',
dropna=True,
)
query = SQLQuery(
query_name='SELECT_STEP_0',
transformed_query='WITH SELECT_STEP_0 AS (SELECT COMPANY, COUNTRY, CURRENCY, PROVIDER FROM PRODUCTS)',
selection_query='SELECT COMPANY, COUNTRY, CURRENCY, PROVIDER FROM SELECT_STEP_0',
metadata_manager=SqlQueryMetadataManager(
tables_metadata={
'PRODUCTS': {
'COMPANY': 'text',
'COUNTRY': 'text',
'CURRENCY': 'text',
'PROVIDER': 'text',
}
},
),
)
res = translate_unpivot(step, query, index=1)
assert (
res.transformed_query
== 'WITH SELECT_STEP_0 AS (SELECT COMPANY, COUNTRY, CURRENCY, PROVIDER FROM PRODUCTS), UNPIVOT_STEP_1 AS (SELECT COMPANY, COUNTRY, KPI, VALUE FROM SELECT_STEP_0 UNPIVOT(VALUE FOR KPI IN (CURRENCY, PROVIDER)))'
)
assert res.selection_query == 'SELECT COMPANY, COUNTRY, KPI, VALUE FROM UNPIVOT_STEP_1'
assert res.metadata_manager.retrieve_query_metadata_columns() == {
'COMPANY': ColumnMetadata(
name='COMPANY',
original_name='COMPANY',
type='TEXT',
original_type='text',
alias=None,
delete=False,
),
'COUNTRY': ColumnMetadata(
name='COUNTRY',
original_name='COUNTRY',
type='TEXT',
original_type='text',
alias=None,
delete=False,
),
'KPI': ColumnMetadata(
name='KPI',
original_name='KPI',
type='TEXT',
original_type='text',
alias=None,
delete=False,
),
'VALUE': ColumnMetadata(
name='VALUE',
original_name='VALUE',
type='TEXT',
original_type='text',
alias=None,
delete=False,
),
}
def test_translate_unpivot_error(mocker):
step = UnpivotStep(
name='unpivot',
keep=['COMPANY', 'COUNTRY'],
unpivot=['CURRENCY', 'PROVIDER'],
unpivot_column_name='KPI',
value_column_name='VALUE',
dropna=True,
)
query = SQLQuery(
query_name='SELECT_STEP_0',
transformed_query='WITH SELECT_STEP_0 AS (SELECT COMPANY, COUNTRY, CURRENCY, PROVIDER FROM PRODUCTS)',
selection_query='SELECT COMPANY, COUNTRY, CURRENCY, PROVIDER FROM SELECT_STEP_0',
metadata_manager=SqlQueryMetadataManager(
tables_metadata={
'PRODUCTS': {
'COMPANY': 'text',
'COUNTRY': 'text',
'CURRENCY': 'text',
'PROVIDER': 'text',
}
},
),
)
mocker.patch(
'weaverbird.backends.sql_translator.steps.unpivot.build_selection_query',
side_effect=Exception,
)
with pytest.raises(Exception):
translate_unpivot(step, query, index=1)
```
#### File: tests/steps/test_convert.py
```python
import datetime
from datetime import timedelta
import pandas
from weaverbird.backends.pandas_executor.steps.convert import execute_convert
from weaverbird.pipeline.steps import ConvertStep
def test_benchmark_convert(benchmark):
dates = [
str(datetime.datetime.today() + timedelta(days=nb_day)) for nb_day in list(range(1, 2001))
]
df = pandas.DataFrame(
{
'date': dates,
}
)
step = ConvertStep(name='convert', columns=['date'], data_type='date')
benchmark(execute_convert, step, df)
def test_convert_str_to_date():
df = pandas.DataFrame(
{
'date': ['21/03/2006 21:50:00'],
}
)
step = ConvertStep(name='convert', columns=['date'], data_type='date')
result = execute_convert(step, df)
date_result = result.at[0, 'date']
assert str(date_result) == '2006-03-21 21:50:00'
def test_convert_timestamp_to_date():
"""
    Converting timestamps to dates should interpret them as millisecond timestamps
    (for consistency with the Mongo backend), not nanoseconds (the pandas default)
"""
df = pandas.DataFrame(
{
'timestamp_ms': [1142977800000],
}
)
step = ConvertStep(name='convert', columns=['timestamp_ms'], data_type='date')
result = execute_convert(step, df)
date_result = result.at[0, 'timestamp_ms']
assert str(date_result) == '2006-03-21 21:50:00'
def test_convert_date_to_timestamp():
"""
    Converting dates to timestamps should return millisecond timestamps, for
    consistency with the opposite conversion
"""
df = pandas.DataFrame(
{
'date': [pandas.to_datetime('21/03/2006 21:50:00')],
}
)
step = ConvertStep(name='convert', columns=['date'], data_type='integer')
result = execute_convert(step, df)
timestamp_result = result.at[0, 'date']
assert str(timestamp_result) == '1142977800000'
```
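The two docstrings above describe the millisecond-timestamp convention chosen for consistency with the Mongo backend. As a quick sanity check outside the test suite, the same round trip can be reproduced with plain pandas; the calls below are standard pandas APIs, not weaverbird ones, and the value is the same one used in the tests.
```python
import pandas as pd

# 1142977800000 ms since the Unix epoch corresponds to 2006-03-21 21:50:00.
ts_ms = 1142977800000
as_date = pd.to_datetime(ts_ms, unit='ms')
print(as_date)                 # 2006-03-21 21:50:00

# pandas stores datetimes as nanoseconds, so divide by 10**6 to get back to ms.
print(as_date.value // 10**6)  # 1142977800000
```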
#### File: tests/steps/test_delete.py
```python
from pandas import DataFrame
from weaverbird.backends.pandas_executor.steps.delete import execute_delete
from weaverbird.pipeline.steps import DeleteStep
def test_benchmark_delete(benchmark):
big_df = DataFrame({'value': list(range(1000))})
step = DeleteStep(
name='delete',
columns=['value'],
)
benchmark(execute_delete, step, big_df)
```
#### File: tests/steps/test_lowercase.py
```python
import pandas as pd
import pytest
from tests.steps.test_comparetext import random_string
from tests.utils import assert_dataframes_equals
from weaverbird.backends.pandas_executor.steps.lowercase import execute_lowercase
from weaverbird.pipeline.steps import LowercaseStep
@pytest.fixture()
def sample_df() -> pd.DataFrame:
return pd.DataFrame({'a_text': ['FOO', 'BAR', 'baz'], 'an_int': [1, 2, 3]})
def test_simple_lowercase(sample_df):
step = LowercaseStep(name='lowercase', column='a_text')
result_df = execute_lowercase(step, sample_df)
expected_df = pd.DataFrame({'a_text': ['foo', 'bar', 'baz'], 'an_int': [1, 2, 3]})
assert_dataframes_equals(result_df, expected_df)
def test_it_should_throw_if_applied_on_wrong_type(sample_df):
with pytest.raises(AttributeError):
step = LowercaseStep(name='lowercase', column='an_int')
execute_lowercase(step, sample_df)
def test_benchmark_lowercase(benchmark):
df = pd.DataFrame({'TEXT_1': [random_string() for _ in range(1000)]})
step = LowercaseStep(name='lowercase', column='TEXT_1')
benchmark(execute_lowercase, step, df)
```
#### File: tests/steps/test_moving_average.py
```python
import numpy as np
import pandas as pd
from pandas import DataFrame
from tests.utils import assert_dataframes_equals
from weaverbird.backends.pandas_executor.steps.moving_average import execute_moving_average
from weaverbird.pipeline.steps import MovingAverageStep
def test_moving_average_basic():
df = DataFrame(
{'date': [f'2018-01-0{i}' for i in range(1, 9)], 'value': [75, 80, 82, 83, 80, 86, 79, 76]}
)
df['date'] = pd.to_datetime(df['date'])
step = MovingAverageStep(
name='movingaverage',
valueColumn='value',
columnToSort='date',
movingWindow=2,
)
df_result = execute_moving_average(step, df)
expected_result = df.assign(
**{'value_MOVING_AVG': [None, 77.5, 81, 82.5, 81.5, 83, 82.5, 77.5]}
)
assert_dataframes_equals(df_result, expected_result)
def test_moving_average_with_groups():
df = DataFrame(
{
'country': ['France'] * 6 + ['USA'] * 6,
'date': [f'2018-01-0{i}' for i in range(1, 7)] * 2,
'value': [75, 80, 82, 83, 80, 86] + [69, 73, 73, 75, 70, 76],
}
)
df['date'] = pd.to_datetime(df['date'])
step = MovingAverageStep(
name='movingaverage',
valueColumn='value',
columnToSort='date',
movingWindow=3,
groups=['country'],
newColumnName='rolling_average',
)
df_result = execute_moving_average(step, df)
expected_result = df.assign(
**{
'rolling_average': [None, None, 79, 81.6667, 81.6667, 83]
+ [None, None, 71.6667, 73.6667, 72.6667, 73.6667]
}
)
assert_dataframes_equals(df_result, expected_result)
def test_benchmark_moving_average(benchmark):
df = DataFrame({'value': np.random.random(1000), 'id': list(range(1000))})
step = MovingAverageStep(
name='movingaverage',
valueColumn='value',
columnToSort='id',
movingWindow=3,
newColumnName='rolling_average',
)
benchmark(execute_moving_average, step, df)
```
#### File: tests/steps/test_pivot.py
```python
import random
import numpy as np
import pytest
from pandas import DataFrame
from tests.utils import assert_dataframes_equals
from weaverbird.backends.pandas_executor.steps.pivot import execute_pivot
from weaverbird.pipeline.steps import PivotStep
@pytest.fixture
def sample_df():
return DataFrame(
{
'Label': ['Label 1', 'Label 2', 'Label 3', 'Label 1', 'Label 2', 'Label 3', 'Label 3'],
'Country': ['Country 1'] * 3 + ['Country 2'] * 4,
'Value': [13, 7, 20, 1, 10, 5, 1],
}
)
def test_pivot(sample_df: DataFrame):
step = PivotStep(
name='pivot',
index=['Label'],
column_to_pivot='Country',
value_column='Value',
agg_function='sum',
)
df_result = execute_pivot(step, sample_df)
expected_result = DataFrame(
{
'Label': ['Label 1', 'Label 2', 'Label 3'],
'Country 1': [13, 7, 20],
'Country 2': [1, 10, 6],
}
)
assert_dataframes_equals(df_result, expected_result)
def test_pivot_avg(sample_df: DataFrame):
step = PivotStep(
name='pivot',
index=['Label'],
column_to_pivot='Country',
value_column='Value',
agg_function='avg',
)
df_result = execute_pivot(step, sample_df)
expected_result = DataFrame(
{
'Label': ['Label 1', 'Label 2', 'Label 3'],
'Country 1': [13, 7, 20],
'Country 2': [1, 10, 3],
}
)
assert_dataframes_equals(df_result, expected_result)
def test_benchmark_pivot(benchmark):
groups = ['group_1', 'group_2']
df = DataFrame(
{
'value': np.random.random(1000),
'id': list(range(1000)),
'group': [random.choice(groups) for _ in range(1000)],
}
)
step = PivotStep(
name='pivot',
index=['group'],
column_to_pivot='group',
value_column='value',
agg_function='avg',
)
benchmark(execute_pivot, step, df)
```
#### File: tests/steps/test_split.py
```python
import random
import numpy as np
import pandas as pd
import pytest
from tests.utils import assert_dataframes_equals
from weaverbird.backends.pandas_executor.steps.split import execute_split
from weaverbird.pipeline.steps import SplitStep
@pytest.fixture()
def sample_df():
return pd.DataFrame(
{
'Label': [
'Label 1 - Groupe 1 - France',
'Label 2 - Groupe 1 - Spain',
'Label 3 - Groupe 1 - USA',
'Label 4 - Groupe 2 - France',
'Label 5 - Groupe 2 - Spain',
'Label 6 - Groupe 2 - USA',
],
'Values': [13, 7, 20, 1, 10, 5],
}
)
def test_keep_all_col(sample_df):
step = SplitStep(name='split', column='Label', delimiter='-', number_cols_to_keep=3)
result_df = execute_split(step, sample_df)
expected_df = pd.DataFrame(
{
'Label': [
'Label 1 - Groupe 1 - France',
'Label 2 - Groupe 1 - Spain',
'Label 3 - Groupe 1 - USA',
'Label 4 - Groupe 2 - France',
'Label 5 - Groupe 2 - Spain',
'Label 6 - Groupe 2 - USA',
],
'Label_1': ['Label 1 ', 'Label 2 ', 'Label 3 ', 'Label 4 ', 'Label 5 ', 'Label 6 '],
'Label_2': [' Groupe 1 '] * 3 + [' Groupe 2 '] * 3,
'Label_3': [' France', ' Spain', ' USA'] * 2,
'Values': [13, 7, 20, 1, 10, 5],
}
)
assert_dataframes_equals(result_df, expected_df)
def test_keep_less_columns(sample_df):
step = SplitStep(name='split', column='Label', delimiter='-', number_cols_to_keep=2)
result_df = execute_split(step, sample_df)
expected_df = pd.DataFrame(
{
'Label': [
'Label 1 - Groupe 1 - France',
'Label 2 - Groupe 1 - Spain',
'Label 3 - Groupe 1 - USA',
'Label 4 - Groupe 2 - France',
'Label 5 - Groupe 2 - Spain',
'Label 6 - Groupe 2 - USA',
],
'Label_1': ['Label 1 ', 'Label 2 ', 'Label 3 ', 'Label 4 ', 'Label 5 ', 'Label 6 '],
'Label_2': [' Groupe 1 '] * 3 + [' Groupe 2 '] * 3,
'Values': [13, 7, 20, 1, 10, 5],
}
)
assert_dataframes_equals(result_df, expected_df)
def test_benchmark_split(benchmark):
groups = ['group_1', 'group_2']
df = pd.DataFrame(
{
'value': np.random.random(1000),
'id': list(range(1000)),
'group': [random.choice(groups) for _ in range(1000)],
}
)
step = SplitStep(name='split', column='group', delimiter='_', number_cols_to_keep=2)
benchmark(execute_split, step, df)
```
#### File: tests/steps/test_statistics.py
```python
import random
import numpy as np
import pytest
from pandas import DataFrame
from tests.utils import assert_dataframes_equals
from weaverbird.backends.pandas_executor.steps.statistics import execute_statistics
from weaverbird.pipeline.steps import StatisticsStep
@pytest.fixture
def sample_df():
return DataFrame(
{
'Label': ['Label 1', 'Label 2', 'Label 3', 'Label 4', 'Label 5', 'Label 6'],
'Group': ['Group 1'] * 3 + ['Group 2'] * 3,
'Value': [13, 7, 20, 1, 10, 5],
}
)
def test_statistics(sample_df: DataFrame):
step = StatisticsStep(
name='statistics',
column='Value',
groupbyColumns=[],
statistics=['average', 'count'],
quantiles=[{'label': 'median', 'nth': 1, 'order': 2}],
)
df_result = execute_statistics(step, sample_df)
expected_result = DataFrame({'average': [9.33333], 'count': [6], 'median': [8.5]})
assert_dataframes_equals(df_result, expected_result)
def test_statistics_with_groups(sample_df: DataFrame):
step = StatisticsStep(
name='statistics',
column='Value',
groupbyColumns=['Group'],
statistics=['average', 'count'],
quantiles=[{'label': 'median', 'nth': 1, 'order': 2}],
)
df_result = execute_statistics(step, sample_df)
expected_result = DataFrame(
{
'Group': ['Group 1', 'Group 2'],
'average': [13.33333, 5.33333],
'count': [3, 3],
'median': [13, 5],
}
)
assert_dataframes_equals(df_result, expected_result)
def test_benchmark_statistics(benchmark):
groups = ['group_1', 'group_2']
df = DataFrame(
{
'value': np.random.random(1000),
'id': list(range(1000)),
'group': [random.choice(groups) for _ in range(1000)],
}
)
step = StatisticsStep(
name='statistics',
column='value',
groupbyColumns=['group'],
statistics=['average', 'count'],
quantiles=[{'label': 'median', 'nth': 1, 'order': 2}],
)
benchmark(execute_statistics, step, df)
```
#### File: tests/steps/test_substring.py
```python
import random
import numpy as np
import pandas as pd
import pytest
from tests.utils import assert_dataframes_equals
from weaverbird.backends.pandas_executor.steps.substring import execute_substring
from weaverbird.pipeline.steps import SubstringStep
@pytest.fixture()
def sample_df():
return pd.DataFrame(
{
'Label': ['foo', 'overflow', 'some_text', 'a_word', 'toucan', 'toco'],
'Value:': [13, 7, 20, 1, 10, 5],
}
)
def test_substring_positive_start_end_idx(sample_df):
step = SubstringStep(name='substring', column='Label', start_index=1, end_index=4)
result_df = execute_substring(step, sample_df)
expected_df = pd.DataFrame(
{
'Label': ['foo', 'overflow', 'some_text', 'a_word', 'toucan', 'toco'],
'Value:': [13, 7, 20, 1, 10, 5],
'Label_SUBSTR': ['foo', 'over', 'some', 'a_wo', 'touc', 'toco'],
}
)
assert_dataframes_equals(result_df, expected_df)
def test_substring_positive_start_negative_end(sample_df):
step = SubstringStep(name='substring', column='Label', start_index=2, end_index=-2)
result_df = execute_substring(step, sample_df)
expected_df = pd.DataFrame(
{
'Label': ['foo', 'overflow', 'some_text', 'a_word', 'toucan', 'toco'],
'Value:': [13, 7, 20, 1, 10, 5],
'Label_SUBSTR': ['o', 'verflo', 'ome_tex', '_wor', 'ouca', 'oc'],
}
)
assert_dataframes_equals(result_df, expected_df)
def test_substring_negative_start_negative_end(sample_df):
step = SubstringStep(name='substring', column='Label', start_index=-3, end_index=-1)
result_df = execute_substring(step, sample_df)
expected_df = pd.DataFrame(
{
'Label': ['foo', 'overflow', 'some_text', 'a_word', 'toucan', 'toco'],
'Value:': [13, 7, 20, 1, 10, 5],
'Label_SUBSTR': ['foo', 'low', 'ext', 'ord', 'can', 'oco'],
}
)
assert_dataframes_equals(result_df, expected_df)
def test_substring_new_column_name(sample_df):
step = SubstringStep(
name='substring', column='Label', start_index=-3, end_index=-1, newColumnName='FOO'
)
result_df = execute_substring(step, sample_df)
expected_df = pd.DataFrame(
{
'Label': ['foo', 'overflow', 'some_text', 'a_word', 'toucan', 'toco'],
'Value:': [13, 7, 20, 1, 10, 5],
'FOO': ['foo', 'low', 'ext', 'ord', 'can', 'oco'],
}
)
assert_dataframes_equals(result_df, expected_df)
def test_benchmark_substring(benchmark):
groups = ['group_1', 'group_2']
df = pd.DataFrame(
{
'value': np.random.random(1000),
'id': list(range(1000)),
'group': [random.choice(groups) for _ in range(1000)],
}
)
step = SubstringStep(
name='substring', column='group', start_index=0, end_index=3, newColumnName='FOO'
)
benchmark(execute_substring, step, df)
```
|
{
"source": "jeremyjbowers/nfl-stats",
"score": 3
}
|
#### File: nfl-stats/pynfl/schedule.py
```python
import json
import os
from bs4 import BeautifulSoup
import requests
from pynfl import utils
class Game(utils.DataModelClass):
def __init__(self, **kwargs):
self.id = None
self.location = None
self.time = None
self.date = None
self.week = None
self.site = None
self.home_team = None
self.away_team = None
self.set_fields(**kwargs)
def __unicode__(self):
return str(self.id)
class Team(utils.DataModelClass):
def __init__(self, **kwargs):
self.id = None
self.name = None
self.mascot = None
self.abbr = None
self.set_fields(**kwargs)
def __unicode__(self):
return self.abbr
class Load(utils.BaseModelClass):
def __init__(self, *args, **kwargs):
self.weeks = range(1, 18)
self.schedule_url = 'http://www.nfl.com/schedules/2015/REG%s'
self.games = []
self.set_data_directory()
def read_data(self):
if os.path.isfile('%s/schedule.json' % self.DATA_DIRECTORY):
with open('%s/schedule.json' % self.DATA_DIRECTORY, 'r') as readfile:
for item in json.loads(readfile.read()):
g = Game(**item)
g.home_team = Team(**item['home_team'])
g.away_team = Team(**item['away_team'])
self.games.append(g)
def get_data(self):
for week in self.weeks:
r = requests.get(self.schedule_url % week)
soup = BeautifulSoup(r.content, 'lxml')
list_items = soup.select('ul.schedules-table li')
current_date = None
print "Week %s" % week
for li in list_items:
# Decide if this is a DATE item or a GAME item.
if ("schedules-list-date" in li.attrs['class']) and ("next-game" not in li.attrs['class']):
current_date = li.text.replace('View Full Thursday Night Football Schedule','').replace(u'\u00a0\u00bb','').strip()
if current_date and ("schedules-list-matchup" in li.attrs['class']):
"""
<div class="schedules-list-content post expandable primetime type-reg pro-legacy" data-gameid="2015091000" data-away-abbr="PIT" data-home-abbr="NE" data-away-mascot="Steelers" data-home-mascot="Patriots" data-gamestate="POST" data-gc-url="http://www.nfl.com/gamecenter/2015091000/2015/REG1/steelers@patriots" data-localtime="20:30:00" data-shareid="sb-liu2s52c" data-site="Gillette Stadium">
"""
g = Game()
game = li.select('div.schedules-list-content')[0]
g.id = game.attrs['data-gameid']
g.week = week
g.date = current_date.replace('View Full Thursday Night Football Schedule','').replace(u'\u00a0\u00bb','').strip()
g.time = li.select('div.schedules-list-hd span.time')[0].text.strip()
g.site = game.attrs['data-site']
g.home_team = Team()
g.home_team.abbr = game.attrs['data-home-abbr']
g.home_team.mascot = game.attrs['data-home-mascot']
g.home_team = g.home_team.__dict__
g.away_team = Team()
g.away_team.abbr = game.attrs['data-away-abbr']
g.away_team.mascot = game.attrs['data-away-mascot']
g.away_team = g.away_team.__dict__
self.games.append(g.__dict__)
def write_data(self):
with open('%s/schedule.json' % self.DATA_DIRECTORY, 'w') as writefile:
writefile.write(json.dumps(self.games))
if __name__ == "__main__":
l = Load()
l.get_data()
l.write_data()
l.read_data()
```
|
{
"source": "jeremyje/compute-image-tools",
"score": 2
}
|
#### File: image_build/enterprise_linux/ks_helpers.py
```python
import logging
import os
class RepoString(object):
"""Creates a yum.conf repository section statement for a kickstart file.
See the yum.conf man pages for more information about formatting
requirements.
  The attributes listed are the minimum data set for a repo section.
Attributes:
head: The header that should be used for the repo section.
name: The name as it will appear in yum.
baseurl: The url for the repo.
enabled: Set to 1 to enable.
gpgcheck: Set to 1 to enable.
repo_gpgcheck: Set to 1 to enable.
gpgkey: URLs pointing to GPG keys.
"""
url_root = 'https://packages.cloud.google.com/yum/repos'
gpgkey_list = [
'https://packages.cloud.google.com/yum/doc/yum-key.gpg',
'https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg'
]
  # New repos should be added here. Create a dict for your repo below.
# This dict should contain the following:
# head: The header that should be used for the repo section.
# name: The name as it will appear in yum.
# url_branch: This is combined with url_root (defined in the class) and
# repo_version to create the repo's baseurl. You must include a string
# formatter '%s' to place the repo_version in your URL.
# e.g. /google-compute-engine-%s-x86_64-unstable
# filename: This is the location the yum.conf section file will live on the
# image.
repodict = {
'stable': {
'head': '[google-compute-engine]',
'name': 'Google Compute Engine',
'url_branch': '/google-compute-engine-%s-x86_64-stable',
'filename': '/etc/yum.repos.d/google-cloud.repo'
},
'sdk': {
'head': '[google-cloud-sdk]',
'name': 'Google Cloud SDK',
'url_branch': '/cloud-sdk-%s-x86_64',
'filename': '/etc/yum.repos.d/google-cloud.repo'
},
'unstable': {
'head': '[google-compute-engine-unstable]',
'name': 'Google Compute Engine Unstable',
'url_branch': '/google-compute-engine-%s-x86_64-unstable',
'filename': '/etc/yum.repos.d/google-cloud-unstable.repo'
},
'staging': {
'head': '[google-compute-engine-staging]',
'name': 'Google Compute Engine Staging',
'url_branch': '/google-compute-engine-%s-x86_64-staging',
'filename': '/etc/yum.repos.d/google-cloud-staging.repo'
}
}
def __init__(self, repo_version, repo):
"""Initializes RepoString with attributes passes as arguments.
Args:
repo_version: string; expects 'el7', 'el8'.
repo: string; used to specify which dict in repodict to use to assemble
the yum.conf repo segment.
repodata must contain the following:
head: The header that should be used for the repo entry.
name: The name as it will appear in yum.
url_branch: This is combined with url_root (defined in the class) and
repo_version to create the repo's baseurl. You must include a string
formatter '%s' to place the repo_version in your URL.
e.g. /google-compute-engine-%s-x86_64-unstable
Returns:
An initialized RepoString object.
"""
super(RepoString, self).__init__()
self.repo = repo
self.repo_version = repo_version
self.yumseg = {}
self.yumseg['head'] = self.repodict[self.repo]['head']
self.yumseg['name'] = self.repodict[self.repo]['name']
self.yumseg['baseurl'] = (
self.GetBaseURL(self.repodict[self.repo]['url_branch']))
self.yumseg['enabled'] = '1'
self.yumseg['gpgcheck'] = '1'
self.yumseg['repo_gpgcheck'] = '1'
self.yumseg['gpgkey'] = self.gpgkey_list
def __str__(self):
"""Override the string method to return a yum.conf repository section.
Returns:
RepoString; tell python to treat this as a string using str().
"""
keylist = ['head',
'name',
'baseurl',
'enabled',
'gpgcheck',
'repo_gpgcheck',
'gpgkey']
yum_repo_list = (
[('tee -a %s << EOM' % self.repodict[self.repo]['filename']), ])
for key in keylist:
if key == 'head':
yum_repo_list.append(self.yumseg[key])
elif key == 'gpgkey':
yum_repo_list.append('%s=%s' %
(key, '\n '.join(self.gpgkey_list)))
else:
yum_repo_list.append('%s=%s' % (key, self.yumseg[key]))
yum_repo_list.append('EOM')
return '\n'.join(yum_repo_list)
def GetBaseURL(self, url_branch):
"""Assembles the baseurl attribute of RepoString.
    Processes the string formatting in url_branch, then combines it with
url_root to create the baseurl.
Args:
url_branch: string; this is combined with url_root and repo_version to
create the repo's baseurl. You must include a string
formatter '%s' to place the repo_version in your URL.
e.g. /google-compute-engine-%s-x86_64-unstable
Returns:
string; baseurl
"""
return self.url_root + (url_branch % self.repo_version)
def BuildKsConfig(release, google_cloud_repo, byos, sap):
"""Builds kickstart config from shards.
Args:
release: string; image from metadata.
google_cloud_repo: string; expects 'stable', 'unstable', or 'staging'.
byos: bool; true if using a BYOS RHEL license.
sap: bool; true if building RHEL for SAP.
Returns:
string; a valid kickstart config.
"""
# Each kickstart file will have the following components:
# ks_options: kickstart install options
# ks_packages: kickstart package config
# ks_post: kickstart post install scripts
# Common kickstart sections.
ks_cleanup = FetchConfigPart('cleanup.cfg')
el7_packages = FetchConfigPart('el7-packages.cfg')
el8_packages = FetchConfigPart('el8-packages.cfg')
el7_post = FetchConfigPart('el7-post.cfg')
el8_post = FetchConfigPart('el8-post.cfg')
# For BYOS RHEL, don't remove subscription-manager.
if byos:
logging.info('Building RHEL BYOS image.')
rhel_byos_post = FetchConfigPart('rhel-byos-post.cfg')
# RHEL 7 variants.
if release.startswith('rhel-7'):
logging.info('Building RHEL 7 image.')
repo_version = 'el7'
ks_options = FetchConfigPart('rhel-7-options.cfg')
ks_packages = el7_packages
# Build RHEL post scripts.
rhel_post = FetchConfigPart('rhel-7-post.cfg')
if sap:
logging.info('Building RHEL 7 for SAP')
rhel_sap_post = FetchConfigPart('rhel-7-sap-post.cfg')
point = ''
if release == 'rhel-7-4':
logging.info('Building RHEL 7.4 for SAP')
point = FetchConfigPart('rhel-7-4-post.cfg')
if release == 'rhel-7-6':
logging.info('Building RHEL 7.6 for SAP')
point = FetchConfigPart('rhel-7-6-post.cfg')
if release == 'rhel-7-7':
logging.info('Building RHEL 7.7 for SAP')
point = FetchConfigPart('rhel-7-7-post.cfg')
rhel_post = '\n'.join([point, rhel_sap_post])
if byos:
rhel_post = '\n'.join([rhel_post, rhel_byos_post])
# Combine RHEL specific post install to EL7 post.
ks_post = '\n'.join([rhel_post, el7_post])
# RHEL 8 variants.
elif release.startswith('rhel-8'):
logging.info('Building RHEL 8 image.')
repo_version = 'el8'
ks_options = FetchConfigPart('rhel-8-options.cfg')
ks_packages = el8_packages
# Build RHEL post scripts.
rhel_post = FetchConfigPart('rhel-8-post.cfg')
if sap:
logging.info('Building RHEL 8 for SAP')
rhel_sap_post = FetchConfigPart('rhel-8-sap-post.cfg')
point = ''
if release == 'rhel-8-1':
logging.info('Building RHEL 8.1 for SAP')
point = FetchConfigPart('rhel-8-1-post.cfg')
elif release == 'rhel-8-2':
logging.info('Building RHEL 8.2 for SAP')
point = FetchConfigPart('rhel-8-2-post.cfg')
rhel_post = '\n'.join([point, rhel_sap_post])
if byos:
rhel_post = '\n'.join([rhel_post, rhel_byos_post])
# Combine RHEL specific post install to EL8 post.
ks_post = '\n'.join([rhel_post, el8_post])
# CentOS 7
elif release.startswith('centos-7'):
logging.info('Building CentOS 7 image.')
repo_version = 'el7'
ks_options = FetchConfigPart('centos-7-options.cfg')
ks_packages = el7_packages
ks_post = el7_post
# CentOS 8
elif release.startswith('centos-8'):
logging.info('Building CentOS 8 image.')
repo_version = 'el8'
ks_options = FetchConfigPart('centos-8-options.cfg')
ks_packages = el8_packages
ks_post = el8_post
# CentOS Stream 8
elif release.startswith('centos-stream-8'):
logging.info('Building CentOS Stream image.')
repo_version = 'el8'
ks_options = FetchConfigPart('centos-stream-8-options.cfg')
ks_packages = el8_packages
ks_post = el8_post
else:
logging.error('Unknown Image Name: %s' % release)
# Post section for Google cloud repos.
repo_post = BuildReposPost(repo_version, google_cloud_repo)
# Joined kickstart post sections.
ks_post = '\n'.join([repo_post, ks_post, ks_cleanup])
  # Join the kickstart file.
ks_file = '\n'.join([ks_options, ks_packages, ks_post])
# Return the joined kickstart file as a string.
logging.info("Kickstart file: \n%s", ks_file)
return ks_file
def BuildReposPost(repo_version, google_cloud_repo):
"""Creates a kickstart post macro with repos needed by GCE.
Args:
repo_version: string; expects 'el7', or 'el8'.
google_cloud_repo: string; expects 'stable', 'unstable', or 'staging'
Returns:
string; a complete %post macro that can be added to a kickstart file. The
output should look like the following example.
%post
tee -a /etc/yum.repos.d/example.repo << EOF
[example-repo]
name=Example Repo
baseurl=https://example.com/yum/repos/example-repo-ver-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://example.com/yum/doc/yum-key.gpg
https://example.com/yum/doc/rpm-package-key.gpg
EOF
...
%end
The values for enabled, gpgcheck, repo_gpgcheck, and gpgkey are constants.
the values for head, name, and baseurl can be modified to point to use any
repo that will accept the supplied gpg keys.
"""
  # Build a list of repos that will be returned. All images will get the
  # compute repo. EL7 and EL8 images get the cloud SDK repo. The unstable and
  # staging repos can be added to either by setting the google_cloud_repo value.
repolist = ['stable']
if repo_version == 'el7' or repo_version == 'el8':
repolist.append('sdk')
if google_cloud_repo == 'unstable':
repolist.append('unstable')
if google_cloud_repo == 'staging':
repolist.append('staging')
filelist = ['%post']
for repo in repolist:
filelist.append(str(RepoString(repo_version, repo)))
filelist.append('%end')
return '\n'.join(filelist)
def FetchConfigPart(config_file):
"""Reads data from a kickstart file.
Args:
config_file: string; the name of a kickstart file shard located in
the 'kickstart' directory.
Returns:
string; contents of config_file should be a string with newlines.
"""
with open(os.path.join('files', 'kickstart', config_file)) as f:
return f.read()
```
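A minimal driver sketch for the helpers above follows; it is illustrative only. `BuildReposPost` needs nothing on disk, while `BuildKsConfig` assumes the `files/kickstart/*.cfg` shards read by `FetchConfigPart` are present in the working directory, and the release and channel names are just example values.
```python
import logging

logging.basicConfig(level=logging.INFO)

# Build only the %post repo section; this needs no .cfg shards on disk.
print(BuildReposPost('el8', 'stable'))

# Build a full kickstart config; this reads the files/kickstart/*.cfg shards,
# so it is expected to run from the image-build working directory.
ks_config = BuildKsConfig('centos-8', 'stable', byos=False, sap=False)
```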
|
{
"source": "jeremyje/gce-automated-ad-join",
"score": 2
}
|
#### File: register-computer/gcp/project.py
```python
import googleapiclient.discovery
from googleapiclient.errors import HttpError
class Project(object):
def __init__(self, project_id):
self.__project_id = project_id
self.__gce_client = googleapiclient.discovery.build('compute', 'v1')
def get_zones(self):
page_token = None
zones = []
while True:
result = self.__gce_client.zones().list(
project=self.__project_id,
pageToken=page_token).execute()
zones += [item["name"] for item in result["items"]]
            # Advance to the next page; stop once the API returns no further token.
            page_token = result.get("nextPageToken")
            if not page_token:
                break
return zones
def get_instance(self, name, zone):
try:
computer = self.__gce_client.instances().get(
project=self.__project_id,
zone=zone,
instance=name).execute()
return computer
except HttpError as e:
# Ignore 404 (Not Found) and return without result. Report all other errors
if e.resp.status == 404:
return
raise
def get_managed_instance_group(self, group_name, zone, region):
try:
if zone:
result = self.__gce_client.instanceGroupManagers().get(
project=self.__project_id,
zone=zone,
instanceGroupManager=group_name).execute()
else:
result = self.__gce_client.regionInstanceGroupManagers().get(
project=self.__project_id,
region=region,
instanceGroupManager=group_name).execute()
return result
except HttpError as e:
# Ignore 404 (Not Found) and return without result. Report all other errors
if e.resp.status == 404:
return
raise
```
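A short usage sketch of the wrapper above, for orientation only: it assumes Application Default Credentials with Compute Engine read access are available, and the project, instance and zone names are placeholders.
```python
# Placeholders: replace with a real project id, instance name and zone.
project = Project("my-gcp-project")

# get_zones() pages through the zones.list API and returns the zone names.
print(project.get_zones())

# get_instance() returns the instance resource, or None if it does not exist.
print(project.get_instance("my-vm", zone="us-central1-a"))
```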
#### File: register-computer/kerberos/password.py
```python
import os
import subprocess
import logging
import tempfile
class KerberosException(Exception):
def __init__(self, message, error_code):
self.__message = message
self.__error_code = error_code
super().__init__(self.__message)
def get_error_code(self):
return self.__error_code
class KerberosPasswordClient(object):
KSETPWD_BINARY = "ksetpwd"
def __init__(self, realm, kdc, admin_server, client_upn, client_password):
self.__realm = realm
self.__kdc = kdc
self.__admin_server = admin_server
self.__client_upn = client_upn
self.__client_password = client_password
def __generate_config_file(self):
config = """[libdefaults]
default_tkt_enctypes = rc4-hmac
default_tgs_enctypes = rc4-hmac
dns_lookup_realm = false
dns_lookup_kdc = false
[realms]
%s = {
kdc = %s
admin_server = %s
}
""" % (self.__realm.upper(), self.__kdc, self.__admin_server)
handle, name = tempfile.mkstemp(prefix = "krb5.")
with open(handle, "w", encoding = "utf-8") as f:
f.write(config)
return name
def set_password(self, upn, password):
bin_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "bin")
config_file = self.__generate_config_file()
env = os.environ.copy()
env["LD_LIBRARY_PATH"] = bin_path # for libcom_err.so
env["KRB5_CONFIG"] = config_file
env["KSETPWD_AGENT_PASSWORD"] = self.__client_password
env["KSETPWD_TARGET_PASSWORD"] = password
ksetpwd = os.path.join(bin_path, KerberosPasswordClient.KSETPWD_BINARY)
logging.info("Launching %s with environment config at %s and admin server %s" % (ksetpwd, config_file, self.__admin_server))
# NB. Realm names must be upper case to work.
process = subprocess.run(
[ksetpwd, self.__client_upn.upper(), upn.upper()],
capture_output=True,
env=env)
# Delete KRB5 config
os.unlink(config_file)
if process.returncode == 0:
if process.stderr:
logging.info(process.stderr)
if process.stdout:
logging.info(process.stdout)
else:
if process.stderr:
logging.warning(process.stderr)
if process.stdout:
logging.warning(process.stdout)
raise KerberosException("Password reset failed: %d" % process.returncode, process.returncode)
```
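The client above shells out to the bundled `ksetpwd` binary with the realm configuration written to a temporary krb5 file. A usage sketch follows, with placeholder realm, hosts and credentials; it only works where the `kerberos/bin/ksetpwd` binary is actually present.
```python
# Placeholders throughout; the agent account must be allowed to reset the
# target account's password in the target realm.
client = KerberosPasswordClient(
    realm="example.org",
    kdc="dc-1.example.org",
    admin_server="dc-1.example.org",
    client_upn="join-agent@EXAMPLE.ORG",
    client_password="agent-secret")

try:
    client.set_password("MYVM$@EXAMPLE.ORG", "new-machine-password")
except KerberosException as e:
    print("ksetpwd failed with exit code %d" % e.get_error_code())
```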
#### File: ad-joining/register-computer/test.py
```python
import requests
import google.auth
import google.auth
import google.auth.transport.requests
# google.oauth2 submodules are used below, so import them explicitly.
import google.oauth2._client
import google.oauth2.service_account
def __get_id_token(target_audience):
bootstrap_credentials, _ = google.auth.default(scopes=['https://www.googleapis.com/auth/iam'])
bootstrap_credentials.refresh(google.auth.transport.requests.Request())
# Construct OAuth 2.0 service account credentials using the signer
# and email acquired from the bootstrap credentials.
# By using the 'target_audience' claim, we cause an IdToken to
# be created.
service_account_credentials = google.oauth2.service_account.Credentials(
bootstrap_credentials.signer,
bootstrap_credentials.service_account_email,
token_uri=OAUTH_TOKEN_URI,
additional_claims={
"target_audience": target_audience,
})
# service_account_credentials gives us a JWT signed by the service
# account. Next, we use that to obtain an OpenID Connect token,
# which is a JWT signed by Google.
token_response = google.oauth2._client._token_endpoint_request(
google.auth.transport.requests.Request(),
OAUTH_TOKEN_URI,
{
'assertion': service_account_credentials._make_authorization_grant_assertion(),
'grant_type': google.oauth2._client._JWT_GRANT_TYPE,
})
return token_response["id_token"]
if __name__ == "__main__":
headers = {
"Authorization": "Bearer " + __get_id_token("https://localhost:5000/")
}
response = requests.post("http://localhost:5000/cleanup", headers=headers)
print(response.text)
```
|
{
"source": "jeremyjevon/TimeFromNowBot",
"score": 3
}
|
#### File: jeremyjevon/TimeFromNowBot/main.py
```python
import api as api_key
import datetime
from telegram.ext import *
# function for the /start bot command
def start_command(update, context):
update.message.reply_text('Welcome to Time From Now Bot!'
                              '\n\nI initially developed this telegram bot for Genshin Impact mobile players to calculate the time required to fully replenish their original resin.'
'\n\nHaving said that, this telegram bot can be used for any sort of "time from now" calculations.'
'\n\nUse the following telegram bot commands to get started:'
'\n/help to find out more about this bot'
'\n/time to display the current date and time (24-Hour Format)'
'\n/credits to find out more about the developer')
# function for the /help bot command
def help_command(update, context):
update.message.reply_text('For "time from now" calculations, please enter the time in the following format:'
'\n\nHH:MM:SS |'
'\n(eg. 01:15:30) for 1 hour, 15 minutes and 30 seconds'
'\n(eg. 22:00:00) for 22 hours, 0 minute and 0 second'
'\n\nLegend:'
'\nHH = Hours | (eg. 01 or 24)'
'\nMM = Minutes | (eg. 01 or 59)'
'\nSS = Seconds | (eg. 01 or 59)'
'\n\nPlease use the telegram bots commands where applicable.')
# function for the /credits bot command
def credits_command(update, context):
update.message.reply_text('This telegram bot was developed by BobTheBuilder.'
'\n(With the help of stackoverflow)')
# function for the /time bot command
def time_command(update, context):
date_time_now = datetime.datetime.now().strftime("%d/%m/%y, %H:%M:%S %p")
update.message.reply_text("The date and time now is: " + str(date_time_now))
# function to calculate time from now
def userinput(user_input):
# display the current time
time_now = datetime.datetime.now()
time_now_str = time_now.strftime("%d/%m/%y, %H:%M:%S %p")
# split user input into hour, minute and second
hour = user_input.split(":", 1)[0]
minute = user_input.split(":", 2)[1]
second = user_input.split(":", 3)[-1]
# display the calculated time
time_from_now = time_now + datetime.timedelta(hours=int(hour), minutes=int(minute), seconds=int(second))
time_from_now_str = time_from_now.strftime("%d/%m/%y, %H:%M:%S %p")
# bot output
bot_response = "The date and time now is: " + time_now_str + "\nResin will be refilled at: " + time_from_now_str
# logging of user input
print("HH:" + hour + " MM:" + minute + " SS:" + second)
return bot_response
# function to display bot response
def bot_reply(update, context):
user_input = update.message.text
update.message.reply_text(userinput(user_input))
# error logging
def error(update, context):
print(f"Update {update} caused error {context.error}")
# main process
def main():
updater = Updater(api_key.API_KEY, use_context=True)
dp = updater.dispatcher
# link bot commands
dp.add_handler(CommandHandler("start", start_command))
dp.add_handler(CommandHandler("help", help_command))
dp.add_handler(CommandHandler("time", time_command))
dp.add_handler(CommandHandler("credits", credits_command))
dp.add_handler(MessageHandler(Filters.text, bot_reply))
# bot error
dp.add_error_handler(error)
# bot processes
updater.start_polling()
updater.idle()
# logging startup of bot
print("Time From Now Bot is running")
main()
```
|
{
"source": "jeremyjiezhou/Learn-PyTorch",
"score": 3
}
|
#### File: Learn-PyTorch/post_processing/PEMFC_Paraview_Plots.py
```python
exportDir = '/home/wardlawp/Code_workspace/FCST/data/AgglomeratePaper/sigma_P/3_two_orders_smaller_by5/post/'
fileName = exportDir +'1.vtk'
plotName = "Case1_1.0A"
# Is the same as above however during run time you pass the name of the vtk file (Voltage 0.000000, cycle number, Name of file you want to save plots under.).
#=========================================================================================================
#voltage = raw_input("Voltage plz:")
#cycleN = raw_input("Cycle number plz:")
#Name = raw_input("Current [A] plz:")
#exportDir = '/home/kdomican/FCST_2013_09SEP_18/trunk/data/mea/Kailyn/MEA_Param_Homo_MPL_DTK_CD_125_Fitted_th_RHsecond/MEA_40_Nafion/Anode_DP_Agg__Cathode_DT_Agg/'
#fileName = exportDir +'fuel_cell_solution_DataFile_000'+cycleN+'_Cycle_1__V_cell_-'+voltage+'.vtk'
#plotName = ""+Name+"A"
#---------------------------------------------------------------------------------------------------------
layer = 4.0
def cleanEverything():
for key in GetSources():
s = FindSource(key[0])
Delete(s)
for key in GetRepresentations():
r = FindSource(key[0])
Delete(r)
try: paraview.simple
except: from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
cleanEverything()
fuel_cell_solution_DataFile_00017_Cycle_1__V_cell_0_548000_vtk = LegacyVTKReader( FileNames=[fileName] )
RenderView1 = GetRenderView()
RenderView1.InteractionMode = '2D'
RenderView1.Background = [1.0, 1.0, 1.0]
RenderView1.CenterOfRotation = [0.8333250000000001, 0.05, 0.0]
RenderView1.OrientationAxesLabelColor = [0.0, 0.0, 0.0]
RenderView1.OrientationAxesVisibility = 0
#---------------------------------------------------------------------------------------------------------
#Conventional CL
#==============================================================
RenderView1.CameraFocalPoint = [0.8333250000000001, 0.05, 0.0]
RenderView1.CameraParallelScale = 0.051538820320220766
RenderView1.CameraPosition = [0.8333250000000001, 0.05, 0.19913071041509228]
RenderView1.CameraClippingRange = [0.19713940331094135, 0.20211767107131867]
# Low Loading CL
#==============================================================
#RenderView1.CameraFocalPoint = [4.91325, 0.05, 0.0]
#RenderView1.CameraParallelScale = 0.050717354031928785
#RenderView1.CameraPosition = [4.91325, 0.05, 0.19936718414350557]
#RenderView1.CameraClippingRange = [0.1973735123020705, 0.20235769190565817]
#---------------------------------------------------------------------------------------------------------
Calculator5 = Calculator()
Calculator5.Function = '-1.16151-protonic_electrical_potential+electronic_electrical_potential'
Calculator5.ResultArrayName = 'Overpotential'
Threshold1 = Threshold()
Threshold1.Scalars = ['POINTS', 'cell_material_ids']
Threshold1.ThresholdRange = [layer, layer]
DataRepresentation2 = Show()
DataRepresentation2.CubeAxesColor = [0.0, 0.0, 0.0]
DataRepresentation2.EdgeColor = [0.0, 0.0, 0.5]
DataRepresentation2.AmbientColor = [0.0, 0.0, 0.0]
#---------------------------------------------------------------------------------------------------------
#Conventional CL
#==============================================================
#DataRepresentation2.Scale = [150.0, 1.0, 1.0]
# Low Loading CL
#==============================================================
DataRepresentation2.Scale = [25.0, 1.0, 1.0]
#---------------------------------------------------------------------------------------------------------
#######################################protonic_electrical_potential
rangePhiM = Threshold1.GetDataInformation().GetPointDataInformation().GetArrayInformation("protonic_electrical_potential").GetComponentRange(0)
a1_protonic_electrical_potential_PVLookupTable = GetLookupTableForArray( "protonic_electrical_potential", 1, RGBPoints=[rangePhiM[0], 0.0, 0.0, 1.0, rangePhiM[1], 1.0, 0.0, 0.0], VectorMode='Magnitude', NanColor=[0.4980392156862745, 0.4980392156862745, 0.4980392156862745], ColorSpace='HSV', ScalarRangeInitialized=1.0, AllowDuplicateScalars=1 )
a1_protonic_electrical_potential_PiecewiseFunction = CreatePiecewiseFunction( Points=[rangePhiM[0], 0.0, 0.5, 0.0, rangePhiM[1], 1.0, 0.5, 0.0] )
ScalarBarWidgetRepresentation2 = CreateScalarBar( Title='Phi_M', Enabled=1, LabelFontSize=12, LabelColor=[0.0, 0.0, 0.0], LookupTable=a1_protonic_electrical_potential_PVLookupTable, TitleFontSize=12, TitleColor=[0.0, 0.0, 0.0], Position=[0.6, 0.21059113300492627] )
GetRenderView().Representations.append(ScalarBarWidgetRepresentation2)
DataRepresentation2.ScalarOpacityFunction = a1_protonic_electrical_potential_PiecewiseFunction
DataRepresentation2.ColorArrayName = 'protonic_electrical_potential'
DataRepresentation2.LookupTable = a1_protonic_electrical_potential_PVLookupTable
a1_protonic_electrical_potential_PVLookupTable.ScalarOpacityFunction = a1_protonic_electrical_potential_PiecewiseFunction
WriteImage(exportDir + plotName+ '_phiM.png')
ScalarBarWidgetRepresentation2.TitleColor = [0.0, 0.0, 0.0]
ScalarBarWidgetRepresentation2.Enabled = 0
ScalarBarWidgetRepresentation2.Visibility = 0
ScalarBarWidgetRepresentation2.LabelColor = [0.0, 0.0, 0.0]
#######################################electronic_electrical_potential
rangePhiS = Threshold1.GetDataInformation().GetPointDataInformation().GetArrayInformation("electronic_electrical_potential").GetComponentRange(0)
a1_electronic_electrical_potential_PVLookupTable = GetLookupTableForArray( "electronic_electrical_potential", 1, RGBPoints=[rangePhiS[0], 0.0, 0.0, 1.0, rangePhiS[1], 1.0, 0.0, 0.0], VectorMode='Magnitude', NanColor=[0.4980392156862745, 0.4980392156862745, 0.4980392156862745], ColorSpace='HSV', ScalarRangeInitialized=1.0, AllowDuplicateScalars=1 )
a1_electronic_electrical_potential_PiecewiseFunction = CreatePiecewiseFunction( Points=[rangePhiS[0], 0.0, 0.5, 0.0, rangePhiS[1], 1.0, 0.5, 0.0] )
ScalarBarWidgetRepresentation3 = CreateScalarBar( Title='Phi_S', Position2=[0.13, 0.49999999999999956], Enabled=1, LabelFontSize=12, LabelColor=[0.0, 0.0, 0.0], LookupTable=a1_electronic_electrical_potential_PVLookupTable, TitleFontSize=12, TitleColor=[0.0, 0.0, 0.0], Position=[0.6, 0.21059113300492627] )
GetRenderView().Representations.append(ScalarBarWidgetRepresentation3)
DataRepresentation2.ScalarOpacityFunction = a1_electronic_electrical_potential_PiecewiseFunction
DataRepresentation2.ColorArrayName = 'electronic_electrical_potential'
DataRepresentation2.LookupTable = a1_electronic_electrical_potential_PVLookupTable
a1_electronic_electrical_potential_PVLookupTable.ScalarOpacityFunction = a1_electronic_electrical_potential_PiecewiseFunction
WriteImage(exportDir + plotName +'_phiS.png')
ScalarBarWidgetRepresentation3.TitleColor = [0.0, 0.0, 0.0]
ScalarBarWidgetRepresentation3.Enabled = 0
ScalarBarWidgetRepresentation3.Visibility = 0
ScalarBarWidgetRepresentation3.LabelColor = [0.0, 0.0, 0.0]
#######################################Agglomerate Effectiveness
rangeEff = Threshold1.GetDataInformation().GetPointDataInformation().GetArrayInformation("agglomerate_effectiveness").GetComponentRange(0)
a1_agglomerate_effectiveness_PVLookupTable = GetLookupTableForArray( "agglomerate_effectiveness", 1, RGBPoints=[rangeEff[0], 0.0, 0.0, 1.0, rangeEff[1], 1.0, 0.0, 0.0], VectorMode='Magnitude', NanColor=[0.4980392156862745, 0.4980392156862745, 0.4980392156862745], ColorSpace='HSV', ScalarRangeInitialized=1.0, AllowDuplicateScalars=1 )
a1_agglomerate_effectiveness_PiecewiseFunction = CreatePiecewiseFunction( Points=[rangeEff[0], 0.0, 0.5, 0.0, rangeEff[1], 1.0, 0.5, 0.0] )
ScalarBarWidgetRepresentation5 = CreateScalarBar( Title='Agg Eff [%]', Enabled=1, LabelFontSize=12, LabelColor=[0.0, 0.0, 0.0], LookupTable=a1_agglomerate_effectiveness_PVLookupTable, TitleFontSize=12, TitleColor=[0.0, 0.0, 0.0], Position=[0.6, 0.21059113300492627] )
GetRenderView().Representations.append(ScalarBarWidgetRepresentation5)
DataRepresentation2.ScalarOpacityFunction = a1_agglomerate_effectiveness_PiecewiseFunction
DataRepresentation2.ColorArrayName = 'agglomerate_effectiveness'
DataRepresentation2.LookupTable = a1_agglomerate_effectiveness_PVLookupTable
a1_agglomerate_effectiveness_PVLookupTable.ScalarOpacityFunction = a1_agglomerate_effectiveness_PiecewiseFunction
WriteImage(exportDir + plotName + '_eff.png')
ScalarBarWidgetRepresentation5.TitleColor = [0.0, 0.0, 0.0]
ScalarBarWidgetRepresentation5.Enabled = 0
ScalarBarWidgetRepresentation5.Visibility = 0
ScalarBarWidgetRepresentation5.LabelColor = [0.0, 0.0, 0.0]
#######################################Membrane Water Content
scale1 = Threshold1.GetDataInformation().GetPointDataInformation().GetArrayInformation( "membrane_water_content").GetComponentRange(0)
a1_membrane_water_content_PVLookupTable = GetLookupTableForArray( "membrane_water_content", 1, RGBPoints=[scale1[0], 0.0, 0.0, 1.0, scale1[1], 1.0, 0.0, 0.0], VectorMode='Magnitude', NanColor=[0.4980392156862745, 0.4980392156862745, 0.4980392156862745], ColorSpace='HSV', ScalarRangeInitialized=1.0, AllowDuplicateScalars=1 )
a1_membrane_water_content_PiecewiseFunction = CreatePiecewiseFunction( Points=[scale1[0], 0.0, 0.5, 0.0, scale1[1], 1.0, 0.5, 0.0] )
ScalarBarWidgetRepresentation4 = CreateScalarBar( Title=' Water Content', Enabled=1, LabelFontSize=12, LabelColor=[0.0, 0.0, 0.0], LookupTable=a1_membrane_water_content_PVLookupTable, TitleFontSize=12, TitleColor=[0.0, 0.0, 0.0], Position=[0.6, 0.21059113300492627] )
GetRenderView().Representations.append(ScalarBarWidgetRepresentation4)
DataRepresentation2.ScalarOpacityFunction = a1_membrane_water_content_PiecewiseFunction
DataRepresentation2.ColorArrayName = 'membrane_water_content'
DataRepresentation2.LookupTable = a1_membrane_water_content_PVLookupTable
a1_membrane_water_content_PVLookupTable.ScalarOpacityFunction = a1_membrane_water_content_PiecewiseFunction
WriteImage(exportDir + plotName+ '_water.png')
ScalarBarWidgetRepresentation4.TitleColor = [0.0, 0.0, 0.0]
ScalarBarWidgetRepresentation4.Enabled = 0
ScalarBarWidgetRepresentation4.Visibility = 0
ScalarBarWidgetRepresentation4.LabelColor = [0.0, 0.0, 0.0]
#######################################Relative Humidity
rangeRelH = Threshold1.GetDataInformation().GetPointDataInformation().GetArrayInformation( "relative_humidity").GetComponentRange(0)
a1_protonic_electrical_potential_PVLookupTable = GetLookupTableForArray( "relative_humidity", 1, RGBPoints=[rangeRelH[0], 0.0, 0.0, 1.0, rangeRelH[1], 1.0, 0.0, 0.0], VectorMode='Magnitude', NanColor=[0.4980392156862745, 0.4980392156862745, 0.4980392156862745], ColorSpace='HSV', ScalarRangeInitialized=1.0, AllowDuplicateScalars=1 )
a1_protonic_electrical_potential_PiecewiseFunction = CreatePiecewiseFunction( Points=[rangeRelH[0], 0.0, 0.5, 0.0, rangeRelH[1], 1.0, 0.5, 0.0] )
ScalarBarWidgetRepresentation6 = CreateScalarBar( Title=' Relative Humidity', Enabled=1, LabelFontSize=12, LabelColor=[0.0, 0.0, 0.0], LookupTable=a1_protonic_electrical_potential_PVLookupTable, TitleFontSize=12, TitleColor=[0.0, 0.0, 0.0], Position=[0.6, 0.21059113300492627] )
GetRenderView().Representations.append(ScalarBarWidgetRepresentation6)
DataRepresentation2.ScalarOpacityFunction = a1_protonic_electrical_potential_PiecewiseFunction
DataRepresentation2.ColorArrayName = 'relative_humidity'
DataRepresentation2.LookupTable = a1_protonic_electrical_potential_PVLookupTable
a1_protonic_electrical_potential_PVLookupTable.ScalarOpacityFunction = a1_protonic_electrical_potential_PiecewiseFunction
WriteImage(exportDir + plotName+ '_relHumidity.png')
ScalarBarWidgetRepresentation6.TitleColor = [0.0, 0.0, 0.0]
ScalarBarWidgetRepresentation6.Enabled = 0
ScalarBarWidgetRepresentation6.Visibility = 0
ScalarBarWidgetRepresentation6.LabelColor = [0.0, 0.0, 0.0]
#######################################water_molar_fraction
rangeWmF = Threshold1.GetDataInformation().GetPointDataInformation().GetArrayInformation( "water_molar_fraction").GetComponentRange(0)
a1_protonic_electrical_potential_PVLookupTable = GetLookupTableForArray( "water_molar_fraction", 1, RGBPoints=[rangeWmF[0], 0.0, 0.0, 1.0, rangeWmF[1], 1.0, 0.0, 0.0], VectorMode='Magnitude', NanColor=[0.4980392156862745, 0.4980392156862745, 0.4980392156862745], ColorSpace='HSV', ScalarRangeInitialized=1.0, AllowDuplicateScalars=1 )
a1_protonic_electrical_potential_PiecewiseFunction = CreatePiecewiseFunction( Points=[rangeWmF[0], 0.0, 0.5, 0.0, rangeWmF[1], 1.0, 0.5, 0.0] )
ScalarBarWidgetRepresentation7 = CreateScalarBar( Title=' H2O Molar Fraction', Enabled=1, LabelFontSize=12, LabelColor=[0.0, 0.0, 0.0], LookupTable=a1_protonic_electrical_potential_PVLookupTable, TitleFontSize=12, TitleColor=[0.0, 0.0, 0.0], Position=[0.6, 0.21059113300492627] )
GetRenderView().Representations.append(ScalarBarWidgetRepresentation7)
DataRepresentation2.ScalarOpacityFunction = a1_protonic_electrical_potential_PiecewiseFunction
DataRepresentation2.ColorArrayName = 'water_molar_fraction'
DataRepresentation2.LookupTable = a1_protonic_electrical_potential_PVLookupTable
a1_protonic_electrical_potential_PVLookupTable.ScalarOpacityFunction = a1_protonic_electrical_potential_PiecewiseFunction
WriteImage(exportDir + plotName+ '_waterMolarFraction.png')
ScalarBarWidgetRepresentation7.TitleColor = [0.0, 0.0, 0.0]
ScalarBarWidgetRepresentation7.Enabled = 0
ScalarBarWidgetRepresentation7.Visibility = 0
ScalarBarWidgetRepresentation7.LabelColor = [0.0, 0.0, 0.0]
#######################################"oxygen_molar_fraction"
rangeO2 = Threshold1.GetDataInformation().GetPointDataInformation().GetArrayInformation( "oxygen_molar_fraction").GetComponentRange(0)
a1_protonic_electrical_potential_PVLookupTable = GetLookupTableForArray("oxygen_molar_fraction", 1, RGBPoints=[rangeO2[0], 0.0, 0.0, 1.0, rangeO2[1], 1.0, 0.0, 0.0], VectorMode='Magnitude', NanColor=[0.4980392156862745, 0.4980392156862745, 0.4980392156862745], ColorSpace='HSV', ScalarRangeInitialized=1.0, AllowDuplicateScalars=1 )
a1_protonic_electrical_potential_PiecewiseFunction = CreatePiecewiseFunction( Points=[rangeO2[0], 0.0, 0.5, 0.0, rangeO2[1], 1.0, 0.5, 0.0] )
ScalarBarWidgetRepresentation8 = CreateScalarBar( Title=" O2 Molar Fraction", Enabled=1, LabelFontSize=12, LabelColor=[0.0, 0.0, 0.0], LookupTable=a1_protonic_electrical_potential_PVLookupTable, TitleFontSize=12, TitleColor=[0.0, 0.0, 0.0], Position=[0.6, 0.21059113300492627] )
GetRenderView().Representations.append(ScalarBarWidgetRepresentation8)
DataRepresentation2.ScalarOpacityFunction = a1_protonic_electrical_potential_PiecewiseFunction
DataRepresentation2.ColorArrayName = "oxygen_molar_fraction"
DataRepresentation2.LookupTable = a1_protonic_electrical_potential_PVLookupTable
a1_protonic_electrical_potential_PVLookupTable.ScalarOpacityFunction = a1_protonic_electrical_potential_PiecewiseFunction
WriteImage(exportDir + plotName+ '_o2.png')
ScalarBarWidgetRepresentation8.TitleColor = [0.0, 0.0, 0.0]
ScalarBarWidgetRepresentation8.Enabled = 0
ScalarBarWidgetRepresentation8.Visibility = 0
ScalarBarWidgetRepresentation8.LabelColor = [0.0, 0.0, 0.0]
####################################### Overpotential & Contour lines & Auto Cropping
rangOverPot = Threshold1.GetDataInformation().GetPointDataInformation().GetArrayInformation('Overpotential').GetComponentRange(0)
a1_protonic_electrical_potential_PVLookupTable = GetLookupTableForArray("Overpotential", 1, RGBPoints=[rangOverPot[0], 0.0, 0.0, 1.0, rangOverPot[1], 1.0, 0.0, 0.0], VectorMode='Magnitude', NanColor=[0.4980392156862745, 0.4980392156862745, 0.4980392156862745], ColorSpace='HSV', ScalarRangeInitialized=1.0, AllowDuplicateScalars=1 )
a1_protonic_electrical_potential_PiecewiseFunction = CreatePiecewiseFunction( Points=[rangOverPot[0], 0.0, 0.5, 0.0, rangOverPot[1], 1.0, 0.5, 0.0] )
ScalarBarWidgetRepresentation9 = CreateScalarBar( Title="Overpotential", Enabled=1, LabelFontSize=12, LabelColor=[0.0, 0.0, 0.0], LookupTable=a1_protonic_electrical_potential_PVLookupTable, TitleFontSize=12, TitleColor=[0.0, 0.0, 0.0], Position=[0.6, 0.21059113300492627] )
GetRenderView().Representations.append(ScalarBarWidgetRepresentation9)
DataRepresentation2.ScalarOpacityFunction = a1_protonic_electrical_potential_PiecewiseFunction
DataRepresentation2.ColorArrayName = "Overpotential"
DataRepresentation2.LookupTable = a1_protonic_electrical_potential_PVLookupTable
a1_protonic_electrical_potential_PVLookupTable.ScalarOpacityFunction = a1_protonic_electrical_potential_PiecewiseFunction
WriteImage(exportDir + plotName+ '_overPot.png')
ScalarBarWidgetRepresentation9.TitleColor = [0.0, 0.0, 0.0]
ScalarBarWidgetRepresentation9.Enabled = 0
ScalarBarWidgetRepresentation9.Visibility = 0
ScalarBarWidgetRepresentation9.LabelColor = [0.0, 0.0, 0.0]
#################################
#######################################"current_density"
rangeI= Threshold1.GetDataInformation().GetPointDataInformation().GetArrayInformation( "current_density").GetComponentRange(0)
a1_protonic_electrical_potential_PVLookupTable = GetLookupTableForArray("current_density", 1, RGBPoints=[rangeI[0], 0.0, 0.0, 1.0, rangeI[1], 1.0, 0.0, 0.0], VectorMode='Magnitude', NanColor=[0.4980392156862745, 0.4980392156862745, 0.4980392156862745], ColorSpace='HSV', ScalarRangeInitialized=1.0, AllowDuplicateScalars=1 )
a1_protonic_electrical_potential_PiecewiseFunction = CreatePiecewiseFunction( Points=[rangeI[0], 0.0, 0.5, 0.0, rangeI[1], 1.0, 0.5, 0.0] )
ScalarBarWidgetRepresentation10 = CreateScalarBar( Title=" Current Density", Enabled=1, LabelFontSize=12, LabelColor=[0.0, 0.0, 0.0], LookupTable=a1_protonic_electrical_potential_PVLookupTable, TitleFontSize=12, TitleColor=[0.0, 0.0, 0.0], Position=[0.6, 0.21059113300492627] )
GetRenderView().Representations.append(ScalarBarWidgetRepresentation10)
DataRepresentation2.ScalarOpacityFunction = a1_protonic_electrical_potential_PiecewiseFunction
DataRepresentation2.ColorArrayName = "current_density"
DataRepresentation2.LookupTable = a1_protonic_electrical_potential_PVLookupTable
a1_protonic_electrical_potential_PVLookupTable.ScalarOpacityFunction = a1_protonic_electrical_potential_PiecewiseFunction
WriteImage(exportDir + plotName+ '_i.png')
ScalarBarWidgetRepresentation10.TitleColor = [0.0, 0.0, 0.0]
ScalarBarWidgetRepresentation10.Enabled = 0
ScalarBarWidgetRepresentation10.Visibility = 0
ScalarBarWidgetRepresentation10.LabelColor = [0.0, 0.0, 0.0]
##################################################
Render()
```
#### File: PythonFCST/util/baseclass.py
```python
import logging as _logging
# set up a default logging configuration (timestamped format, WARNING level)
_logging.basicConfig(level=_logging.WARNING,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
)
class fcstobject(object):
r"""
Base class with a few bells and whistles. Mainly output on screen, logging
and pickling. This is the class from which all other classes should inherit.
Parameters
----------
loglevel : int
Level of the logger (10=Debug, 20=INFO, 30=Warning, 40=Error, 50=Critical)
loggername : string
Name of the logger. The default is the name of the class.
Attributes
----------
self._logger : _logging.logger
This class defines a logger for the class and all classes inheriting from it.
        It supports all settings of the standard logger. It still needs some fine
tuning.
======== ===== =============================================================
Level Value When it is used
======== ===== =============================================================
DEBUG 10 Detailed information, at diagnostic stage
INFO 20 Confirmation that things are working as expected.
WARNING 30 An indication that something unexpected happened.
ERROR 40 Due to a more serious problem, program might still execute.
CRITICAL 50 A serious error, might compromise program execution
======== ===== =============================================================
"""
def __init__(self,**kwargs):
super(fcstobject,self).__init__()
if 'loggername' in kwargs.keys():
self._logger = _logging.getLogger(kwargs['loggername'])
else:
self._logger = _logging.getLogger(self.__class__.__name__)
if 'loglevel' in kwargs.keys():
loglevel=kwargs['loglevel']
self.set_loglevel(loglevel)
def set_loglevel(self,level=20):
r"""
Sets the effective log level for this class
======== ===== =============================================================
Level Value When it is used
======== ===== =============================================================
DEBUG 10 Detailed information, at diagnostic stage
INFO 20 Confirmation that things are working as expected.
WARNING 30 An indication that something unexpected happened.
ERROR 40 Due to a more serious problem, program might still execute.
CRITICAL 50 A serious error, might compromise program execution
======== ===== =============================================================
Parameters
----------
level : int
Level above which messages should be logged.
"""
self._logger.setLevel(level)
self._logger.debug("Changed log level")
def IOpickle(self,filename="test.pickle"):
r"""
        Write the class object to a pickle file.
Parameters
----------
filename : string
name of the file to be written.
"""
self._logger.debug('Pickle self')
def IOunpickle(self):
self._logger.debug('UnPickle self')
class testinheritance(fcstobject):
r"""
    testinheritance: Trial inheritance from fcstobject.
"""
def __init__(self):
super(testinheritance,self).__init__()
self._logger.debug("Hallo")
```
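A minimal usage sketch for the base class above; the subclass name, logger name, and levels are illustrative and not part of the original package:
```python
# Hypothetical subclass: anything inheriting from fcstobject gets a named logger.
class DemoProcessor(fcstobject):
    def __init__(self, **kwargs):
        super(DemoProcessor, self).__init__(**kwargs)
        self._logger.info("DemoProcessor initialized")
proc = DemoProcessor(loggername="demo", loglevel=10)  # 10 = DEBUG
proc.set_loglevel(30)  # from here on, only WARNING and above are logged
```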
|
{
"source": "jeremy-jin/nameko-atomicity",
"score": 3
}
|
#### File: nameko-atomicity/nameko_atomicity/rollback_commands.py
```python
import inspect
import wrapt
from nameko.extensions import DependencyProvider
from .commands import CommandsWrapper, CommandsProxy
class _RollbackCommands(CommandsWrapper):
pass
class RollbackCommandsDependencyProvider(DependencyProvider):
"""Dependency provider to access to the `::RollbackCommands::`.
It will return a ``RollbackCommandsWrapper`` instance.
"""
@classmethod
def rollback_once_failed(cls, wrapped=None):
"""Execute the rollback command when a function call fails
The following example demonstrates the use of::
>>> from nameko.rpc import rpc
... from nameko_atomicity import RollbackCommands
...
... def rollback_function():
... pass
...
... class ConversionService(object):
... name = "conversions"
...
... rollback_commands = RollbackCommands()
...
... @rpc
... @rollback_once_failed
... def inches_to_cm(self, inches):
... self.rollback_commands.append(
... func=rollback_function,
... args=(),
... kwargs={},
... )
"""
def find_commands_providers(instance):
commands_provides = inspect.getmembers(
instance, lambda obj: isinstance(obj, _RollbackCommands)
)
return commands_provides
@wrapt.decorator
def wrapper(wrapped, instance, args, kwargs):
commands_provides = find_commands_providers(instance)
commands_provides = [
commands_provide[1] for commands_provide in commands_provides
]
try:
response = wrapped(*args, **kwargs)
except Exception as exc:
                # Roll back all prior operations
instance.storage.session.rollback()
CommandsProxy(commands_provides).exec_commands()
                # Commit the operations performed during the rollback
instance.storage.session.commit()
raise exc
finally:
CommandsProxy(commands_provides).clear_commands()
return response
if wrapped:
return wrapper(wrapped)
return wrapper
def get_dependency(self, worker_ctx):
return _RollbackCommands(worker_ctx)
RollbackCommands = RollbackCommandsDependencyProvider
rollback_once_failed = RollbackCommandsDependencyProvider.rollback_once_failed
```
|
{
"source": "jeremyjones1017/Mirror",
"score": 3
}
|
#### File: Mirror/stonks/download_stonk_info.py
```python
import yfinance as yf
import pandas as pd
import json
def main():
days_ago_list = [0,1,5,-1]
my_stonks = pd.read_csv('stonks.txt',delimiter='\t')
stonk_list = list_stonks(my_stonks)
stonk_dict = get_stonk_info(stonk_list)
with open('stonk_info.json','w') as outfile:
json.dump(stonk_dict,outfile)
def list_stonks(df):
non_unique_stonk_list = list(df.stonk)
stonk_list = []
for stonk in non_unique_stonk_list:
if stonk not in stonk_list:
stonk_list.append(stonk)
return stonk_list
def get_stonk_info(stonk_list):
data = yf.download(tickers=' '.join(stonk_list),period='1y',group_by='ticker')
dates = list(data[stonk_list[0]].index)
dates = [str(d)[0:10] for d in dates]
stonk_dict = dict()
stonk_dict['Date'] = dates
for stonk in stonk_list:
close_arr = list(data[stonk]['Close'])
stonk_dict[stonk] = close_arr
return stonk_dict
if __name__ == '__main__':
main()
```
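The script above assumes a tab-delimited `stonks.txt` with at least a `stonk` column of ticker symbols; the sketch below shows one way to create such a file and run the download (the tickers are placeholders, and `yf.download` needs network access):
```python
# Illustrative only: build a minimal stonks.txt and fetch a year of closing prices.
import pandas as pd
pd.DataFrame({"stonk": ["AAPL", "MSFT", "AAPL"]}).to_csv("stonks.txt", sep="\t", index=False)
main()  # writes closing prices for each unique ticker to stonk_info.json
```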
#### File: Mirror/stonks/__init__.py
```python
import pandas as pd
import os
import datetime
import json
import math
def main():
#days_ago_list = [0,1,5,20]
#days_ago_list = range(30)
days_ago_list = [0]
data = stonks(days_ago_list)
for i in data:
print(i)
def stonks(days_ago_list):
stonks_df = pd.read_csv('stonks/stonks.txt',delimiter='\t')
transfers_df = pd.read_csv('stonks/money_transfers.txt',delimiter='\t')
to_return = []
for days_ago in days_ago_list:
portfolio,value,date = get_current_value(stonks_df,transfers_df,days_ago)
to_return.append([portfolio,value,date])
return to_return
def get_current_value(stonks_df,transfers_df,count_back_days_ago):
#Add up deposits and withdrawals
value = 0.
for index,row in transfers_df.iterrows():
if row.transfer_type == 'deposit':
value += row.amount
elif row.transfer_type == 'withdrawal':
value -= row.amount
else:
print('Error in tallying transfers. The row looked like this: \n{}'.format(row))
input_value = value
days_ago = -1 - count_back_days_ago
#Read downloaded file
with open('stonks/stonk_info.json') as json_file:
stonk_info = json.load(json_file)
#Add up stonk values
cash = value
portfolio = dict()
stonk_value = dict()
for index,row in stonks_df.iterrows():
trade_date = datetime.datetime.strptime(row.trade_date,'%m-%d-%Y')
position_date = datetime.datetime.strptime(stonk_info['Date'][days_ago],'%Y-%m-%d')
if position_date >= trade_date:
close_prices = stonk_info[row.stonk]
stonk_value[row.stonk] = [close_prices[days_ago],position_date]
i=0
while math.isnan(stonk_value[row.stonk][0]):
i+=1
stonk_value[row.stonk][0] = close_prices[days_ago-i]
stonk_value[row.stonk][1] = datetime.datetime.strptime(stonk_info['Date'][days_ago-i],'%Y-%m-%d')
if row.trade_type == 'buy':
value = value + row.shares * (stonk_value[row.stonk][0] - row.price)
cash -= (row.price * row.shares)
elif row.trade_type == 'sell':
value = value + row.shares * (row.price - stonk_value[row.stonk][0])
cash += (row.price * row.shares)
else:
print('Error in tallying values. The row looked like this: \n{}'.format(row))
portfolio = update_portfolio(row,portfolio)
portfolio['Cash'] = cash
return portfolio,value,position_date
def update_portfolio(r,p):
#r - row of the stonk csv
#p - portfolio dictionary
if r.stonk not in p:
p[r.stonk] = r.shares
if r.trade_type == 'sell': print('Error - first trade is a sell')
else:
if r.trade_type == 'buy':
p[r.stonk] += r.shares
elif r.trade_type == 'sell':
p[r.stonk] -= r.shares
return p
def get_date(n_days):
now = datetime.datetime.now()
today = now.replace(hour=0,minute=0,second=0,microsecond=0)
yesterday = today - datetime.timedelta(days = 1)
if n_days == 0:
if now > now.replace(hour=18,minute=0,second=0,microsecond=0):
date = today
else:
date = yesterday
else:
date = today - datetime.timedelta(days = n_days)
while date.weekday() >= 5:
date -= datetime.timedelta(days = 1)
return date
if __name__ == '__main__':
main()
```
|
{
"source": "jeremyjordan/flower-classifier",
"score": 3
}
|
#### File: flower_classifier/datasets/csv.py
```python
import pandas as pd
import pytorch_lightning as pl
import torch
import torchvision
from PIL import Image
from flower_classifier.datasets.oxford_flowers import NAMES as oxford_idx_to_names
class CSVDataset(torch.utils.data.Dataset):
def __init__(
self,
filename: str,
data_col: str = "filename",
target_col: str = "label",
transforms=[],
class_names=oxford_idx_to_names,
):
self.df = pd.read_csv(filename)
self.data_col = data_col
self.target_col = target_col
self.class_names = class_names
self.transform = torchvision.transforms.Compose(transforms)
def __getitem__(self, idx):
row = self.df.iloc[idx]
img = Image.open(row[self.data_col])
img = self.transform(img)
label = row[self.target_col]
label = torch.tensor(label, dtype=torch.long)
return img, label
def __len__(self):
return self.df.shape[0]
@property
def classes(self):
return self.class_names
class CSVDataModule(pl.LightningDataModule):
def __init__(
self,
train_csv: str = "/content/drive/My Drive/Flowers/oxford102/train_split.csv",
val_csv: str = "/content/drive/My Drive/Flowers/oxford102/val_split.csv",
train_transforms=[],
val_transforms=[],
batch_size=16,
data_col: str = "filename",
target_col: str = "label",
class_names=oxford_idx_to_names,
):
super().__init__()
self.batch_size = batch_size
self.train_dataset = CSVDataset(
filename=train_csv,
data_col=data_col,
target_col=target_col,
transforms=train_transforms,
class_names=class_names,
)
self.val_dataset = CSVDataset(
filename=val_csv,
data_col=data_col,
target_col=target_col,
transforms=val_transforms,
class_names=class_names,
)
def train_dataloader(self):
return torch.utils.data.DataLoader(self.train_dataset, batch_size=self.batch_size, num_workers=4)
def val_dataloader(self):
return torch.utils.data.DataLoader(self.val_dataset, batch_size=self.batch_size, num_workers=4)
@property
def len_train(self):
return len(self.train_dataset)
@property
def len_valid(self):
return len(self.val_dataset)
```
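A small usage sketch for the data module above; the CSV paths, transforms, and batch size are placeholders and assume CSV files with `filename` and `label` columns already exist:
```python
# Sketch: build the datamodule and pull one training batch.
import torchvision.transforms as T
transforms = [T.Resize((256, 256)), T.ToTensor()]
datamodule = CSVDataModule(
    train_csv="train_split.csv",
    val_csv="val_split.csv",
    train_transforms=transforms,
    val_transforms=transforms,
    batch_size=16,
)
images, labels = next(iter(datamodule.train_dataloader()))
```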
#### File: datasets/flickr/auth.py
```python
import sqlite3
import webbrowser
from pathlib import Path
OAUTH_TOKEN_CACHE = Path.home() / ".flickr" / "oauth-tokens.sqlite"
USER_ID = "192840150@N08"
def authenticate_new_user(flickr_client):
"""
Example code snippet to authenticate for a new user.
"""
flickr_client.get_request_token(oauth_callback="oob")
authorize_url = flickr_client.auth_url(perms="write")
webbrowser.open_new_tab(authorize_url)
verifier = str(input("Verifier code: "))
flickr_client.get_access_token(verifier)
def show_user_secrets(username="jeremythomasjordan"):
conn = sqlite3.connect(OAUTH_TOKEN_CACHE)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
results = cur.execute(
f"""
SELECT *
FROM oauth_tokens
WHERE username = '{username}';
"""
)
results = dict(results.fetchone())
user_secrets = {
"OAUTH_TOKEN": results["oauth_token"],
"OAUTH_TOKEN_SECRET": results["oauth_token_secret"],
"OAUTH_ACCESS_LEVEL": results["access_level"],
"OAUTH_FULL_NAME": results["fullname"],
"OAUTH_USERNAME": results["username"],
"OAUTH_USER_NSID": results["user_nsid"],
}
return user_secrets
```
#### File: datasets/google/client.py
```python
import json
import logging
from pathlib import Path
from typing import List
from google_images_search import GoogleImagesSearch
from tqdm import tqdm
from flower_classifier import REPO_DIR
from flower_classifier.datasets import get_secrets
from flower_classifier.datasets.oxford_flowers import NAMES as oxford_breeds
logger = logging.getLogger(__name__)
RESULTS_CACHE_FILE = Path(REPO_DIR, "assets", "breed_examples.json")
global RESULTS_CACHE
RESULTS_CACHE = json.loads(RESULTS_CACHE_FILE.read_text())
def query_google_images(flower_name: str):
if flower_name in RESULTS_CACHE:
return RESULTS_CACHE[flower_name]
else:
logger.info(f"Cache miss for {flower_name}, querying Google Images Search...")
secrets = get_secrets()
gis = GoogleImagesSearch(secrets["GIS_API_KEY"], secrets["GIS_PROJECT_CX"])
gis.search({"q": f"{flower_name} flower", "num": 3})
return [result.url for result in gis.results()]
def export_similar_images(breeds: List[str] = oxford_breeds, query_limit: int = 100):
query_count = 0
for breed in tqdm(breeds):
if breed not in RESULTS_CACHE:
urls = query_google_images(breed)
query_count += 1
RESULTS_CACHE[breed] = urls
if query_count >= query_limit:
break
RESULTS_CACHE_FILE.write_text(json.dumps(RESULTS_CACHE))
```
#### File: flower-classifier/flower_classifier/label.py
```python
import concurrent.futures
import json
import shutil
from pathlib import Path
import flickrapi
import typer
from flower_classifier.datasets.flickr.auth import USER_ID as FLICKR_USER_ID
from flower_classifier.datasets.flickr.client import (
download_photo,
get_authenticated_client,
get_photo_tags,
update_photo_tags,
)
app = typer.Typer()
LABELED_TAG = "status:labeled"
UNCERTAIN_TAG = "status:uncertain"
def _confirm_directory_is_empty(dir: Path, exit_message="Exiting program."):
if dir.exists() and any(dir.iterdir()):
overwrite = typer.confirm(f"{dir} is not empty, are you sure you want to overwrite it?")
if not overwrite:
typer.echo(exit_message)
raise typer.Exit()
shutil.rmtree(dir)
def _parse_tags(flickr_client: flickrapi.FlickrAPI, photo_id: str):
tags = get_photo_tags(flickr_client, photo_id)
tags = [t.split(":") for t in tags if ":" in t]
return dict(tags)
@app.command()
def get_photos(
photo_count: int = typer.Option(50, prompt=True),
label_dir: str = typer.Option(str(Path.home() / "flower_classifier_labels"), prompt=True),
):
"""
Download a specified number of photos to be labeled.
"""
    # initialize directories
label_dir = Path(label_dir)
unlabeled_dir: Path = label_dir / "unlabeled"
labeled_dir: Path = label_dir / "labeled"
_confirm_directory_is_empty(
unlabeled_dir,
exit_message="Exiting program. Please return existing photos to the labeling pool and start again.",
)
_confirm_directory_is_empty(
labeled_dir,
exit_message="Exiting program. Make sure existing labeled photos are uploaded to Google Drive, "
"empty the local folder (so you don't accidentally upload photos twice), and start again.",
)
unlabeled_dir.mkdir(exist_ok=True, parents=True)
labeled_dir.mkdir(exist_ok=True, parents=True)
flickr_client = get_authenticated_client(format="etree")
def _download_photo_to_label(photo_id, filename):
tags = _parse_tags(flickr_client, photo_id)
user_judgement = tags.get("user_judgement", "unsure")
predicted_breed = tags.get("pred", "unknown")
download_path = unlabeled_dir / user_judgement / predicted_breed / filename
download_path.parent.mkdir(parents=True, exist_ok=True)
download_photo(flickr_client, photo_id, download_path)
update_photo_tags(flickr_client, photo_id, insert_tags=[LABELED_TAG])
return {"photo_id": photo_id, "tags": tags, "download_path": str(download_path)}
count = 0
# collect photos to be downloaded
download_queue = []
for count, photo in enumerate(
flickr_client.walk(
tag_mode="all",
# adding a "-" in front of a tag excludes results that match the tag
tags=f"-{LABELED_TAG}, -{UNCERTAIN_TAG}",
user_id=FLICKR_USER_ID,
)
):
if count >= photo_count:
break
photo_id = photo.get("id")
filename = photo.get("title")
download_queue.append((photo_id, filename))
# download photos in parallel
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future_to_photo_id = {
executor.submit(_download_photo_to_label, photo_id, filename): filename
for photo_id, filename in download_queue
}
# collect the results
downloaded_photos = {}
for future in concurrent.futures.as_completed(future_to_photo_id):
data = future.result()
download_path = data["download_path"]
downloaded_photos[download_path] = {"photo_id": data["photo_id"], "tags": data["tags"]}
(unlabeled_dir / "info.json").write_text(json.dumps(downloaded_photos))
@app.command()
def return_photos_to_labeling_pool(
label_dir: str = typer.Option(str(Path.home() / "flower_classifier_labels"), prompt=True),
remaining_uncertain: bool = typer.Option(False, help="Mark remaining photos as uncertain?", prompt=True),
):
"""
Remove the "labeled" tag from any remaining photos in the unlabeled directory.
If we need further review for remaining photos, tag as uncertain.
"""
    # initialize directories
label_dir = Path(label_dir)
unlabeled_dir: Path = label_dir / "unlabeled"
downloaded_photos = json.loads((unlabeled_dir / "info.json").read_text())
# remove the "labeled" tag from any remaining photos
flickr_client = get_authenticated_client()
def _remove_labeled_tag(photo_id):
insert_tags = [UNCERTAIN_TAG] if remaining_uncertain else None
update_photo_tags(flickr_client, photo_id, insert_tags=insert_tags, remove_tags=[LABELED_TAG])
return photo_id
futures = []
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
for photo in unlabeled_dir.rglob("**/*.jpg"):
photo_id = downloaded_photos.get(str(photo), {}).get("photo_id")
if photo_id:
future = executor.submit(_remove_labeled_tag, photo_id)
futures.append(future)
else:
typer.echo(f"Couldn't find a photo id for {photo}, please remove the tag manually.")
for future in concurrent.futures.as_completed(futures):
photo_id = future.result()
typer.echo(f"Returned {photo_id} to the labeling queue.")
shutil.rmtree(unlabeled_dir)
@app.command()
def remove_labeled_photos_from_flickr():
raise NotImplementedError
@app.command()
def print_instructions():
instructions = """
Overview of the labeling process:
1. User requests unlabeled photos from Flickr. These photos are downloaded to a specified
"unlabeled" folder under the structure:
correct/
predicted_breed/
hash.jpg
wrong/
predicted_breed/
unsure/
predicted_breed/
2. User drags photos from the "unlabeled" folder into the "labeled" folder of the same
root directory.
3. User copies photos from "labeled" folder into the Google Drive dataset directory,
conforming to the structure:
labeled_breed/
hash.jpg
labeled_breed/
hash.jpg
4. We'll tag photos on Flickr as "labeled" as soon as they're downloaded to a
user's machine in Step 1. If the user does not finish labeling all of the photos,
they can remove the "labeled" tag from the remaining photos by running the command
to return photos to the labeling pool. We'll do this for all of the photos remaining
in the "unlabeled" folder.
5. (Optional) Flickr limits our album to 1,000 photos. If needed we can delete photos
which have already been marked as labeled. However, we should coordinate with all of
the labelers and make sure they don't need to return any to the queue before doing
this.
"""
print(instructions)
```
#### File: flower_classifier/models/baseline.py
```python
from collections import OrderedDict
import torch
import torch.nn as nn
from torchvision.models import resnet34
class BaselineResnet(nn.Module):
def __init__(self, n_classes=102, pretrained=True):
super().__init__()
assert n_classes > 0, "must have at least one class"
resnet = resnet34(pretrained=pretrained)
self.backbone = nn.Sequential(OrderedDict(list(resnet.named_children())[:-1]))
in_features = resnet.fc.in_features
self.classifier = nn.Linear(in_features, n_classes)
def forward(self, x):
x = self.backbone(x)
x = torch.flatten(x, 1)
logits = self.classifier(x)
return logits
```
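A quick shape check for the baseline model above; the batch size and image size are arbitrary, and `pretrained=False` simply avoids a weight download:
```python
# Sketch: the resnet34 backbone global-pools to 512 features before the linear head.
import torch
model = BaselineResnet(n_classes=102, pretrained=False)
logits = model(torch.zeros(4, 3, 224, 224))
assert logits.shape == (4, 102)
```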
#### File: flower_classifier/models/classifier.py
```python
import logging
import hydra
import pytorch_lightning as pl
import timm
import torch
import torch.nn as nn
from omegaconf import DictConfig, OmegaConf
from torchmetrics.functional import confusion_matrix
from flower_classifier.visualizations import generate_confusion_matrix
logger = logging.getLogger(__name__)
# add default for cases where we don't initialize with hydra main
DEFAULT_OPTIMIZER = OmegaConf.create({"_target_": "torch.optim.Adam", "lr": 0.001})
class FlowerClassifier(pl.LightningModule):
def __init__(
self,
architecture: str,
dropout_rate: float = 0.0,
global_pool: str = "avg",
num_classes: int = 102,
batch_size: int = 64,
optimizer_config: DictConfig = DEFAULT_OPTIMIZER,
lr_scheduler_config: DictConfig = None,
pretrained: bool = True,
):
super().__init__()
self.save_hyperparameters()
self.optimizer_config = optimizer_config
self.lr_scheduler_config = lr_scheduler_config
# sanity check values
pool_options = {"avg", "max", "avgmax", "avgmaxc"}
model_options = timm.list_models(pretrained=True)
assert global_pool in pool_options, f"global_pool must be one of: {pool_options}"
assert architecture in model_options, "model architecture not recognized"
# define training objects
self.network = timm.create_model(
architecture,
pretrained=pretrained,
num_classes=num_classes,
drop_rate=dropout_rate,
global_pool=global_pool,
)
self.criterion = nn.CrossEntropyLoss()
self.accuracy_metric = pl.metrics.Accuracy()
@property
def example_input_array(self):
return torch.zeros(1, 3, 256, 256)
def forward(self, x):
return self.network(x)
def _step(self, batch):
imgs, labels = batch
logits = self(imgs)
loss = self.criterion(logits, labels)
preds = nn.functional.softmax(logits, dim=1).argmax(1)
acc = self.accuracy_metric(preds, labels)
return {"loss": loss, "accuracy": acc, "preds": preds, "labels": labels}
def training_step(self, batch, batch_idx):
step_result = self._step(batch)
self.log("train/loss", step_result["loss"], on_step=True)
self.log("train/acc", step_result["accuracy"], on_step=True)
self.log("epoch", self.current_epoch, on_step=True)
return {"loss": step_result["loss"]}
def validation_step(self, batch, batch_idx):
step_result = self._step(batch)
self.log("val/loss", step_result["loss"], on_step=False, on_epoch=True, reduce_fx=torch.mean)
self.log("val/acc", step_result["accuracy"], on_step=False, on_epoch=True, reduce_fx=torch.mean)
return step_result
def validation_epoch_end(self, validation_step_outputs):
if self.logger and self.current_epoch > 0 and self.current_epoch % 5 == 0:
epoch_preds = torch.cat([x["preds"] for x in validation_step_outputs])
epoch_targets = torch.cat([x["labels"] for x in validation_step_outputs])
cm = confusion_matrix(epoch_preds, epoch_targets, num_classes=self.hparams.num_classes).cpu().numpy()
class_names = getattr(self.train_dataloader().dataset, "classes", None)
fig = generate_confusion_matrix(cm, class_names=class_names)
self.logger.experiment.log({"confusion_matrix": fig})
def configure_optimizers(self):
optimizer = hydra.utils.instantiate(self.optimizer_config, params=self.parameters())
if self.lr_scheduler_config is None:
return optimizer
scheduler = hydra.utils.instantiate(self.lr_scheduler_config.scheduler, optimizer=optimizer)
scheduler_dict = OmegaConf.to_container(self.lr_scheduler_config, resolve=True)
scheduler_dict["scheduler"] = scheduler
return [optimizer], [scheduler_dict]
def on_fit_start(self):
if self.global_rank == 0 and getattr(self.trainer.datamodule, "to_csv", False):
experiment = getattr(self.logger, "experiment", None)
logger_dir = getattr(experiment, "dir", "output")
self.trainer.datamodule.to_csv(logger_dir)
def on_train_start(self):
dataset = self.train_dataloader().dataset
classes = getattr(dataset, "classes", None)
self.classes = classes
def on_save_checkpoint(self, checkpoint):
checkpoint["model_prediction_classes"] = self.classes
def on_load_checkpoint(self, checkpoint):
self.classes = checkpoint["model_prediction_classes"]
```
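A sketch of constructing the LightningModule directly, outside a Hydra run; the architecture name and optimizer settings are illustrative and assume the library versions this module was written against:
```python
# Sketch: instantiate the classifier and run its example input through the network.
from omegaconf import OmegaConf
optimizer_config = OmegaConf.create({"_target_": "torch.optim.Adam", "lr": 3e-4})
model = FlowerClassifier(
    architecture="resnet34",
    dropout_rate=0.1,
    num_classes=102,
    optimizer_config=optimizer_config,
    pretrained=False,
)
logits = model(model.example_input_array)  # shape (1, 102)
```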
#### File: flower_classifier/ui/model_compare.py
```python
import streamlit as st
from flower_classifier.artifacts import list_run_files
from flower_classifier.ui import components
st.set_page_config(layout="wide", initial_sidebar_state="expanded")
st.title("Model Comparison")
# ----- sidebar -----
pil_image = components.get_photo(use_sidebar=True)
st.sidebar.image(pil_image, caption="Input image", use_column_width=True)
# ----- model comparision -----
col1, col2 = st.beta_columns(2)
def select_model(model_id):
run_id = st.text_input("Model training run ID:", key=model_id)
if not run_id:
st.stop()
files = list_run_files(run_id=run_id)
checkpoint_file = st.selectbox("Choose a checkpoint file:", [None, *files])
return run_id, checkpoint_file
with col1:
st.write("Model A")
run_id, checkpoint_file = select_model("Model A")
if checkpoint_file:
model = components.download_model_wandb(run_id, checkpoint_file)
preds = components.make_prediction(model, pil_image)
components.display_prediction(model, preds)
components.display_top_3_table(model, preds)
components.display_prediction_distribution(model, preds)
with col2:
st.write("Model B")
run_id, checkpoint_file = select_model("Model B")
if checkpoint_file:
model = components.download_model_wandb(run_id, checkpoint_file)
preds = components.make_prediction(model, pil_image)
components.display_prediction(model, preds)
components.display_top_3_table(model, preds)
components.display_prediction_distribution(model, preds)
```
#### File: flower-classifier/tests/test_visualizations.py
```python
import numpy as np
from plotly.graph_objects import Figure
from flower_classifier.visualizations import generate_confusion_matrix
def test_confusion_matrix_with_class_names():
cm = np.array([[6, 0, 1], [2, 4, 0], [1, 1, 8]])
class_names = ["a", "b", "c"]
fig = generate_confusion_matrix(cm, class_names)
assert isinstance(fig, Figure)
def test_confusion_matrix_without_class_names():
cm = np.array([[6, 0, 1], [2, 4, 0], [1, 1, 8]])
class_names = None
fig = generate_confusion_matrix(cm, class_names)
assert isinstance(fig, Figure)
```
|
{
"source": "jeremyjordan/muspy",
"score": 2
}
|
#### File: muspy/muspy/classes.py
```python
from collections import OrderedDict
from typing import Any, Callable, List, Optional
from .base import Base, ComplexBase
from .schemas import DEFAULT_SCHEMA_VERSION
DEFAULT_VELOCITY = 64
__all__ = [
"Annotation",
"Chord",
"DEFAULT_VELOCITY",
"KeySignature",
"Lyric",
"Metadata",
"Note",
"Tempo",
"TimeSignature",
"Track",
]
# pylint: disable=super-init-not-called
class Metadata(Base):
"""A container for metadata.
Attributes
----------
schema_version : str
Schema version. Defaults to the latest version.
title : str, optional
Song title.
creators : list of str, optional
Creator(s) of the song.
copyright : str, optional
Copyright notice.
collection : str, optional
Name of the collection.
source_filename : str, optional
Name of the source file.
source_format : str, optional
Format of the source file.
"""
_attributes = OrderedDict(
[
("schema_version", str),
("title", str),
("creators", str),
("copyright", str),
("collection", str),
("source_filename", str),
("source_format", str),
]
)
_optional_attributes = [
"title",
"creators",
"copyright",
"collection",
"source_filename",
"source_format",
]
_list_attributes = ["creators"]
def __init__(
self,
schema_version: str = DEFAULT_SCHEMA_VERSION,
title: Optional[str] = None,
creators: Optional[List[str]] = None,
copyright: Optional[str] = None,
collection: Optional[str] = None,
source_filename: Optional[str] = None,
source_format: Optional[str] = None,
):
# pylint: disable=redefined-builtin
self.schema_version = schema_version
self.title = title
self.creators = creators if creators is not None else []
self.copyright = copyright
self.collection = collection
self.source_filename = source_filename
self.source_format = source_format
class Tempo(Base):
"""A container for key signature.
Attributes
----------
time : int
Start time of the tempo, in time steps.
qpm : float
Tempo in qpm (quarters per minute).
"""
_attributes = OrderedDict([("time", int), ("qpm", (int, float))])
def __init__(self, time: int, qpm: float):
self.time = time
self.qpm = float(qpm)
def __eq__(self, other):
return self.time == other.time and self.qpm == other.qpm
def _validate(self, attr: str, recursive: bool):
super()._validate(attr, recursive)
if attr == "qpm" and self.qpm <= 0:
raise ValueError("`qpm` must be positive.")
class KeySignature(Base):
"""A container for key signature.
Attributes
----------
time : int
Start time of the key signature, in time steps or seconds.
root : int, optional
Root of the key signature.
mode : str, optional
Mode of the key signature.
fifths : int, optional
Number of flats or sharps. Positive numbers for sharps and
negative numbers for flats.
root_str : str, optional
Root of the key signature as a string.
"""
_attributes = OrderedDict(
[
("time", int),
("root", int),
("mode", str),
("fifths", int),
("root_str", str),
]
)
_optional_attributes = ["root", "mode", "fifths", "root_str"]
def __init__(
self,
time: int,
root: Optional[int] = None,
mode: Optional[str] = None,
fifths: Optional[int] = None,
root_str: Optional[str] = None,
):
self.time = time
self.root = root
self.mode = mode
self.fifths = fifths
self.root_str = root_str
class TimeSignature(Base):
"""A container for time signature.
Attributes
----------
time : int
Start time of the time signature, in time steps or seconds.
numerator : int
Numerator of the time signature.
denominator : int
Denominator of the time signature.
"""
_attributes = OrderedDict(
[("time", int), ("numerator", int), ("denominator", int)]
)
def __init__(self, time: int, numerator: int, denominator: int):
self.time = time
self.numerator = numerator
self.denominator = denominator
def _validate(self, attr: str, recursive: bool):
super()._validate(attr, recursive)
if attr == "numerator" and self.numerator < 1:
raise ValueError("`numerator` must be positive.")
if attr == "denominator" and self.denominator < 1:
raise ValueError("`denominator` must be positive.")
class Lyric(Base):
"""A container for lyric.
Attributes
----------
time : int
Start time of the lyric, in time steps or seconds.
lyric : str
Lyric (sentence, word, syllable, etc.).
"""
_attributes = OrderedDict([("time", int), ("lyric", str)])
def __init__(self, time: int, lyric: str):
self.time = time
self.lyric = lyric
class Annotation(Base):
"""A container for annotation.
Attributes
----------
time : int
Start time of the annotation, in time steps or seconds.
annotation : any
Annotation of any type.
group : str, optional
Group name for better organizing the annotations.
"""
_attributes = OrderedDict([("time", int), ("annotation", str)])
_optional_attributes = ["group"]
def __init__(
self, time: int, annotation: Any, group: Optional[str] = None
):
self.time = time
self.annotation = annotation
self.group = group
class Note(Base):
"""A container for note.
Attributes
----------
time : int
Start time of the note, in time steps.
pitch : int
Note pitch, as a MIDI note number.
duration : int
Duration of the note, in time steps.
velocity : int, optional
Note velocity. Defaults to `muspy.DEFAULT_VELOCITY`.
pitch_str : str
Note pitch as a string, useful for distinguishing C# and Db.
"""
_attributes = OrderedDict(
[
("time", int),
("pitch", int),
("duration", int),
("velocity", int),
("pitch_str", str),
]
)
_optional_attributes = ["velocity", "pitch_str"]
def __init__(
self,
time: int,
pitch: int,
duration: int,
velocity: Optional[int] = None,
pitch_str: Optional[str] = None,
):
self.time = time
self.pitch = pitch
self.duration = duration
self.velocity = velocity if velocity is not None else DEFAULT_VELOCITY
self.pitch_str = pitch_str
@property
def start(self):
"""Start time of the note."""
return self.time
@start.setter
def start(self, start):
"""Setter for start time."""
self.time = start
@property
def end(self):
"""End time of the note."""
return self.time + self.duration
@end.setter
def end(self, end):
"""Setter for end time."""
self.duration = end - self.time
def _validate(self, attr: str, recursive: bool):
super()._validate(attr, recursive)
if attr == "pitch" and (self.pitch < 0 or self.pitch > 127):
raise ValueError("`pitch` must be in between 0 to 127.")
if attr == "duration" and self.duration < 0:
raise ValueError("`duration` must be nonnegative.")
if attr == "velocity" and (self.velocity < 0 or self.velocity > 127):
raise ValueError("`velocity` must be in between 0 to 127.")
def _adjust_time(
self, func: Callable[[int], int], attr: str, recursive: bool
):
if attr == "time":
self.time = func(self.time)
elif attr == "duration":
self.duration = func(self.duration)
def transpose(self, semitone: int) -> "Note":
"""Transpose the note by a number of semitones.
Parameters
----------
semitone : int
Number of semitones to transpose the note. A positive value
raises the pitch, while a negative value lowers the pitch.
Returns
-------
Object itself.
"""
self.pitch += semitone
return self
def clip(self, lower: int = 0, upper: int = 127) -> "Note":
"""Clip the velocity of the note.
Parameters
----------
lower : int, optional
Lower bound. Defaults to 0.
upper : int, optional
Upper bound. Defaults to 127.
Returns
-------
Object itself.
"""
        assert upper >= lower, "`upper` must be greater than or equal to `lower`."
if self.velocity > upper:
self.velocity = upper
elif self.velocity < lower:
self.velocity = lower
return self
class Chord(Base):
"""A container for chord.
Attributes
----------
time : int
Start time of the chord, in time steps.
pitches : list of int
Note pitches, as MIDI note numbers.
duration : int
Duration of the chord, in time steps.
velocity : int, optional
Chord velocity. Defaults to `muspy.DEFAULT_VELOCITY`.
pitches_str : list of str
Note pitches as strings, useful for distinguishing C# and Db.
"""
_attributes = OrderedDict(
[
("time", int),
("pitches", int),
("duration", int),
("velocity", int),
("pitches_str", str),
]
)
_optional_attributes = ["velocity", "pitches_str"]
def __init__(
self,
time: int,
pitches: List[int],
duration: int,
velocity: Optional[int] = None,
        pitches_str: Optional[List[str]] = None,
):
self.time = time
self.pitches = pitches
self.duration = duration
self.velocity = velocity if velocity is not None else DEFAULT_VELOCITY
self.pitches_str = pitches_str
@property
def start(self):
"""Start time of the chord."""
return self.time
@start.setter
def start(self, start):
"""Setter for start time."""
self.time = start
@property
def end(self):
"""End time of the chord."""
return self.time + self.duration
@end.setter
def end(self, end):
"""Setter for end time."""
self.duration = end - self.time
def _validate(self, attr: str, recursive: bool):
super()._validate(attr, recursive)
if attr == "pitches":
for pitch in self.pitches:
if pitch < 0 or pitch > 127:
raise ValueError(
"`pitches` must be a list of integers between 0 to "
"127."
)
if attr == "duration" and self.duration < 0:
raise ValueError("`duration` must be nonnegative.")
if attr == "velocity" and (self.velocity < 0 or self.velocity > 127):
raise ValueError("`velocity` must be in between 0 to 127.")
def _adjust_time(
self, func: Callable[[int], int], attr: str, recursive: bool
):
if attr == "time":
self.time = func(self.time)
elif attr == "duration":
self.duration = func(self.duration)
def transpose(self, semitone: int) -> "Chord":
"""Transpose the notes by a number of semitones.
Parameters
----------
semitone : int
Number of semitones to transpose the notes. A positive value
raises the pitches, while a negative value lowers the
pitches.
Returns
-------
Object itself.
"""
        self.pitches = [pitch + semitone for pitch in self.pitches]
return self
def clip(self, lower: int = 0, upper: int = 127) -> "Chord":
"""Clip the velocity of the chord.
Parameters
----------
lower : int, optional
Lower bound. Defaults to 0.
upper : int, optional
Upper bound. Defaults to 127.
Returns
-------
Object itself.
"""
        assert upper >= lower, "`upper` must be greater than or equal to `lower`."
if self.velocity > upper:
self.velocity = upper
elif self.velocity < lower:
self.velocity = lower
return self
class Track(ComplexBase):
"""A container for music track.
Attributes
----------
program : int, 0-127, optional
Program number according to General MIDI specification [1]_.
Defaults to 0 (Acoustic Grand Piano).
is_drum : bool, optional
Whether it is a percussion track. Defaults to False.
name : str, optional
Track name.
notes : list of :class:`muspy.Note`, optional
Musical notes. Defaults to an empty list.
chords : list of :class:`muspy.Chord`, optional
Chords. Defaults to an empty list.
annotations : list of :class:`muspy.Annotation`, optional
Annotations. Defaults to an empty list.
lyrics : list of :class:`muspy.Lyric`, optional
Lyrics. Defaults to an empty list.
Note
----
Indexing a Track object returns the note at a certain index. That
is, ``track[idx]`` returns ``track.notes[idx]``. Length of a Track
object is the number of notes. That is, ``len(track)`` returns
``len(track.notes)``.
References
----------
.. [1] https://www.midi.org/specifications/item/gm-level-1-sound-set
"""
_attributes = OrderedDict(
[
("program", int),
("is_drum", bool),
("name", str),
("notes", Note),
("chords", Chord),
("lyrics", Lyric),
("annotations", Annotation),
]
)
_optional_attributes = ["name", "notes", "chords", "lyrics", "annotations"]
_list_attributes = ["notes", "chords", "lyrics", "annotations"]
def __init__(
self,
program: int = 0,
is_drum: bool = False,
name: Optional[str] = None,
notes: Optional[List[Note]] = None,
chords: Optional[List[Chord]] = None,
lyrics: Optional[List[Lyric]] = None,
annotations: Optional[List[Annotation]] = None,
):
self.program = program if program is not None else 0
        self.is_drum = is_drum if is_drum is not None else False
self.name = name
self.notes = notes if notes is not None else []
self.chords = chords if chords is not None else []
self.lyrics = lyrics if lyrics is not None else []
self.annotations = annotations if annotations is not None else []
def __len__(self) -> int:
return len(self.notes)
def __getitem__(self, key: int) -> Note:
return self.notes[key]
def __setitem__(self, key: int, value: Note):
self.notes[key] = value
def _validate(self, attr: str, recursive: bool):
super()._validate(attr, recursive)
if attr == "program" and self.program < 0 or self.program > 127:
raise ValueError("`program` must be in between 0 to 127.")
def get_end_time(self, is_sorted: bool = False) -> int:
"""Return the time of the last event.
This includes notes, chords, lyrics and annotations.
Parameters
----------
is_sorted : bool
Whether all the list attributes are sorted. Defaults to
False.
"""
def _get_end_time(list_, ref_attr="time"):
if not list_:
return 0
if is_sorted:
return getattr(list_[-1], ref_attr)
return max(getattr(item, ref_attr) for item in list_)
return max(
_get_end_time(self.notes, "end"),
_get_end_time(self.chords, "end"),
_get_end_time(self.lyrics),
_get_end_time(self.annotations),
)
def clip(self, lower: int = 0, upper: int = 127) -> "Track":
"""Clip the velocity of each note.
Parameters
----------
lower : int, optional
Lower bound. Defaults to 0.
upper : int, optional
Upper bound. Defaults to 127.
Returns
-------
Object itself.
"""
for note in self.notes:
note.clip(lower, upper)
return self
def transpose(self, semitone: int) -> "Track":
"""Transpose the notes by a number of semitones.
Parameters
----------
semitone : int
Number of semitones to transpose the notes. A positive value
raises the pitches, while a negative value lowers the
pitches.
Returns
-------
Object itself.
"""
for note in self.notes:
note.transpose(semitone)
return self
```
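A minimal sketch of building a track with the classes above and using the transpose/clip helpers; the pitches and durations are arbitrary:
```python
# Sketch: two notes, transposed up a whole tone and velocity-clipped.
track = Track(program=0, name="melody")
track.notes.append(Note(time=0, pitch=60, duration=24, velocity=100))
track.notes.append(Note(time=24, pitch=64, duration=24))  # velocity defaults to 64
track.transpose(2)
track.clip(lower=20, upper=110)
print(len(track), track.get_end_time())  # 2 48
```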
#### File: muspy/datasets/jsb.py
```python
from pathlib import Path
from typing import Union
from ..inputs import read_midi
from ..music import Music
from .base import DatasetInfo, RemoteFolderDataset
_NAME = "JSB Chorales Dataset"
_DESCRIPTION = """\
The JSB Chorales Dataset is a collection of 382 four-part chorales by Johann \
<NAME>. This dataset is used in the paper "Modeling Temporal \
Dependencies in High-Dimensional Sequences: Application to Polyphonic Music \
Generation and Transcription" in ICML 2012. It comes with train, test and \
validation split used in the paper "Harmonising Chorales by Probabilistic \
Inference" in NIPS 2005."""
_HOMEPAGE = "http://www-etud.iro.umontreal.ca/~boulanni/icml2012"
_CITATION = """\
@inproceedings{boulangerlewandowski2012modeling,
author={<NAME> and <NAME> and Pascal \
Vincent},
title={Modeling Temporal Dependencies in High-Dimensional Sequences: \
Application to Polyphonic Music Generation and Transcription},
booktitle={Proceedings of the 29th International Conference on Machine \
Learning (ICML)},
year={2012}
}"""
class JSBChoralesDataset(RemoteFolderDataset):
"""Johann Sebastian Bach Chorales Dataset."""
_info = DatasetInfo(_NAME, _DESCRIPTION, _HOMEPAGE)
_citation = _CITATION
_sources = {
"jsb": {
"filename": "JSB Chorales.zip",
"url": (
"http://www-etud.iro.umontreal.ca/~boulanni/JSB%20Chorales.zip"
),
"archive": True,
"size": 215242,
"md5": "2fb72faf2659e82e9de08b16f2cca1e9",
"sha256": "69924294546a71620c06374085cd6b0300665ea215e2f854f65a119"
"29f1e723c",
}
}
_extension = "mid"
def read(self, filename: Union[str, Path]) -> Music:
"""Read a file into a Music object."""
music = read_midi(self.root / filename)
        # The resolution of MIDI files in this dataset should be 120, but
# is incorrectly set to 100
music.resolution = 120
return music
```
#### File: muspy/datasets/utils.py
```python
import gzip
import hashlib
import os
import os.path
import shutil
import tarfile
import zipfile
from pathlib import Path
from typing import Optional, Union
from urllib.request import urlretrieve
from tqdm import tqdm
# TODO: Add SHA checksum supports
class _ProgressBar:
"""A callable progress bar object.
Code is adapted from https://stackoverflow.com/a/53643011.
"""
def __init__(self):
self.pbar = None
def __call__(self, block_num, block_size, total_size):
if self.pbar is None:
self.pbar = tqdm(total=total_size)
        downloaded = block_num * block_size
        # tqdm.update() takes an increment, so pass only the newly downloaded amount.
        self.pbar.update(downloaded - self.pbar.n)
def compute_md5(path: Union[str, Path], chunk_size: int):
"""Return the MD5 checksum of a file, calculated chunk by chunk.
Parameters
----------
path : str or Path
Path to the file to be read.
chunk_size : int
Chunk size used to calculate the MD5 checksum.
"""
md5 = hashlib.md5()
with open(str(path), "rb") as f:
for chunk in iter(lambda: f.read(chunk_size), b""):
md5.update(chunk)
return md5.hexdigest()
def check_md5(path: Union[str, Path], md5: str, chunk_size: int = 1024 * 1024):
"""Check if the MD5 checksum of a file matches the expected one.
Parameters
----------
path : str or Path
Path to the file to be check.
md5 : str
Expected MD5 checksum of the file.
chunk_size : int, optional
Chunk size used to calculate the MD5 checksum. Defaults to 2^20.
"""
return md5 == compute_md5(path, chunk_size)
def download_url(
url: str, path: Union[str, Path], md5: Optional[str] = None,
):
"""Download a file from a URL.
Parameters
----------
url : str
URL to the file to download.
path : str or Path
Path to save the downloaded file.
md5 : str, optional
Expected MD5 checksum of the downloaded file. If None, do not
check.
"""
urlretrieve(url, str(path), reporthook=_ProgressBar())
if md5 is not None and not check_md5(path, md5):
raise RuntimeError("Downloaded file is corrupted.")
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith("download_warning"):
return value
return None
def _save_response_content(response, destination, chunk_size=32768):
with open(destination, "wb") as f:
pbar = tqdm(total=None)
progress = 0
for chunk in response.iter_content(chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
progress += len(chunk)
pbar.update(progress - pbar.n)
def extract_archive(
path: Union[str, Path],
root: Optional[Union[str, Path]] = None,
kind: Optional[str] = None,
cleanup: bool = False,
):
"""Extract an archive in TAR, TGZ, TXZ, GZ or ZIP format.
Parameters
----------
path : str or Path
Path to the archive to be extracted.
root : str or Path, optional
Root directory to save the extracted file. If None, use the
dirname of ``path``.
kind : {'tar', 'tgz', 'txz', 'gz', 'zip'}, optional
        Format of the archive to be extracted. If None, infer the format
from the extension of ``path``.
cleanup : bool, optional
Whether to remove the original archive. Defaults to False.
"""
path = str(path)
if root is None:
root = os.path.dirname(str(path))
if kind is None:
if path.lower().endswith(".tar"):
kind = "tar"
elif path.lower().endswith((".tar.gz", ".tgz")):
kind = "tgz"
elif path.lower().endswith((".tar.xz", ".txz")):
kind = "txz"
elif path.lower().endswith(".gz"):
kind = "gz"
elif path.lower().endswith(".zip"):
kind = "zip"
else:
raise ValueError(
"Got unsupported file format (expect TAR, TGZ, TXZ, GZ or "
"ZIP)."
)
if kind == "tar":
with tarfile.open(str(path), "r") as f:
f.extractall(path=root)
elif kind == "tgz":
with tarfile.open(str(path), "r:gz") as f:
f.extractall(path=root)
elif kind == "txz":
with tarfile.open(str(path), "r:xz") as f:
f.extractall(path=root)
elif kind == "gz":
filename = os.path.join(
root, os.path.splitext(os.path.basename(path))[0]
)
with gzip.open(str(path), "rb") as f_in, open(filename, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
elif kind == "zip":
with zipfile.ZipFile(str(path), "r") as zip_file:
zip_file.extractall(root)
else:
raise ValueError(
"`kind` must be one of 'tar', 'tgz', 'txz', 'gz' and 'zip'."
)
if cleanup:
os.remove(path)
```
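A hedged usage sketch for the helpers above; the URL, paths, and omitted checksum are placeholders:
```python
# Sketch: download an archive and unpack it next to itself, removing the archive afterwards.
from pathlib import Path
archive = Path("data") / "example.zip"
archive.parent.mkdir(parents=True, exist_ok=True)
download_url("https://example.com/example.zip", archive, md5=None)
extract_archive(archive, root="data", cleanup=True)
```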
#### File: muspy/inputs/yaml.py
```python
from pathlib import Path
from typing import Union
import yaml
from ..music import Music
def load_yaml(path: Union[str, Path]) -> Music:
"""Load a YAML file into a Music object.
Parameters
----------
path : str or Path
Path to the file to load.
Returns
-------
:class:`muspy.Music`
Loaded Music object.
"""
with open(str(path)) as f:
data = yaml.safe_load(f)
return Music.from_dict(data)
```
#### File: muspy/outputs/json.py
```python
import json
from pathlib import Path
from typing import TYPE_CHECKING, Union
if TYPE_CHECKING:
from ..music import Music
def save_json(path: Union[str, Path], music: "Music"):
"""Save a Music object to a JSON file.
Parameters
----------
path : str or Path
Path to save the JSON file.
music : :class:`muspy.Music`
Music object to save.
"""
with open(str(path), "w") as f:
f.write(json.dumps(music.to_ordered_dict()))
```
|
{
"source": "jeremyjpj0916/PySBR",
"score": 3
}
|
#### File: pysbr/config/sportsbook.py
```python
from typing import Dict, List, Union, Optional
from collections import OrderedDict
import pysbr.utils as utils
from pysbr.config.config import Config
class Sportsbook(Config):
"""Provides access to sportsbook config file.
In the config file there is information provided for the 27 sportsbooks found on
SBR.
Note that the id returned from lines-related queries is called 'sportsbook id' by
this application, which is translated from 'paid', the name returned from SBR.
There is another sportsbook id that is only used by the 'Sportsbooks' query, that
is called 'system sportsbook id' by this application, which is translated from
'sbid', the name returned from SBR. The system sportsbook id is not used by other
parts of the application.
Attributes:
names (Dict[int, str]): Map sportsbook id to name. Used by Query.list() and
Query.dataframe() in order to translate from id to name.
"""
def __init__(self):
super().__init__()
self._sportsbooks = self._translate_dict(
utils.load_yaml(utils.build_yaml_path("sportsbooks"))
)
self._sportsbook_ids = self._build_sportsbook_ids()
self.names = {
x["sportsbook id"]: x["name"] for x in self._sportsbooks["sportsbooks"]
}
def _build_sportsbook_ids(self) -> Dict[str, Dict[str, int]]:
"""Build sportsbook id search dictionary."""
s = self._sportsbooks["sportsbooks"]
sportsbooks = {}
for k in ["name", "alias"]:
sportsbooks[k] = {}
for x in s:
sportsbooks[k][x[k].lower()] = x["sportsbook id"]
return sportsbooks
def sportsbook_config(self) -> List[Dict[str, Union[str, int]]]:
"""Get sportsbook config list.
Each list element is a dict representing a sportsbook, with 'name',
        'short name', 'sportsbook id' and 'system sportsbook id' as keys.
"""
return self._sportsbooks
def id(self, term: Union[int, str]) -> Optional[int]:
"""Take provided search term and return matching sportsbook id.
If search term is string, search for matching sportsbook. If search term is
int, assume that it is the ID, and return it.
This method is provided as a convenience so that you don't need to
remember sportsbook id numbers. Case is ignored for search terms.
Example search terms:
'pinnacle'
'PINNACLE'
'bodog'
'bodog sportsbook'
20
Raises:
TypeError:
If a provided search term is not an int or str.
ValueError:
If a provided search term string cannot be matched with a sportsbook.
"""
return self.ids(term)[0]
def ids(self, terms: Union[List[Union[int, str]], int, str]) -> List[int]:
"""Take provided search terms and return list of matching sportsbook ids.
If search term is string, search for matching sportsbook. If search term is
int, assume that it is the ID, and insert it into the list to be returned.
This method is provided as a convenience so that you don't need to
remember sportsbook id numbers. Case is ignored for search terms.
Example search terms:
'pinnacle'
'PINNACLE'
'bodog'
'bodog sportsbook'
20
Raises:
TypeError:
If a provided search term is not an int or str.
ValueError:
If a provided search term string cannot be matched with a sportsbook.
"""
terms = utils.make_list(terms)
ids = []
for t in terms:
if isinstance(t, int):
ids.append(t)
else:
old_t = t
try:
t = t.lower()
except AttributeError:
raise TypeError("Search terms must be ints or strings.")
try:
id = [v[t] for k, v in self._sportsbook_ids.items() if t in v][0]
ids.append(id)
except IndexError:
raise ValueError(f"Could not find sportsbook {old_t}.")
return list(OrderedDict.fromkeys(ids))
```
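A small sketch of resolving sportsbook ids with the class above; the search terms mirror the ones listed in the docstrings and assume the bundled config file:
```python
# Sketch: mixed string/int search terms resolve to a de-duplicated list of ids.
sb = Sportsbook()
ids = sb.ids(["pinnacle", "BODOG", 20])  # strings are case-insensitive, ints pass through
pinnacle_id = sb.id("pinnacle")
print(ids, sb.names.get(pinnacle_id))
```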
#### File: pysbr/queries/lines.py
```python
import copy
from typing import List, Dict, Union, Tuple
import pandas as pd
from pysbr.queries.query import Query
from pysbr.config.sport import (
NFL,
NCAAF,
ATP,
Bundesliga,
EPL,
LaLiga,
MLB,
NBA,
NCAAB,
NHL,
UCL,
UEFANationsLeague,
UFC,
)
from pysbr.config.sport import (
Football,
Basketball,
Baseball,
Hockey,
Soccer,
Tennis,
Fighting,
)
from pysbr.config.sport import Sport
from pysbr.config.sportsbook import Sportsbook
class Lines(Query):
"""Implements methods particular to queries about betting lines.
This class should not be directly instantiated; use the subclasses defined for each
lines-related query.
"""
def __init__(self):
self._events = None
self._event_descriptions = {}
self._event_leagues = {}
self._event_sports = {}
self._event_scores = {}
self._event_statuses = {}
# these are the participant ids for Over/Under lines for all sports I checked
self._participants = {15143: "over", 15144: "under"}
self._leagues = {
16: NFL,
6: NCAAF,
23: ATP,
11: Bundesliga,
2: EPL,
17: LaLiga,
5: NBA,
3: MLB,
14: NCAAB,
7: NHL,
8: UCL,
1911: UEFANationsLeague,
26: UFC,
}
self._leagues_init = {}
self._sports = {
4: Football,
5: Basketball,
3: Baseball,
6: Hockey,
1: Soccer,
8: Tennis,
9: Fighting,
}
self._sports_init = {}
self._sportsbooks = None
self._with_ids_translated = None
super().__init__()
def _clean_lines(self, data: List[Dict]) -> List[Dict]:
"""Remove unneeded keys from the query response.
This is necessary for lines-related queries because they don't accept any
fields, so some unneeded fields are returned.
"""
to_remove = [
"boid",
"lineid",
"sequence",
"dp",
"bs",
"iof",
"sbid",
"sid",
"fpd",
"fpn",
"sort",
]
for term in to_remove:
for line in data:
try:
# ConsensusHistory has 'line' as a key, instead of being the
# top-level dictionary.
line = line["line"]
except KeyError:
pass
try:
# The earliest (time-wise) lines in consensusHistory may not have
# 'line' as a key.
line.pop(term, None)
except AttributeError:
pass
return data
def _init_config(self, data: List[Dict]) -> None:
"""Initialize private instance variables.
There is a lot of data that needs to be initialized in order to translate from
ids to information. This method does that.
self._leagues and self._sports map league and sport ids to their configuration
classes. Other private instance variables map event and participant ids to their
translations.
"""
if self._sportsbooks is None:
self._sportsbooks = Sportsbook().names
league_ids = [e.get("league id") for e in self._events.list()]
for id in set(league_ids):
try:
self._leagues_init[id] = self._leagues[id]()
except KeyError:
pass
sport_ids = [e.get("sport id") for e in self._events.list()]
for id in set(sport_ids):
try:
self._sports_init[id] = self._sports[id]()
except KeyError:
pass
for e in self._events.list():
self._event_descriptions[e.get("event id")] = e.get("description")
self._event_leagues[e.get("event id")] = e.get("league id")
self._event_sports[e.get("event id")] = e.get("sport id")
self._event_scores[e.get("event id")] = e.get("scores")
self._event_statuses[e.get("event id")] = e.get("event status")
try:
for p in e["participants"]:
participant_id = p.get("participant id")
source = p.get("source")
if "last name" in source and source.get("last name") is not None:
self._participants[participant_id] = source.get("last name")
elif (
"abbreviation" in source
and source.get("abbreviation") is not None
):
self._participants[participant_id] = source.get("abbreviation")
elif "name" in source and source.get("name") is not None:
self._participants[participant_id] = source.get("name")
except KeyError:
# Should not reach here; e['participants'] should be empty list at
# least.
pass
def _get_config(self, line: List[Dict]) -> Sport:
"""Get league or sport config class.
If neither the league nor the sport has a configuration class, then the market
name and bet result will not appear in the Query.list() or Query.dataframe().
"""
try:
# League needs to be before sport because it may override some attributes.
# E.g. NCAAB market periods.
return self._leagues_init[self._event_leagues.get(line.get("event id"))]
except KeyError:
try:
return self._sports_init[self._event_sports.get(line.get("event id"))]
except KeyError:
# neither league nor sport has a config file
return None
def _resolve_market(self, line: List[Dict]) -> str:
"""Attempt to get the name of the market from its id."""
try:
return self._get_config(line).market_names.get(line.get("market id"))
except AttributeError:
return None
def _tally_points(
self,
line: List[Dict],
period_scores: List[Dict[str, int]],
market_range: List[int],
) -> Tuple[int, int]:
"""Sum up the points scored over the range of interest, according to the market
in question.
If the market is a total, the return is of the form (total, []). Otherwise, the
return looks like (points_scored_by_team, points_scored_by_other_team).
"""
scores = copy.deepcopy(period_scores)
# TODO: check that len scores == market range (current and future events)
if market_range is not None:
scores = [s for s in period_scores if s.get("period") in market_range]
participant_id = line.get("participant id")
o_scores = []
if participant_id not in [15143, 15144]:
scores = [
s for s in period_scores if s.get("participant id") == participant_id
]
o_scores = [
s for s in period_scores if s.get("participant id") != participant_id
]
try:
return sum([s.get("points scored") for s in scores]), sum(
[s.get("points scored") for s in o_scores]
)
except TypeError:
return None, None
def _evaluate_bet(self, line: List[Dict], points, o_points):
"""Evaluate whether the bet won or lost.
Given a line, market, and point totals, determine whether the bet won or lost,
returning True if win, and False if lose.
"""
try:
market_type = self._get_config(line).market_types.get(line.get("market id"))
except AttributeError:
return None
participant_id = line.get("participant id")
spread_or_total = line["spread / total"]
if market_type == "total":
# participant ids for o/u
over = 15143
under = 15144
if (
participant_id == over
and points > spread_or_total
or participant_id == under
and points < spread_or_total
):
return True
else:
return False
else:
try:
if points + spread_or_total > o_points:
return True
else:
return False
except ValueError:
return None
def _resolve_bet(self, line: List[Dict]) -> Tuple[str, float]:
"""Given a line, determine what the result of the bet was.
Utilizes event scores list which is part of an event query response. This
method calculates the point totals for the team of the line in question vs. the
other team, over the periods / quarters that the bet is evaluated, in order to
determine the result of the bet.
Returns a tuple where the first value is 'W' or 'L', and the second value is
the amount of profit the bet would return on a $100 bet. If some step fails,
(None, None) is returned and the bet result / profit are not included for that
particular line. Errors are not raised.
"""
event_status = self._event_statuses.get(line.get("event id"))
scores = self._event_scores.get(line.get("event id"))
# Event query didn't have scores, or the game hasn't been played yet (query
# returns empty list), or event is in progress.
if not scores or event_status != "complete":
return None, None, None
try:
market_periods = self._get_config(line).market_periods.get(
line.get("market id")
)
except AttributeError:
            return None, None, None
try:
market_range = list(range(market_periods[0], market_periods[-1]))
except TypeError:
market_range = None
points, o_points = self._tally_points(line, scores, market_range)
if points is None or o_points is None:
return None, None, None
is_win = self._evaluate_bet(line, points, o_points)
if is_win is None:
return None, None, None
try:
profit = (
round((line.get("decimal odds") - 1) * 100, 2) if is_win else -100.0
)
        except (TypeError, ValueError):
return None, None, None
return ("W" if is_win else "L", profit, points)
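    # Illustrative profit math (hypothetical odds): with decimal odds of 1.91 a
    # winning $100 bet yields round((1.91 - 1) * 100, 2) == 91.0, while any loss
    # is recorded as -100.0 regardless of odds.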
def _translate_ids(self, data: List[Dict]) -> List[Dict]:
"""Add new entries to each element in the list for the element's id fields.
The response for lines-related queries has many ids without associated
information, making it hard to remember which line is related to which event.
This method adds that related information to each element in the list as long
as a list of events has been passed in when calling self.list() or
self.dataframe().
If a list of events (that the lines come from) is not passed to self.list() or
self.dataframe(), this method has no effect. Otherwise, it adds the following
to each element:
event description
betting market name
sportsbook name
participant information
bet result information (whether it won or lost; only for completed events)
self._with_ids_translated caches the returned list.
"""
if self._events is None:
return data
if self._with_ids_translated is not None:
return self._with_ids_translated
self._init_config(data)
for line in data:
line["event"] = self._event_descriptions.get(line.get("event id"))
market = self._resolve_market(line)
if market is not None:
line["market"] = market
result, profit, points = self._resolve_bet(line)
if result is not None:
line["result"] = result
if profit is not None:
line["profit"] = profit
if points is not None:
line["participant score"] = points
line["sportsbook"] = self._sportsbooks.get(line.get("sportsbook id"))
line["participant"] = self._participants.get(line.get("participant id"))
self._with_ids_translated = data
return data
def _copy_and_translate_data(self) -> List[Dict]:
"""Translate SBR fields in GraphQL response, and return a copy.
        This method is used by self.list() and self.dataframe(). Overrides
        Query._copy_and_translate_data() in order to add steps for cleaning the
        response and translating the ids in the response.
"""
data = copy.deepcopy(self._find_data())
self._clean_lines(data)
self._translate_dict(data)
return self._translate_ids(data)
def list(self, events=None) -> List[Dict[str, Union[str, List, Dict]]]:
"""Get a list of translated elements returned from the query.
If a list of events the lines are for is passed in, extra information about
each line will be added to each element of the returned list, including event
description, participant information, and betting market name.
"""
self._events = events
return super().list()
def dataframe(self, events=None) -> pd.DataFrame:
"""Get a dataframe of elements returned from the query.
If a list of events the lines are for is passed in, extra information about
each line will be added to each row of the returned dataframe, including event
description, participant information, and betting market name.
"""
self._events = events
return super().dataframe()
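    # Hedged usage sketch (the query-object names below are placeholders, not part
    # of this class): pass the corresponding event objects so ids get translated
    # and completed bets get graded, e.g.
    #   lines = lines_query.list(events=events_query.list())
    #   df = lines_query.dataframe(events=events_query.list())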
```
|
{
"source": "jeremyjyang/BioClients",
"score": 2
}
|
#### File: BioClients/bindingdb/Utils.py
```python
import sys,os,re,time,json,logging
import urllib.parse
from ..util import rest
#
##############################################################################
def GetLigandsByUniprot(base_url, ids, ic50_max, fout):
n_out=0; tags=None;
for id_this in ids:
rval = rest.Utils.GetURL(base_url+'/getLigandsByUniprots?uniprot=%s&cutoff=%d&response=application/json'%(id_this, ic50_max), parse_json=True)
logging.debug(json.dumps(rval, sort_keys=True, indent=2))
ligands = rval["getLigandsByUniprotsResponse"]["affinities"] if "getLigandsByUniprotsResponse" in rval and "affinities" in rval["getLigandsByUniprotsResponse"] else []
for ligand in ligands:
if not tags:
tags = ligand.keys()
fout.write("\t".join(tags)+"\n")
vals = [(str(ligand[tag]) if tag in ligand else '') for tag in tags]
   fout.write("\t".join(vals)+"\n")
   n_out+=1
 logging.info("n_out: {}".format(n_out))
##############################################################################
def GetTargetsByCompound(base_url, smiles, sim_min, fout):
rval = rest.Utils.GetURL(base_url+'/getTargetByCompound?smiles=%s&cutoff=%.2f'%(urllib.parse.quote(smiles),
sim_min), parse_xml=True)
fout.write(rval.tostring())
##############################################################################
```
#### File: BioClients/biogrid/Utils.py
```python
import sys,os,re,json,time,logging,yaml
#
from ..util import rest
#
##############################################################################
def ReadParamFile(fparam):
params={};
with open(fparam, 'r') as fh:
for param in yaml.load_all(fh, Loader=yaml.BaseLoader):
for k,v in param.items():
params[k] = v
return params
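# Hedged sketch of a parameter file ReadParamFile can consume (the API_KEY name is
# taken from the functions below; the value is a placeholder):
#   API_KEY: "0123456789abcdef"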
##############################################################################
def ListOrganisms(base_url, params, fout):
n_all=0; n_out=0; n_err=0;
url=base_url+'/organisms/?accesskey=%s'%params['API_KEY']
url+=('&format=tab2')
rval=rest.Utils.GetURL(url, parse_json=False)
txt = rval
lines = txt.splitlines()
 fout.write('organism_id\torganism\n')
for line in lines:
oid,org = re.split('\t', line)
fout.write('%s\t"%s"\n'%(oid, org))
n_out+=1
logging.info('n_out: %d'%(n_out))
##############################################################################
def ListIdTypes(base_url, params, fout):
n_all=0; n_out=0; n_err=0;
url=base_url+'/identifiers/?accesskey=%s'%params['API_KEY']
url+=('&format=tab2')
rval=rest.Utils.GetURL(url, parse_json=False)
txt = rval
lines = txt.splitlines()
for line in lines:
fout.write('%s\n'%(line))
n_all+=1
n_out+=1
logging.info('n_out: %d'%(n_out))
##############################################################################
def GetInteractions(base_url, params, ids, fout):
n_all=0; n_out=0; n_err=0;
t0=time.time()
tags=[];
for iid in ids:
url=base_url+'/interactions/%s?'%iid
url+=('&accesskey=%s&format=json'%params['API_KEY'])
rval=rest.Utils.GetURL(url, parse_json=True)
logging.debug(json.dumps(rval, indent=2, sort_keys=False)+'\n')
if type(rval) is not dict:
n_err+=1
continue
if not iid in rval :
n_err+=1
continue
intr = rval[iid]
n_all+=1
if n_all==1 or not tags:
tags=intr.keys()
fout.write('\t'.join(tags)+'\n')
  vals=[str(intr[tag]) if tag in intr else '' for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
logging.info('n_all: %d; n_out: %d; n_err: %d'%(n_all, n_out, n_err))
##############################################################################
def SearchInteractions(base_url, params, ids, search_params, fout):
n_all=0; n_out=0; n_err=0;
t0=time.time()
tags=[];
url=base_url+('/interactions/?accesskey=%s&format=json'%params['API_KEY'])
if ids:
url+=('&geneList=%s'%('|'.join(ids)))
url+=('&interSpeciesExcluded=%s'%str(not search_params['inc_interspecies']).lower())
url+=('&selfInteractionsExcluded=%s'%str(not search_params['inc_self']).lower())
if search_params['elist']:
url+=('&includeEvidence=%s'%str(search_params['inc_evidence']).lower())
url+=('&evidenceList=%s'%('|'.join(search_params['elist'])))
if search_params['addl_idtypes']:
url+=('&additionalIdentifierTypes=%s'%('|'.join(search_params['addl_idtypes'])))
if search_params['human']:
url+=('&taxId=9606')
skip=0; chunk=1000;
while True:
url_this=url+('&start=%d&max=%d'%(skip, chunk))
rval=rest.Utils.GetURL(url_this, parse_json=True)
logging.debug(json.dumps(rval, indent=2, sort_keys=False)+'\n')
if not rval: break
if type(rval) is not dict:
n_err+=1
   skip+=chunk #advance past this chunk so the loop does not repeat the same request
   continue
intrs = rval
for iid, intr in intrs.items():
n_all+=1
if n_all==1 or not tags:
tags=intr.keys()
fout.write('\t'.join(tags)+'\n')
vals=[];
for tag in tags:
    vals.append(str(intr[tag]) if tag in intr else '')
fout.write(('\t'.join(vals))+'\n')
n_out+=1
skip+=chunk
logging.info('n_all: %d; n_out: %d; n_err: %d'%(n_all, n_out, n_err))
logging.info(('Elapsed time: %s'%(time.strftime('%Hh:%Mm:%Ss', time.gmtime(time.time()-t0)))))
##############################################################################
```
#### File: BioClients/chembl/FetchByID.py
```python
import sys,os,re,argparse,logging
import csv
from chembl_webresource_client.new_client import new_client
NCHUNK = 50
#############################################################################
act_tags_selected = ['activity_id', 'assay_chembl_id', 'assay_type', 'src_id',
'relation', 'standard_relation',
'target_chembl_id', 'target_pref_name', 'target_organism', 'target_tax_id',
'molecule_chembl_id', 'parent_molecule_chembl_id', 'molecule_pref_name',
'canonical_smiles',
'document_chembl_id', 'document_year',
'pchembl_value', 'value', 'standard_value',
'text_value', 'published_value',
'units', 'qudt_units', 'uo_units', 'published_units', 'standard_units',
'type', 'published_type', 'standard_type']
def CID2Activity(cids, args, fout):
"""
Compounds to targets through activities.
Normally select biochemical assays, protein targets, activities with pChembl.
Process in chunks to avoid timeouts due to size.
"""
tags=None;
n_act=0;
i_start=(args.skip if args.skip else 0)
 i_end = min(i_start+args.nmax, len(cids)) if args.nmax else len(cids)
writer = csv.writer(fout, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
for i in range(i_start, i_end, NCHUNK):
if args.include_phenotypic:
acts = new_client.activity.filter(molecule_chembl_id__in=cids[i:i+NCHUNK]).only(act_tags_selected)
else:
acts = new_client.activity.filter(molecule_chembl_id__in=cids[i:i+NCHUNK], pchembl_value__isnull=False).only(act_tags_selected)
for act in acts:
if not tags:
tags = list(act.keys())
writer.writerow(tags)
writer.writerow([(act[tag] if tag in act else '') for tag in tags])
n_act+=1
logging.info('Progress: CIDs: %d / %d ; activities: %d'%(i-i_start, i_end-i_start, n_act))
logging.info('Output activities: %d'%n_act)
#############################################################################
def TID2Targetcomponents(tids, args, fout):
t_tags=['target_chembl_id','target_type','organism', 'species_group_flag', 'tax_id']
tc_tags=['component_id','component_type','component_description','relationship', 'accession']
ntc=0;
writer = csv.writer(fout, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
for i in range(0, len(tids), NCHUNK):
targets = new_client.target.filter(target_chembl_id__in=tids[i:i+NCHUNK])
for t in targets:
logging.debug('target tags: %s'%(str(t.keys())))
logging.debug('target: %s'%(str(t)))
t_vals=[(t[tag] if tag in t else '') for tag in t_tags]
for tc in t['target_components']:
if tc['component_type'] != 'PROTEIN':
continue
if ntc==0:
writer.writerow(t_tags+tc_tags)
tc_vals=[(tc[tag] if tag in tc else '') for tag in tc_tags]
writer.writerow(t_vals+tc_vals)
ntc+=1
logging.info('Output target components (PROTEIN): %d'%ntc)
#############################################################################
def DID2Documents(dids, args, fout):
d_tags=['document_chembl_id', 'doc_type', 'src_id', 'pubmed_id',
'patent_id', 'doi', 'doi_chembl', 'year', 'journal', 'authors',
'volume', 'issue', 'title', 'journal_full_title', 'abstract']
ndoc=0;
writer = csv.writer(fout, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
for i in range(0, len(dids), NCHUNK):
documents = new_client.document.filter(document_chembl_id__in=dids[i:i+NCHUNK])
for d in documents:
logging.debug('document tags: %s'%(str(d.keys())))
logging.debug('document: %s'%(str(d)))
d_vals=[(d[tag] if tag in d else '') for tag in d_tags]
if ndoc==0:
writer.writerow(d_tags)
writer.writerow(d_vals)
ndoc+=1
logging.info('Output documents: %d'%ndoc)
#############################################################################
def InchiKey2Molecule(inkeys, args, fout):
m_tags=[
'availability_type', 'biotherapeutic', 'black_box_warning', 'chebi_par_id',
'chirality', 'dosed_ingredient', 'first_approval', 'first_in_class',
'helm_notation', 'indication_class', 'inorganic_flag', 'max_phase',
'molecule_chembl_id', 'molecule_type', 'natural_product', 'oral',
'parenteral', 'polymer_flag', 'pref_name', 'prodrug', 'structure_type',
'therapeutic_flag', 'topical', 'usan_stem', 'usan_stem_definition',
'usan_substem', 'usan_year', 'withdrawn_class', 'withdrawn_country',
'withdrawn_flag', 'withdrawn_reason', 'withdrawn_year']
n_mol=0;
writer = csv.writer(fout, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
for i in range(0, len(inkeys), NCHUNK):
mol = new_client.molecule
mols = mol.get(inkeys[i:i+NCHUNK])
for ii,m in enumerate(mols):
inkey = inkeys[i+ii]
logging.debug('mol tags: %s'%(str(m.keys())))
logging.debug('mol: %s'%(str(m)))
m_vals=[inkey]+[(m[tag] if tag in m else '') for tag in m_tags]
if n_mol==0:
writer.writerow(['inchikey']+m_tags)
writer.writerow(m_vals)
n_mol+=1
logging.info('Output mols: %d'%n_mol)
#############################################################################
if __name__=='__main__':
parser = argparse.ArgumentParser(description='ChEMBL REST API client: lookup by IDs')
ops = ['cid2Activity', 'tid2Targetcomponents', 'did2Documents', 'inchikey2Mol']
parser.add_argument("op", choices=ops, help='operation')
parser.add_argument("--i", dest="ifile", help="input file, IDs")
parser.add_argument("--ids", help="input IDs, comma-separated")
parser.add_argument("--o", dest="ofile", help="output (TSV)")
parser.add_argument("--skip", type=int)
parser.add_argument("--nmax", type=int)
parser.add_argument("--include_phenotypic", action="store_true", help="else pChembl required")
parser.add_argument("-v","--verbose", action="count", default=0)
args = parser.parse_args()
logging.basicConfig(format='%(levelname)s:%(message)s', level=(logging.DEBUG if args.verbose>1 else logging.INFO))
 if not (args.ifile or args.ids):
parser.error('--i or --id required.')
if args.ofile:
fout = open(args.ofile, 'w')
else:
fout = sys.stdout
ids=[]
if args.ifile:
with open(args.ifile, 'r') as fin:
reader = csv.reader(fin, delimiter='\t')
for row in reader:
ids.append(row[0])
elif args.ids:
ids = re.split('[, ]+', args.ids.strip())
logging.info('Input IDs: %d'%len(ids))
if args.op == 'cid2Activity':
CID2Activity(ids, args, fout)
elif args.op == 'inchikey2Mol':
InchiKey2Molecule(ids, args, fout)
elif args.op == 'tid2Targetcomponents':
TID2Targetcomponents(ids, args, fout)
elif args.op == 'did2Documents':
DID2Documents(ids, args, fout)
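# Hypothetical invocation (module path assumed from the file location; IDs are
# placeholders):
#   python -m BioClients.chembl.FetchByID cid2Activity --ids "CHEMBL25,CHEMBL521" --o acts.tsv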
```
#### File: BioClients/chembl/Utils.py
```python
import sys,os,re,json,time,urllib.parse,logging,tqdm
import pandas as pd
#
from ..util import rest
#
NCHUNK=100
#
API_HOST="www.ebi.ac.uk"
API_BASE_PATH="/chembl/api/data"
BASE_URL="https://"+API_HOST+API_BASE_PATH
#
##############################################################################
def Status(base_url=BASE_URL, fout=None):
rval = rest.Utils.GetURL(f"{base_url}/status.json", parse_json=True)
logging.debug(json.dumps(rval, sort_keys=True, indent=2))
 df = pd.DataFrame({tag:[rval[tag]] for tag in rval.keys()})
if fout is not None: df.to_csv(fout, "\t", index=False)
else: return df
#############################################################################
def GetTargetByUniprot(ids, base_url=BASE_URL, fout=None):
n_out=0;
ids_chembl = set()
fout.write("UniprotId\ttarget_chembl_id\n")
for uniprot in ids:
id_chembl=None
rval = rest.Utils.GetURL(f"{base_url}/target.json?target_components__accession={uniprot}", parse_json=True)
targets = rval["targets"] if "targets" in rval else []
for target in targets:
id_chembl = target["target_chembl_id"]
ids_chembl.add(id_chembl)
  fout.write(f"{uniprot}\t{id_chembl}\n")
n_out+=1
if len(ids_chembl)>1:
logging.info(f"Uniprot ambiguous: {uniprot}")
for id_chembl in list(ids_chembl):
logging.debug(f"Uniprot: {uniprot} -> ChEMBL: {id_chembl}")
logging.info(f"n_out: {n_out}")
#############################################################################
def GetActivity(ids, resource, pmin, skip=0, nmax=None, api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
'''Get activity data and necessary references only, due to size concerns. resource = assay|target|molecule. Filter on pChEMBL value, standardized negative log molar half-max response activity.'''
n_act=0; n_out=0; n_pval=0; n_pval_ok=0; tags=None; tq=None;
for i,id_this in enumerate(ids):
if i<skip: continue
if not tq: tq = tqdm.tqdm(total=len(ids)-skip, unit=resource+"s")
tq.update()
url_next = (f"{api_base_path}/activity.json?{resource}_chembl_id={id_this}&limit={NCHUNK}")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if rval is None: break
acts = rval["activities"] if "activities" in rval else []
for act in acts:
logging.debug(json.dumps(act, sort_keys=True, indent=2))
n_act+=1
if not tags:
tags = list(act.keys())
for tag in tags[:]:
if type(act[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(act[tag])}): "{tag}"')
fout.write('\t'.join(tags)+'\n')
vals = [(str(act[tag]) if tag in act else '') for tag in tags]
if pmin is not None:
try:
pval = float(act["pchembl_value"])
n_pval+=1
if pval >= pmin:
n_pval_ok+=1
fout.write('\t'.join(vals)+'\n')
n_out+=1
logging.debug(f"[{n_act}] pVal ok ({pval:4.1f} >= {pmin:4.1f})")
else:
logging.debug(f"[{n_act}] pVal low ({pval:4.1f} < {pmin:4.1f})")
except:
logging.debug(f"[{n_act}] pVal missing.")
else:
fout.write('\t'.join(vals)+'\n')
n_out+=1
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if nmax and i>=(nmax-skip): break
if tq is not None: tq.close()
logging.info(f"n_qry: {len(ids)}; n_act: {n_act}; n_out: {n_out}")
if pmin is not None:
logging.info(f"n_pval: {n_pval}; n_pval_ok: {n_pval_ok}; pVals missing: {n_act-n_pval}")
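# Note on pmin: pchembl_value is -log10 of the molar half-max activity, so e.g.
# pmin=6 keeps activities of 1 uM or more potent and pmin=9 keeps 1 nM or better.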
#############################################################################
def GetActivityProperties(ids, skip=0, nmax=None, base_url=BASE_URL, fout=None):
n_out=0; tags=None;
for i,id_this in enumerate(ids):
if i<skip: continue
act = rest.Utils.GetURL((f"{base_url}/activity/{id_this}.json"), parse_json=True)
assay_chembl_id = act["assay_chembl_id"] if "assay_chembl_id" in act else ""
molecule_chembl_id = act["molecule_chembl_id"] if "molecule_chembl_id" in act else ""
props = act["activity_properties"] if "activity_properties" in act else []
for prop in props:
if not tags:
tags = list(prop.keys())
fout.write('\t'.join(["activity_id", "assay_chembl_id", "molecule_chembl_id"]+tags)+"\n")
logging.debug(json.dumps(prop, sort_keys=True, indent=2))
vals = [str(prop[tag]) if tag in prop else "" for tag in tags]
fout.write(('\t'.join([id_this, assay_chembl_id, molecule_chembl_id]+vals))+'\n')
n_out+=1
if nmax and i>=(nmax-skip): break
logging.info(f"n_qry: {len(ids)}; n_out: {n_out}")
#############################################################################
def ListTargets(skip, nmax, api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
'''One row per target. Ignore synonyms. If one-component, single protein, include UniProt accession.'''
n_tgt=0; n_cmt=0; n_out=0; tags=None; tq=None;
url_next = (f"{api_base_path}/target.json?limit={NCHUNK}&offset={skip}")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
tgts = rval["targets"] if rval and "targets" in rval else []
for tgt in tgts:
logging.debug(json.dumps(tgt, indent=2))
n_tgt+=1
if not tags:
tags = sorted(tgt.keys())
for tag in tags[:]:
if type(tgt[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(tgt[tag])}): "{tag}"')
tags.extend(["component_count", "accession"])
fout.write('\t'.join(tags)+'\n')
vals = [str(tgt[tag]) if tag in tgt else "" for tag in tags]
if "target_components" in tgt and tgt["target_components"]:
cmts = tgt["target_components"]
n_cmt+=len(cmts)
vals.append(f"{len(cmts)}")
vals.append(cmts[0]["accession"] if len(cmts)==1 else "")
else:
logging.debug(f"no-component target: {vals[0]}")
vals.extend(["", ""])
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if tq is not None: tq.update()
if nmax and n_out>=nmax: break
if nmax and n_out>=nmax: break
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
if not tq: tq = tqdm.tqdm(total=total_count, unit="tgts")
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if tq is not None: tq.close()
logging.info(f"n_targets: {n_tgt}; n_target_components: {n_cmt}; n_out: {n_out}")
#############################################################################
def GetTarget(ids, base_url=BASE_URL, fout=None):
'''One row per target. Ignore synonyms. If one-component, single protein, include UniProt accession.'''
n_tgt=0; n_cmt=0; n_out=0; tags=None; tq=None;
for id_this in ids:
tgt = rest.Utils.GetURL(f"{base_url}/target/{id_this}.json", parse_json=True)
if not tgt:
logging.error(f'Not found: "{id_this}"')
continue
n_tgt+=1
if not tq: tq = tqdm.tqdm(total=len(ids), unit="tgts")
tq.update()
if n_tgt==1 or not tags:
tags = sorted(tgt.keys())
for tag in tags[:]:
if type(tgt[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(tgt[tag])}): "{tag}"')
tags.extend(["component_count", "accession"])
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(tgt,sort_keys=True,indent=2))
vals = [str(tgt[tag]) if tag in tgt else "" for tag in tags]
if "target_components" in tgt and tgt["target_components"]:
cmts = tgt["target_components"]
n_cmt+=len(cmts)
vals.append(f"{len(cmts)}")
vals.append(str(cmts[0]["accession"]) if len(cmts)==1 else "")
else:
logging.debug(f"no-component target: {vals[0]}")
vals.extend(["", ""])
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if tq is not None: tq.close()
logging.info(f"n_qry: {len(ids)}; n_targets: {n_tgt}; n_target_components: {n_cmt}; n_out: {n_out}")
#############################################################################
def GetTargetComponents(ids, skip=0, nmax=None, base_url=BASE_URL, fout=None):
n_tgt=0; n_out=0; tags=[]; cmt_tags=[]; df=None; tq=None;
for i,id_this in enumerate(ids):
if i<skip: continue
if not tq: tq = tqdm.tqdm(total=len(ids)-skip, unit="tgts")
tq.update()
tgt = rest.Utils.GetURL(f"{base_url}/target/{id_this}.json", parse_json=True)
if not tgt: continue
n_tgt+=1
vals = [str(tgt[tag]) if tag in tgt else "" for tag in tags]
cmts = tgt["target_components"] if "target_components" in tgt and tgt["target_components"] else []
if not cmts: continue
for cmt in cmts:
logging.debug(json.dumps(cmt, indent=2))
if not tags:
for tag in tgt.keys():
if type(tgt[tag]) not in (dict, list, tuple):
tags.append(tag)
for tag in cmt.keys():
if type(cmt[tag]) not in (dict, list, tuple):
cmt_tags.append(tag)
df_this = pd.concat([
pd.DataFrame({tag:[(tgt[tag] if tag in tgt else None)] for tag in tags}),
pd.DataFrame({tag:[(cmt[tag] if tag in cmt else None)] for tag in cmt_tags})],
axis=1)
if fout is None:
df = pd.concat([df, df_this])
else:
df_this.to_csv(fout, "\t", index=False)
n_out+=df_this.shape[0]
if nmax and i>=(nmax-skip): break
if tq is not None: tq.close()
logging.info(f"n_qry: {len(ids)}; n_targets: {n_tgt}; n_out: {n_out}")
if fout is None: return df
#############################################################################
def GetDocument(ids, skip=0, nmax=None, base_url=BASE_URL, fout=None):
n_pmid=0; n_doi=0; n_out=0; tags=None; tq=None;
for i,id_this in enumerate(ids):
if i<skip: continue
if not tq: tq = tqdm.tqdm(total=len(ids)-skip, unit="docs")
tq.update()
doc = rest.Utils.GetURL(f"{base_url}/document/{id_this}.json", parse_json=True)
if not doc:
logging.error(f'Not found: "{id_this}"')
continue
if not tags:
tags = list(doc.keys())
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(doc, sort_keys=True, indent=2))
if "pubmed_id" in tags and doc["pubmed_id"]: n_pmid+=1
if "doi" in tags and doc["doi"]: n_doi+=1
vals = [str(doc[tag]) if tag in doc else "" for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if tq is not None: tq.close()
logging.info(f"n_qry: {len(ids)}; n_pmid: {n_pmid}; n_doi: {n_doi}; n_out: {n_out}")
#############################################################################
def ListSources(api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_out=0; tags=None;
url_next = (api_base_path+"/source.json")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
sources = rval["sources"] if "sources" in rval else []
for source in sources:
if not tags:
tags = list(source.keys())
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(source, sort_keys=True, indent=2))
vals = [str(source[tag]) if tag in source else "" for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
logging.info(f"n_out: {n_out}")
#############################################################################
def ListCellLines(api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_clo=0; n_efo=0; n_out=0; tags=None;
url_next = (api_base_path+"/cell_line.json")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
logging.debug(json.dumps(rval, sort_keys=True, indent=2))
cells = rval["cell_lines"] if "cell_lines" in rval else []
for cell in cells:
if not tags:
tags = list(cell.keys())
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(cell, sort_keys=True, indent=2))
if "clo_id" in cell and cell["clo_id"]: n_clo+=1
if "efo_id" in cell and cell["efo_id"]: n_efo+=1
vals = [str(cell[tag]) if tag in cell else "" for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
logging.info(f"n_out: {n_out}; n_clo: {n_clo}; n_efo: {n_efo}")
#############################################################################
def ListOrganisms(api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_out=0; tags=None;
url_next = (api_base_path+"/organism.json")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
logging.debug(json.dumps(rval, sort_keys=True, indent=2))
orgs = rval["organisms"] if "organisms" in rval else []
for org in orgs:
if not tags:
tags = list(org.keys())
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(org, sort_keys=True, indent=2))
vals = [str(org[tag]) if tag in org else "" for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
logging.info(f"n_out: {n_out}")
#############################################################################
def ListProteinClasses(api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_out=0; tags=None;
url_next = (api_base_path+"/protein_class.json")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
logging.debug(json.dumps(rval, sort_keys=True, indent=2))
pcls = rval["protein_classes"] if "protein_classes" in rval else []
for pcl in pcls:
if not tags:
tags = list(pcl.keys())
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(pcl, sort_keys=True, indent=2))
vals = [str(pcl[tag]) if tag in pcl else "" for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
logging.info(f"n_out: {n_out}")
#############################################################################
def ListDrugIndications(skip, nmax, api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_efo=0; n_out=0; tags=None; tq=None;
url_next = (f"{api_base_path}/drug_indication.json?limit={NCHUNK}&offset={skip}")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
logging.debug(json.dumps(rval, sort_keys=True, indent=2))
dins = rval["drug_indications"] if "drug_indications" in rval else []
for din in dins:
if not tags:
tags = list(din.keys())
for tag in tags[:]:
if type(din[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(din[tag])}): "{tag}"')
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(din, sort_keys=True, indent=2))
if "efo_id" in din and din["efo_id"]: n_efo+=1
vals = [str(din[tag]) if tag in din else "" for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if tq is not None: tq.update()
if nmax and n_out>=nmax: break
if nmax and n_out>=nmax: break
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
if not tq: tq = tqdm.tqdm(total=total_count, unit="inds")
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if tq is not None: tq.close()
logging.info(f"n_out: {n_out}; n_efo: {n_efo}")
#############################################################################
def ListTissues(api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_bto=0; n_efo=0; n_caloha=0; n_uberon=0; n_out=0; tags=None; tq=None;
url_next = (api_base_path+"/tissue.json")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
logging.debug(json.dumps(rval, sort_keys=True, indent=2))
tissues = rval["tissues"] if "tissues" in rval else []
for tissue in tissues:
if not tags:
tags = list(tissue.keys())
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(tissue, sort_keys=True, indent=2))
if "bto_id" in tissue and tissue["bto_id"]: n_bto+=1
if "efo_id" in tissue and tissue["efo_id"]: n_efo+=1
if "uberon_id" in tissue and tissue["uberon_id"]: n_uberon+=1
if "caloha_id" in tissue and tissue["caloha_id"]: n_caloha+=1
vals = [str(tissue[tag]) if tag in tissue else "" for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if tq is not None: tq.update()
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
if not tq: tq = tqdm.tqdm(total=total_count, unit="tissues")
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if tq is not None: tq.close()
logging.info(f"n_out: {n_out}; n_bto: {n_bto}; n_efo: {n_efo}; n_caloha: {n_caloha}; n_uberon: {n_uberon}")
#############################################################################
def ListMechanisms(api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_out=0; tags=None; tq=None;
url_next = (api_base_path+"/mechanism.json")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
logging.debug(json.dumps(rval, sort_keys=True, indent=2))
mechs = rval["mechanisms"] if "mechanisms" in rval else []
for mech in mechs:
if not tags:
tags = list(mech.keys())
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(mech, sort_keys=True, indent=2))
vals = [str(mech[tag]) if tag in mech else "" for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if tq is not None: tq.update()
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
if not tq: tq = tqdm.tqdm(total=total_count, unit="mechs")
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if tq is not None: tq.close()
logging.info(f"n_out: {n_out}")
#############################################################################
def ListDocuments(skip, nmax, api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_pmid=0; n_doi=0; n_out=0; n_err=0; tags=None; tq=None;
url_next = (f"{api_base_path}/document.json?limit={NCHUNK}&offset={skip}")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
docs = rval["documents"] if "documents" in rval else []
for doc in docs:
if not tags:
tags = list(doc.keys())
if "abstract" in tags: tags.remove("abstract") #unnecessary, verbose
fout.write('\t'.join(tags)+'\n')
logging.debug(json.dumps(doc, sort_keys=True, indent=2))
if "pubmed_id" in tags and doc["pubmed_id"]: n_pmid+=1
if "doi" in tags and doc["doi"]: n_doi+=1
vals = [str(doc[tag]) if tag in doc else "" for tag in tags]
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if tq is not None: tq.update()
if nmax and n_out>=nmax: break
if nmax and n_out>=nmax: break
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
if not tq: tq = tqdm.tqdm(total=total_count, unit="docs")
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if tq is not None: tq.close()
logging.info(f"n_out: {n_out}; n_pmid: {n_pmid}; n_doi: {n_doi}")
#############################################################################
def GetAssay(ids, base_url=BASE_URL, fout=None):
n_out=0; tags=None;
for id_this in ids:
assay = rest.Utils.GetURL(f"{base_url}/assay/{id_this}.json", parse_json=True)
if not assay:
continue
if not tags:
tags = list(assay.keys())
for tag in tags[:]:
if type(assay[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(assay[tag])}): "{tag}"')
fout.write('\t'.join(tags)+'\n')
vals = [(str(assay[tag]) if tag in assay else "") for tag in tags]
fout.write('\t'.join(vals)+'\n')
n_out+=1
logging.info(f"n_in: {len(ids)}; n_out: {n_out}")
#############################################################################
def ListAssays(skip, nmax, api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_ass=0; n_out=0; tags=None; tq=None;
url_next = (f"{api_base_path}/assay.json?offset={skip}&limit={NCHUNK}")
t0 = time.time()
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
assays = rval["assays"] if "assays" in rval else []
for assay in assays:
n_ass+=1
if not tags:
tags = list(assay.keys())
for tag in tags[:]:
if type(assay[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(assay[tag])}): "{tag}"')
fout.write('\t'.join(tags)+'\n')
vals = [(str(assay[tag]).replace('\t', " ") if tag in assay else "") for tag in tags]
fout.write('\t'.join(vals)+'\n')
n_out+=1
if tq is not None: tq.update()
if nmax and n_out>=nmax: break
if nmax and n_out>=nmax: break
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
if not tq: tq = tqdm.tqdm(total=total_count, unit="assays")
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if tq is not None: tq.close()
logging.info(f"n_out: {n_out}")
logging.info(f"""Elapsed time: {time.strftime('%Hh:%Mm:%Ss', time.gmtime(time.time()-t0))}""")
#############################################################################
def SearchAssays(asrc, atype, skip, nmax, api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
'''Select assays based on source and optionally type.'''
n_ass=0; n_out=0; tags=None; tq=None;
url_next = (f"{api_base_path}/assay.json?offset={skip}&limit={NCHUNK}")
if asrc: url_next+=(f"&src_id={asrc}")
if atype: url_next+=(f"&assay_type={atype}")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
assays = rval["assays"] if "assays" in rval else []
for assay in assays:
n_ass+=1
if not tags:
tags = list(assay.keys())
for tag in tags[:]:
if type(assay[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(assay[tag])}): "{tag}"')
fout.write('\t'.join(tags)+'\n')
vals = [(str(assay[tag]) if tag in assay else "") for tag in tags]
fout.write('\t'.join(vals)+'\n')
n_out+=1
if tq is not None: tq.update()
if nmax and n_out>=nmax: break
if nmax and n_out>=nmax: break
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
if not tq: tq = tqdm.tqdm(total=total_count, unit="assays")
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if tq is not None: tq.close()
logging.info(f"n_assay: {n_ass}; n_out: {n_out}")
##############################################################################
def GetMolecule(ids, base_url=BASE_URL, fout=None):
'''Ignore molecule_synonyms.'''
n_out=0; tags=None; struct_tags=None; prop_tags=None; tq=None;
for id_this in ids:
mol = rest.Utils.GetURL(f"{base_url}/molecule/{id_this}.json", parse_json=True)
if not mol: continue
if not tq: tq = tqdm.tqdm(total=len(ids), unit="mols")
tq.update()
if not tags:
tags = sorted(list(mol.keys()))
for tag in tags[:]:
if type(mol[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(mol[tag])}): "{tag}"')
struct_tags = sorted(mol["molecule_structures"].keys())
struct_tags.remove("molfile")
prop_tags = sorted(mol["molecule_properties"].keys())
   fout.write('\t'.join(["parent_chembl_id"]+tags+struct_tags+prop_tags)+'\n')
logging.debug(json.dumps(mol, sort_keys=True, indent=2))
vals = [(mol["molecule_hierarchy"]["parent_chembl_id"] if "molecule_hierarchy" in mol and "parent_chembl_id" in mol["molecule_hierarchy"] else "")]
vals.extend([(str(mol[tag]) if tag in mol else "") for tag in tags])
vals.extend([(str(mol["molecule_structures"][tag]) if "molecule_structures" in mol and tag in mol["molecule_structures"] else "") for tag in struct_tags])
vals.extend([(str(mol["molecule_properties"][tag]) if "molecule_properties" in mol and tag in mol["molecule_properties"] else "") for tag in prop_tags])
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if tq is not None: tq.close()
logging.info(f"n_in: {len(ids)}; n_out: {n_out}")
#############################################################################
def ListMolecules(dev_phase, skip, nmax, api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
'''Ignore synonyms here.'''
n_mol=0; n_out=0; n_err=0; tags=None; struct_tags=None; prop_tags=None; tq=None;
url_next=(f"{api_base_path}/molecule.json?limit={NCHUNK}")
if skip: url_next+=(f"&offset={skip}")
if dev_phase: url_next+=(f"&max_phase={dev_phase}")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
mols = rval["molecules"] if "molecules" in rval else []
for mol in mols:
n_mol+=1
logging.debug(json.dumps(mol, sort_keys=True, indent=2))
if not tags:
tags = sorted(mol.keys())
for tag in tags[:]:
if type(mol[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(mol[tag])}): "{tag}"')
struct_tags = sorted(mol["molecule_structures"].keys())
struct_tags.remove("molfile")
prop_tags = sorted(mol["molecule_properties"].keys())
    fout.write('\t'.join(["parent_chembl_id"]+tags+struct_tags+prop_tags)+'\n')
vals = [(mol["molecule_hierarchy"]["parent_chembl_id"] if "molecule_hierarchy" in mol and mol["molecule_hierarchy"] and "parent_chembl_id" in mol["molecule_hierarchy"] else "")]
vals.extend([(str(mol[tag]) if tag in mol else "") for tag in tags])
vals.extend([(str(mol["molecule_structures"][tag]) if "molecule_structures" in mol and mol["molecule_structures"] and tag in mol["molecule_structures"] else "") for tag in struct_tags])
vals.extend([(str(mol["molecule_properties"][tag]) if "molecule_properties" in mol and mol["molecule_properties"] and tag in mol["molecule_properties"] else "") for tag in prop_tags])
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if nmax and n_mol>=nmax: break
if nmax and n_mol>=nmax: break
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
if not tq: tq = tqdm.tqdm(total=total_count, unit="mols")
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if tq is not None: tq.close()
logging.info(f"n_out: {n_out}")
#############################################################################
def ListDrugs(skip, nmax, api_host=API_HOST, api_base_path=API_BASE_PATH, fout=None):
n_mol=0; n_out=0; n_err=0; tags=None; struct_tags=None; prop_tags=None; tq=None;
url_next = (f"{api_base_path}/drug.json?limit={NCHUNK}&offset={skip}")
while True:
rval = rest.Utils.GetURL("https://"+api_host+url_next, parse_json=True)
if not rval: break
mols = rval["drugs"] if "drugs" in rval else []
for mol in mols:
n_mol+=1
logging.debug(json.dumps(mol, sort_keys=True, indent=2))
if not tags:
tags = sorted(mol.keys())
for tag in tags[:]:
if type(mol[tag]) in (dict, list, tuple):
tags.remove(tag)
logging.debug(f'Ignoring field ({type(mol[tag])}): "{tag}"')
struct_tags = sorted(mol["molecule_structures"].keys())
struct_tags.remove("molfile")
prop_tags = sorted(mol["molecule_properties"].keys())
    fout.write('\t'.join(["parent_chembl_id"]+tags+struct_tags+prop_tags)+'\n')
vals = [(mol["molecule_hierarchy"]["parent_chembl_id"] if "molecule_hierarchy" in mol and mol["molecule_hierarchy"] and "parent_chembl_id" in mol["molecule_hierarchy"] else "")]
vals.extend([(str(mol[tag]) if tag in mol else "") for tag in tags])
vals.extend([(str(mol["molecule_structures"][tag]) if "molecule_structures" in mol and mol["molecule_structures"] and tag in mol["molecule_structures"] else "") for tag in struct_tags])
vals.extend([(str(mol["molecule_properties"][tag]) if "molecule_properties" in mol and mol["molecule_properties"] and tag in mol["molecule_properties"] else "") for tag in prop_tags])
fout.write(('\t'.join(vals))+'\n')
n_out+=1
if tq is not None: tq.update()
if nmax and n_mol>=nmax: break
if nmax and n_mol>=nmax: break
total_count = rval["page_meta"]["total_count"] if "page_meta" in rval and "total_count" in rval["page_meta"] else None
if not tq: tq = tqdm.tqdm(total=total_count, unit="drugs")
url_next = rval["page_meta"]["next"] if "page_meta" in rval and "next" in rval["page_meta"] else None
if not url_next: break
if tq is not None: tq.close()
logging.info(f"n_out: {n_out}")
##############################################################################
def SearchMoleculeByName(ids, base_url=BASE_URL, fout=None):
"""IDs should be names/synonyms."""
n_out=0; n_notfound=0; synonym_tags=None;
tags = ["molecule_chembl_id"]
for id_this in ids:
rval = rest.Utils.GetURL(f"{base_url}/molecule/search?q={urllib.parse.quote(id_this)}", headers={"Accept":"application/json"}, parse_json=True)
if not rval:
logging.info(f'Not found: "{id_this}"')
n_notfound+=1
continue
logging.debug(json.dumps(rval, sort_keys=True, indent=2))
mols = rval["molecules"] if "molecules" in rval else []
for mol in mols:
logging.debug(json.dumps(mol, sort_keys=True, indent=2))
synonyms = mol["molecule_synonyms"] if "molecule_synonyms" in mol else []
for synonym in synonyms:
if not synonym_tags:
synonym_tags = list(synonym.keys())
fout.write('\t'.join(tags+synonym_tags)+'\n')
molecule_synonym = synonym["molecule_synonym"] if "molecule_synonym" in synonym else ""
if not re.search(id_this, molecule_synonym, re.I):
continue
vals = [(mol["molecule_chembl_id"] if "molecule_chembl_id" in mol else "")]
vals.extend([(str(synonym[tag]) if tag in synonym else "") for tag in synonym_tags])
fout.write(('\t'.join(vals))+'\n')
n_out+=1
logging.info(f"n_in: {len(ids)}; n_found: {len(ids)-n_notfound}; n_out: {n_out}")
#############################################################################
def GetMoleculeByInchikey(ids, base_url=BASE_URL, fout=None):
"""Requires InChI key, e.g. "GHBOEFUAGSHXPO-XZOTUCIWSA-N"."""
n_out=0; tags=[]; struct_tags=[]; df=None; tq=None;
for id_this in ids:
if not tq: tq = tqdm.tqdm(total=len(ids), unit="mols")
tq.update()
mol = rest.Utils.GetURL(f"{base_url}/molecule/{id_this}.json", parse_json=True)
if not mol:
continue
struct = mol["molecule_structures"] if "molecule_structures" in mol else None
if not struct: continue
if not tags:
for tag in mol.keys():
if type(mol[tag]) not in (list,dict): tags.append(tag)
for tag in struct.keys():
if type(struct[tag]) not in (list,dict): struct_tags.append(tag)
struct_tags.remove("molfile")
df_this = pd.concat([
pd.DataFrame({tag:[(mol[tag] if tag in mol else None)] for tag in tags}),
pd.DataFrame({tag:[(struct[tag] if tag in struct else None)] for tag in struct_tags})],
axis=1)
if fout is None:
df = pd.concat([df, df_this])
else:
df_this.to_csv(fout, "\t", index=False)
n_out+=1
if tq is not None: tq.close()
logging.info(f"n_qry: {len(ids)}; n_out: {n_out}; n_not_found: {len(ids)-n_out}")
if fout is None: return df
#############################################################################
```
#### File: BioClients/drugcentral/Test.py
```python
import os,sys,unittest
from .. import drugcentral
class TestAPI(unittest.TestCase):
def __init__(self, methodName=""):
super().__init__(methodName)
self.params = drugcentral.ReadParamFile(os.environ['HOME']+"/.drugcentral.yaml")
self.dbcon = drugcentral.Utils.Connect(self.params['DBHOST'], self.params['DBPORT'], self.params['DBNAME'], self.params['DBUSR'], self.params['DBPW'])
def test_Version(self):
self.assertTrue(type(drugcentral.Version(self.dbcon)) is not None)
def test_Version_02(self):
self.assertEqual(drugcentral.Version(self.dbcon).shape[0], 1)
def test_Describe(self):
self.assertTrue(drugcentral.Describe(self.dbcon).shape[0]>10)
def test_Counts(self):
self.assertTrue(drugcentral.Counts(self.dbcon).shape[0]>10)
def test_ListStructures(self):
df = drugcentral.ListStructures(self.dbcon)
self.assertTrue(df.shape[0]>4000)
def test_ListStructures2Smiles(self):
df = drugcentral.ListStructures2Smiles(self.dbcon)
self.assertTrue(df.shape[0]>4000)
def test_ListProducts(self):
df = drugcentral.ListProducts(self.dbcon)
self.assertTrue(df.shape[0]>4000)
def test_ListActiveIngredients(self):
df = drugcentral.ListActiveIngredients(self.dbcon)
self.assertTrue(df.shape[0]>4000)
def test_ListIndications(self):
self.assertTrue(drugcentral.ListIndications(self.dbcon).shape[0]>100)
def test_SearchIndications(self):
self.assertTrue(drugcentral.SearchIndications(self.dbcon, "Alzheimer").shape[0]>0)
def test_GetStructure(self):
self.assertTrue(drugcentral.GetStructure(self.dbcon, ["1725"]).shape[0]==1)
def test_GetStructureBySynonym(self):
self.assertTrue(drugcentral.GetStructureBySynonym(self.dbcon, ["zantac"]).shape[0]==1)
def test_GetStructureIds(self):
self.assertTrue(drugcentral.GetStructureIds(self.dbcon, ["1725"]).shape[0]>5)
def test_GetStructureProducts(self):
self.assertTrue(drugcentral.GetStructureProducts(self.dbcon, ["1725"]).shape[0]>10)
#############################################################################
if __name__ == '__main__':
unittest.main(verbosity=2)
```
#### File: ensembl/biomart/Utils.py
```python
import sys,os,re,time,json,logging,tqdm,io
import pandas as pd
import requests, urllib.parse
#
API_HOST='www.ensembl.org'
API_BASE_PATH='/biomart/martservice'
#
BASE_URL='http://'+API_HOST+API_BASE_PATH
#
ENSG2NCBI_xml="""\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE Query>
<Query virtualSchemaName = "default" formatter = "TSV" header = "1" uniqueRows = "0" count = "" datasetConfigVersion = "0.6" >
<Dataset name = "hsapiens_gene_ensembl" interface = "default" >
<Attribute name = "ensembl_gene_id" />
<Attribute name = "ensembl_gene_id_version" />
<Attribute name = "entrezgene_id" />
</Dataset>
</Query>
"""
#
ENSG2HGNC_xml="""\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE Query>
<Query virtualSchemaName = "default" formatter = "TSV" header = "1" uniqueRows = "0" count = "" datasetConfigVersion = "0.6" >
<Dataset name = "hsapiens_gene_ensembl" interface = "default" >
<Attribute name = "ensembl_gene_id" />
<Attribute name = "ensembl_gene_id_version" />
<Attribute name = "hgnc_id" />
<Attribute name = "hgnc_symbol" />
</Dataset>
</Query>
"""
#
ENSG2NCBIHGNC_xml="""\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE Query>
<Query virtualSchemaName = "default" formatter = "TSV" header = "1" uniqueRows = "0" count = "" datasetConfigVersion = "0.6" >
<Dataset name = "hsapiens_gene_ensembl" interface = "default" >
<Attribute name = "ensembl_gene_id" />
<Attribute name = "ensembl_gene_id_version" />
<Attribute name = "entrezgene_id" />
<Attribute name = "hgnc_id" />
<Attribute name = "hgnc_symbol" />
</Dataset>
</Query>
"""
#
##############################################################################
def XMLQuery(xmltxt, base_url=BASE_URL, fout=None):
url_this = base_url+f"?query={urllib.parse.quote(xmltxt)}"
logging.debug(url_this)
rval = requests.get(url_this)
if not rval.ok:
logging.error(f"{rval.status_code}")
logging.debug(rval.text)
return None
df = pd.read_table(io.StringIO(rval.text), "\t")
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"Output rows: {df.shape[0]}; cols: {df.shape[1]} ({str(df.columns.tolist())})")
return df
##############################################################################
def ENSG2NCBI(base_url=BASE_URL, fout=None):
return XMLQuery(ENSG2NCBI_xml, base_url, fout)
##############################################################################
def ENSG2HGNC(base_url=BASE_URL, fout=None):
return XMLQuery(ENSG2HGNC_xml, base_url, fout)
##############################################################################
def ENSG2NCBIHGNC(base_url=BASE_URL, fout=None):
return XMLQuery(ENSG2NCBIHGNC_xml, base_url, fout)
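# Hedged usage sketch (the output path is a placeholder):
#   with open("ensg2ncbi.tsv", "w") as fout:
#       ENSG2NCBI(fout=fout)
# For an ad hoc query, pass any BioMart XML string to XMLQuery(xmltxt).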
##############################################################################
```
#### File: fda/aer/Client.py
```python
import sys,os,re,json,argparse,time,yaml,logging
from ... import fda
#
#############################################################################
def ReadParamFile(fparam):
params={};
with open(fparam, 'r') as fh:
for param in yaml.load_all(fh, Loader=yaml.BaseLoader):
for k,v in param.items():
params[k] = v
return params
##############################################################################
if __name__=='__main__':
NMAX=100;
epilog='''\
Example UNII: 786Z46389E
'''
parser = argparse.ArgumentParser(description='OpenFDA Adverse Event Reports client', epilog=epilog)
ops = ['search', 'counts', 'info', 'showfields']
parser.add_argument("op", choices=ops, help='operation')
parser.add_argument("--o", dest="ofile", help="output (TSV)")
parser.add_argument("--drug_class", help="EPC pharmacologic class")
parser.add_argument("--drug_ind", help="drug indication")
parser.add_argument("--drug_unii", help="drug ID UNII")
parser.add_argument("--drug_ndc", help="drug ID NDC")
parser.add_argument("--drug_spl", help="drug ID SPL")
parser.add_argument("--serious", type=bool, help="serious adverse events")
parser.add_argument("--fatal", type=bool, help="fatal adverse events (seriousnessdeath)")
parser.add_argument("--tfrom", help="time-from (received by FDA) (YYYYMMDD)")
parser.add_argument("--tto", default=time.strftime('%Y%m%d',time.localtime()), help="time-to (received by FDA) (YYYYMMDD)")
parser.add_argument("--rawquery")
parser.add_argument("--nmax", type=int, default=NMAX, help="max returned records")
parser.add_argument("--api_host", default=fda.aer.API_HOST)
parser.add_argument("--api_base_path", default=fda.aer.API_BASE_PATH)
parser.add_argument("--param_file", default=os.environ['HOME']+"/.fda.yaml")
parser.add_argument("--api_key")
parser.add_argument("-v", "--verbose", default=0, action="count")
args = parser.parse_args()
logging.basicConfig(format='%(levelname)s:%(message)s', level=(logging.DEBUG if args.verbose>1 else logging.INFO))
api_base_url = 'https://'+args.api_host+args.api_base_path
fout = open(args.ofile, "w+") if args.ofile else sys.stdout
params = ReadParamFile(args.param_file)
if args.api_key: params['API_KEY'] = args.api_key
 if 'API_KEY' not in params or not params['API_KEY']:
parser.error('Please specify valid API_KEY via --api_key or --param_file')
t0=time.time()
if args.op == "search":
fda.aer.Utils.Search(args.drug_class, args.drug_ind, args.drug_unii, args.drug_ndc, args.drug_spl, args.tfrom, args.tto, args.serious, args.fatal, args.rawquery, args.nmax, params['API_KEY'], api_base_url, fout)
elif args.op == "info":
rval = fda.aer.Utils.Info(api_base_url)
for field in rval.keys():
if field=='results': continue
print(f"{field:16s}: {rval[field]}")
elif args.op == "counts":
print(fda.aer.Utils.GetCounts(args.tfrom, args.tto, api_base_url))
elif args.op == "showfields":
fields = fda.aer.Utils.GetFields(api_base_url)
for field in fields:
print(f"\t{field}")
else:
parser.error(f"Invalid operation: {args.op}")
logging.info(f"Elapsed time: {time.strftime('%Hh:%Mm:%Ss',time.gmtime(time.time()-t0))}")
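# Hypothetical invocation (module path assumed from the file location; the UNII is
# the example given in the epilog above):
#   python -m BioClients.fda.aer.Client search --drug_unii 786Z46389E --nmax 50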
```
#### File: BioClients/geneontology/Utils.py
```python
import os,sys,re,json,time,logging
import urllib.parse
import pandas as pd
from ..util import rest
API_HOST='api.geneontology.org'
API_BASE_PATH='/api'
BASE_URL = 'https://'+API_HOST+API_BASE_PATH
#
##############################################################################
def GetEntities(ids, base_url=BASE_URL, fout=None):
"""For only one type of entity per call (gene, term)."""
tags=[]; df=pd.DataFrame();
for id_this in ids:
ent = rest.Utils.GetURL(base_url+'/bioentity/'+urllib.parse.quote(id_this), parse_json=True)
logging.debug(json.dumps(ent, sort_keys=True, indent=2))
  if not tags: tags = list(ent.keys())
df = pd.concat([df, pd.DataFrame({tags[j]:[ent[tags[j]]] for j in range(len(tags))})])
logging.info('n_ent: {}'.format(df.shape[0]))
if fout: df.to_csv(fout, "\t", index=False)
return df
##############################################################################
def GetGeneTerms(ids, base_url=BASE_URL, fout=None):
tags=[]; df=pd.DataFrame();
for id_this in ids:
rval = rest.Utils.GetURL(base_url+'/bioentity/gene/{}/function'.format(urllib.parse.quote(id_this)), parse_json=True)
assns = rval['associations'] if 'associations' in rval else []
for assn in assns:
logging.debug(json.dumps(assn, sort_keys=True, indent=2))
   if not tags: tags = list(assn.keys())
df = pd.concat([df, pd.DataFrame({tags[j]:[assn[tags[j]]] for j in range(len(tags))})])
logging.info('n_gene: {}; n_assn: {}'.format(len(ids), df.shape[0]))
if fout: df.to_csv(fout, "\t", index=False)
return df
##############################################################################
```
#### File: BioClients/glygen/Utils.py
```python
import sys,os,re,json,collections,time,urllib.parse,logging,tqdm
import pandas as pd
import requests
#
NCHUNK=100
#
API_HOST="api.glygen.org"
API_BASE_PATH="/glycan"
BASE_URL="https://"+API_HOST+API_BASE_PATH
#
##############################################################################
def Get(ids, base_url=BASE_URL, fout=None):
n_out=0; tags=None; df=None;
for id_this in ids:
response = requests.get(f"{base_url}/detail/{id_this}", headers={"Content-Type":"application/json"}).json()
logging.debug(json.dumps(response, indent=2))
entity = response
if not tags: tags = [tag for tag in entity.keys() if type(entity[tag]) not in (list, dict, collections.OrderedDict)]
df_this = pd.DataFrame({tags[j]:[entity[tags[j]]] for j in range(len(tags))})
if fout is None: df = pd.concat([df, df_this])
else: df_this.to_csv(fout, "\t", index=False, header=bool(n_out==0))
n_out += df_this.shape[0]
logging.info(f"n_out: {n_out}")
if fout is None: return df
##############################################################################
```
#### File: BioClients/gwascatalog/Utils.py
```python
import sys,os,re,json,time,logging,tqdm
import urllib.parse
import pandas as pd
#
from ..util import rest
#
API_HOST='www.ebi.ac.uk'
API_BASE_PATH='/gwas/rest/api'
BASE_URL='https://'+API_HOST+API_BASE_PATH
#
NCHUNK=100;
#
##############################################################################
def ListStudies(base_url=BASE_URL, fout=None):
"""Only simple metadata."""
tags=[]; n_study=0; rval=None; df=None; tq=None;
url_this = base_url+f'/studies?size={NCHUNK}'
while True:
if rval:
if 'next' not in rval['_links']: break
elif url_this == rval['_links']['last']['href']: break
else: url_this = rval['_links']['next']['href']
logging.debug(url_this)
rval = rest.Utils.GetURL(url_this, parse_json=True)
if not rval or '_embedded' not in rval or 'studies' not in rval['_embedded']: break
studies = rval['_embedded']['studies']
if not studies: break
if tq is None: tq = tqdm.tqdm(total=rval["page"]["totalElements"], unit="studies")
for study in studies:
tq.update()
if not tags:
for tag in study.keys():
if type(study[tag]) not in (list, dict) or tag=="diseaseTrait":
tags.append(tag) #Only simple metadata.
df_this = pd.DataFrame({tags[j]:([str(study[tags[j]])] if tags[j] in study else ['']) for j in range(len(tags))})
if fout: df_this.to_csv(fout, "\t", index=False, header=(n_study==0), mode=('w' if n_study==0 else 'a'))
if fout is None: df = pd.concat([df, df_this])
n_study+=1
logging.info(f"n_study: {n_study}")
if fout is None: return(df)
##############################################################################
def GetStudyAssociations(ids, skip=0, nmax=None, base_url=BASE_URL, fout=None):
"""
Mapped genes via SNP links.
arg = authorReportedGene
sra = strongestRiskAllele
https://www.ebi.ac.uk/gwas/rest/api/studies/GCST001430/associations?projection=associationByStudy
"""
n_id=0; n_assn=0; n_loci=0; n_arg=0; n_sra=0; n_snp=0; df=None; tq=None;
quiet = bool(logging.getLogger().getEffectiveLevel()>15)
gcsts=set([]); tags_assn=[]; tags_study=[]; tags_locus=[]; tags_sra=[]; tags_arg=[];
url = base_url+'/studies'
if skip>0: logging.info(f"SKIP IDs skipped: {skip}")
for id_this in ids[skip:]:
if not quiet and tq is None: tq = tqdm.tqdm(total=len(ids)-skip, unit="studies")
if tq is not None: tq.update()
url_this = url+f'/{id_this}/associations?projection=associationByStudy'
rval = rest.Utils.GetURL(url_this, parse_json=True)
if not rval: continue
if '_embedded' in rval and 'associations' in rval['_embedded']:
assns = rval['_embedded']['associations']
else:
logging.error(f'No associations for study: {id_this}')
continue
df_this=None;
for assn in assns:
n_assn+=1
if n_assn==1:
for key,val in assn.items():
if type(val) not in (list, dict):
tags_assn.append(key)
for key in assn['study'].keys():
if type(assn['study'][key]) not in (list, dict):
tags_study.append(key)
for key in assn['loci'][0].keys():
if key not in ('strongestRiskAlleles', 'authorReportedGenes'):
tags_locus.append(key)
for key in assn['loci'][0]['strongestRiskAlleles'][0].keys():
if key != '_links':
tags_sra.append(key)
df_assn = pd.DataFrame({tags_assn[j]:[assn[tags_assn[j]]] for j in range(len(tags_assn))})
df_study = pd.DataFrame({tags_study[j]:[assn['study'][tags_study[j]]] for j in range(len(tags_study))})
df_locus = pd.DataFrame({tags_locus[j]:[assn['loci'][0][tags_locus[j]]] for j in range(len(tags_locus))})
df_locus.columns = ['locus_'+s for s in df_locus.columns]
df_sra = pd.DataFrame({tags_sra[j]:[assn['loci'][0]['strongestRiskAlleles'][0][tags_sra[j]]] for j in range(len(tags_sra))})
df_sra.columns = ['allele_'+s for s in df_sra.columns]
df_assn = pd.concat([df_assn, df_study, df_locus, df_sra], axis=1)
df_this = pd.concat([df_this, df_assn], axis=0)
if 'accessionId' in assn['study']: gcsts.add(assn['study']['accessionId'])
n_loci += len(assn['loci'])
for locus in assn['loci']:
n_sra += len(locus['strongestRiskAlleles'])
for sra in locus['strongestRiskAlleles']:
snp_href = sra['_links']['snp']['href'] if '_links' in sra and 'snp' in sra['_links'] and 'href' in sra['_links']['snp'] else ''
if snp_href: n_snp+=1
if fout: df_this.to_csv(fout, "\t", index=False, header=(n_id==0), mode=('w' if n_id==0 else 'a'))
if fout is None: df = pd.concat([df, df_this], axis=0)
n_id+=1
if n_id==nmax:
logging.info(f"NMAX IDs reached: {nmax}")
break
if tq is not None: tq.close()
n_gcst = len(gcsts)
logging.info(f"INPUT RCSTs: {n_id}; OUTPUT RCSTs: {n_gcst} ; assns: {n_assn} ; loci: {n_loci} ; alleles: {n_sra} ; snps: {n_snp}")
if fout is None: return(df)
##############################################################################
def GetSnps(ids, skip=0, nmax=None, base_url=BASE_URL, fout=None):
"""
Input: rs_id, e.g. rs7329174
loc = location
gc = genomicContext
"""
n_snp=0; n_gene=0; n_loc=0; df=None; tq=None;
tags_snp=[]; tags_loc=[]; tags_gc=[]; tags_gcloc=[]; tags_gene=[];
quiet = bool(logging.getLogger().getEffectiveLevel()>15)
url = base_url+'/singleNucleotidePolymorphisms'
if skip>0: logging.info(f"SKIP IDs skipped: {skip}")
for id_this in ids[skip:]:
if not quiet and tq is None: tq = tqdm.tqdm(total=len(ids)-skip, unit="snps")
if tq is not None: tq.update()
url_this = url+'/'+id_this
snp = rest.Utils.GetURL(url_this, parse_json=True)
if not snp: continue
if 'genomicContexts' not in snp: continue
if len(snp['genomicContexts'])==0: continue
df_this=None;
for gc in snp['genomicContexts']:
if not tags_snp:
for key,val in snp.items():
if type(val) not in (list, dict): tags_snp.append(key)
for key in gc.keys():
if key not in ('gene', 'location', '_links'): tags_gc.append(key)
for key in gc['location'].keys():
if key != '_links': tags_gcloc.append(key)
for key in gc['gene'].keys():
if key != '_links': tags_gene.append(key)
df_snp = pd.DataFrame({tags_snp[j]:[snp[tags_snp[j]]] for j in range(len(tags_snp))})
df_gc = pd.DataFrame({tags_gc[j]:[gc[tags_gc[j]]] for j in range(len(tags_gc))})
gcloc = gc['location']
df_gcloc = pd.DataFrame({tags_gcloc[j]:[gcloc[tags_gcloc[j]]] for j in range(len(tags_gcloc))})
gene = gc['gene']
try: gene["ensemblGeneIds"] = (",".join([gid["ensemblGeneId"] for gid in gene["ensemblGeneIds"]]))
except: pass
try: gene["entrezGeneIds"] = (",".join([gid["entrezGeneId"] for gid in gene["entrezGeneIds"]]))
except: pass
df_gene = pd.DataFrame({tags_gene[j]:[gene[tags_gene[j]]] for j in range(len(tags_gene))})
df_snp = pd.concat([df_snp, df_gc, df_gcloc, df_gene], axis=1)
df_this = pd.concat([df_this, df_snp], axis=0)
n_gene+=1
    if fout: df_this.to_csv(fout, "\t", index=False, header=(n_snp==0), mode=('w' if n_snp==0 else 'a'))
    if fout is None: df = pd.concat([df, df_this], axis=0)
    n_snp+=1
    if n_snp==nmax:
      logging.info(f"NMAX IDs reached: {nmax}")
      break
  if tq is not None: tq.close()
logging.info(f"SNPs: {n_snp}; genes: {n_gene}")
if fout is None: return(df)
##############################################################################
def SearchStudies(ids, searchtype, base_url=BASE_URL, fout=None):
tags=[]; n_study=0; rval=None; df=None; tq=None;
url = base_url+'/studies/search'
if searchtype=='gcst':
url+='/'
elif searchtype.lower()=='pubmedid':
url+='/findByPublicationIdPubmedId?pubmedId={}'
elif searchtype.lower()=='efotrait':
url+='/findByEfoTrait?efoTrait={}'
elif searchtype.lower()=='efouri':
url+='/findByEfoUri?efoUri={}'
elif searchtype.lower()=='accessionid':
url+='/findByAccessionId?accessionId={}'
else:
logging.error(f'Searchtype not supported: {searchtype}')
return
for id_this in ids:
url_this = url.format(urllib.parse.quote(id_this))
rval = rest.Utils.GetURL(url_this, parse_json=True)
if not rval or '_embedded' not in rval or 'studies' not in rval['_embedded']: continue
studies = rval['_embedded']['studies']
if not studies: continue
for study in studies:
if not tags:
for tag in study.keys():
if type(study[tag]) not in (list, dict) or tag=="diseaseTrait":
tags.append(tag) #Only simple metadata.
n_study+=1
df_this = pd.DataFrame({tags[j]:([str(study[tags[j]])] if tags[j] in study else ['']) for j in range(len(tags))})
if fout is None: df = pd.concat([df, df_this])
      else: df_this.to_csv(fout, "\t", index=False, header=(n_study==1))
logging.debug(json.dumps(rval, sort_keys=True, indent=2))
logging.info(f"n_study: {n_study}")
if fout is None: return(df)
##############################################################################
```
#### File: idg/pharos/Utils.py
```python
import sys,os,json,re,time,logging
#
from python_graphql_client import GraphqlClient
#
# curl 'https://pharos-api.ncats.io/graphql' -H 'Accept-Encoding: gzip, deflate, br' -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'Connection: keep-alive' -H 'DNT: 1' -H 'Origin: https://pharos-api.ncats.io' --data-binary '{"query":"query targetDetails {\n target(q: { sym: \"ACE2\" }) {\n name\n tdl\n fam\n sym\n description\n novelty\n }\n}\n"}' --compressed
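# GetTargets() and GetDiseases() below issue equivalent queries via
# python_graphql_client; e.g., using the gene symbol from the curl example above:
#   GetTargets(["ACE2"], "sym")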
#
API_ENDPOINT="https://pharos-api.ncats.io/graphql"
#
API_HEADERS = { 'Accept-Encoding': 'gzip, deflate, br',
'Content-Type': 'application/json', 'Accept': 'application/json',
'Connection': 'keep-alive', 'DNT': '1',
'Origin': 'https://pharos-api.ncats.io' }
#
IDTYPES_TARGET = {
'tcrdid':{'type':'Int'},
'uniprot':{'type':'String'},
'sym':{'type':'String'},
}
IDTYPES_DISEASE = {
'cui':{'type':'String'},
'doid':{'type':'String'},
'name':{'type':'String'},
}
#
#############################################################################
def GetTargets(ids, idtype, ep=API_ENDPOINT, fout=None):
tags=[]; n_out=0;
client = GraphqlClient(endpoint=ep, verify=True)
query = f"""\
query targetDetails($id: {IDTYPES_TARGET[idtype]['type']}) {{
target(q: {{ {idtype}: $id }}) {{
name
tdl
fam
sym
tcrdid
uniprot
description
novelty
}}
}}
"""
for id_this in ids:
logging.debug(f"id_this: {id_this}")
variables = { "id": int(id_this) if IDTYPES_TARGET[idtype]['type']=='Int' else id_this }
try:
data = client.execute(query=query, variables=variables, headers=API_HEADERS)
except Exception as e:
logging.error(e)
continue
print(json.dumps(data, indent=2))
n_out+=1
logging.info(f"n_in: {len(ids)}; n_out: {n_out}")
#############################################################################
def GetDiseases(ids, idtype, ep=API_ENDPOINT, fout=None):
tags=[]; n_out=0;
client = GraphqlClient(endpoint=ep, verify=True)
query = f"""\
query diseaseDetails($id: String) {{
disease(name: $id) {{
name
dids {{ id, doName, dataSources, doDefinition }}
doDescription
uniprotDescription
children {{
name
dids {{ id, doName, dataSources, doDefinition }}
doDescription
}}
targetCounts {{
name
value
}}
}}
}}
"""
for id_this in ids:
logging.debug(f"id_this: {id_this}")
variables = { "id": id_this }
try:
data = client.execute(query=query, variables=variables, headers=API_HEADERS)
except Exception as e:
logging.error(e)
continue
print(json.dumps(data, indent=2))
n_out+=1
logging.info(f"n_in: {len(ids)}; n_out: {n_out}")
#############################################################################
def Test(ep=API_ENDPOINT, fout=None):
client = GraphqlClient(endpoint=ep, verify=True)
query = """\
query diseaseDetails {{
disease {{
name
dids {{
id(id:"DOID:2841"), doName
}}
doDescription
uniprotDescription
"""
try:
data = client.execute(query=query, headers=API_HEADERS)
except Exception as e:
logging.error(e)
return
print(json.dumps(data, indent=2))
```
#### File: idg/tcrd/Utils.py
```python
import os,sys,re,time,json,logging,yaml,tqdm
import pandas as pd
TDLS = ['Tdark', 'Tbio', 'Tchem', 'Tclin']
#############################################################################
def Info(dbcon, fout=None):
sql = 'SELECT * FROM dbinfo'
df = pd.read_sql(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
return df
#############################################################################
def ListTables(dbcon, fout=None):
sql = 'SHOW TABLES'
df = pd.read_sql(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_table: {df.shape[0]}")
return df
#############################################################################
def ListDatasets(dbcon, fout=None):
sql = 'SELECT * FROM dataset'
df = pd.read_sql(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_table: {df.shape[0]}")
return df
#############################################################################
def DescribeTables(dbcon, fout=None):
return ListColumns(dbcon, fout)
#############################################################################
def ListColumns(dbcon, fout=None):
n_table=0; df=pd.DataFrame();
for table in ListTables(dbcon).iloc[:,0]:
n_table+=1
sql = ('DESCRIBE '+table)
df_this = pd.read_sql(sql, dbcon)
cols = list(df_this.columns.values)
df_this['table'] = table
df_this = df_this[['table']+cols]
df = pd.concat([df, df_this])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_columns: {df.shape[0]}; n_tables: {df.table.nunique()}")
return df
#############################################################################
def TableRowCounts(dbcon, fout=None):
n_table=0; df=pd.DataFrame();
for table in ListTables(dbcon).iloc[:,0]:
n_table+=1
sql = ('SELECT count(*) AS row_count FROM '+table)
df_this = pd.read_sql(sql, dbcon)
cols = list(df_this.columns.values)
df_this['table'] = table
df_this = df_this[['table']+cols]
df = pd.concat([df, df_this])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_table: {df.shape[0]}")
return df
#############################################################################
def TDLCounts(dbcon, fout=None):
sql="SELECT tdl, COUNT(id) AS target_count FROM target GROUP BY tdl"
df = pd.read_sql(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
return df
#############################################################################
def AttributeCounts(dbcon, fout=None):
sql='SELECT ga.type, COUNT(ga.id) FROM gene_attribute ga GROUP BY ga.type'
df = pd.read_sql(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
return df
#############################################################################
def XrefCounts(dbcon, fout=None):
sql = ('SELECT xtype, COUNT(DISTINCT value) FROM xref GROUP BY xtype ORDER BY xtype')
df = pd.read_sql(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
return df
#############################################################################
def ListXrefTypes(dbcon, fout=None):
sql='SELECT DISTINCT xtype FROM xref ORDER BY xtype'
df = pd.read_sql(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_xreftypes: {df.shape[0]}")
return df
#############################################################################
def ListXrefs(dbcon, xreftypes, fout=None):
if xreftypes and type(xreftypes) not in (list, tuple):
xreftypes = [xreftypes]
cols=['target_id', 'protein_id', 'xtype', 'value']
sql = f"""SELECT DISTINCT {(", ".join(cols))} FROM xref"""
if xreftypes:
sql += f""" WHERE xtype IN ({("'"+("','".join(xreftypes)+"'"))})"""
logging.debug(f'SQL: "{sql}"')
df = pd.read_sql(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"Target count: {df.target_id.nunique()}")
logging.info(f"Protein count: {df.protein_id.nunique()}")
logging.info(f"XrefType count: {df.xtype.nunique()}")
logging.info(f"Xref count: {df.value.nunique()}")
return df
#############################################################################
def ListTargetFamilies(dbcon, fout=None):
sql="SELECT DISTINCT fam TargetFamily FROM target ORDER BY fam"
df = pd.read_sql(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_tfams: {df.shape[0]}")
return df
#############################################################################
def ListTargets(dbcon, tdls, tfams, fout=None):
sql = """
SELECT
target.id tcrdTargetId,
target.name tcrdTargetName,
target.fam tcrdTargetFamily,
target.tdl TDL,
target.ttype tcrdTargetType,
target.idg idgList,
protein.id tcrdProteinId,
protein.uniprot uniprotId,
protein.sym tcrdGeneSymbol,
protein.family tcrdProteinFamily,
protein.geneid ncbiGeneId,
protein.up_version uniprotVersion,
protein.chr,
protein.description tcrdProteinDescription,
protein.dtoid dtoId,
protein.dtoclass dtoClass,
protein.stringid ensemblProteinId,
x.value ensemblGeneId
FROM
target
LEFT OUTER JOIN
t2tc ON t2tc.target_id = target.id
LEFT OUTER JOIN
protein ON protein.id = t2tc.protein_id
LEFT OUTER JOIN
(SELECT * FROM xref WHERE xref.xtype = 'Ensembl' AND xref.value REGEXP '^ENSG'
) x ON x.protein_id = protein.id
"""
wheres=[]
if tdls: wheres.append(f"""target.tdl IN ({("'"+("','".join(tdls))+"'")})""")
if tfams: wheres.append(f"""target.fam IN ({("'"+("','".join(tfams))+"'")})""")
if wheres: sql+=(' WHERE '+(' AND '.join(wheres)))
logging.debug(sql)
df = pd.read_sql(sql, dbcon)
if tdls: logging.info(f"""TDLs: {",".join(tdls)}""")
if tfams: logging.info(f"""TargetFamilies: {",".join(tfams)}""")
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"rows: {df.shape[0]}")
logging.info(f"Target count: {df.tcrdTargetId.nunique()}")
logging.info(f"Protein count: {df.tcrdProteinId.nunique()}")
logging.info(f"Uniprot count: {df.uniprotId.nunique()}")
logging.info(f"ENSP count: {df.ensemblProteinId.nunique()}")
logging.info(f"ENSG count: {df.ensemblGeneId.nunique()}")
logging.info(f"GeneSymbol count: {df.tcrdGeneSymbol.nunique()}")
return df
#############################################################################
def ListTargetsByDTO(dbcon, fout=None):
sql = """
SELECT DISTINCT
target.id tcrdTargetId,
target.name tcrdTargetName,
target.fam tcrdTargetFamily,
protein.id tcrdProteinId,
protein.sym tcrdGeneSymbol,
CONVERT(protein.geneid, CHAR) ncbiGeneId,
protein.uniprot uniprotId,
p2dto.dtoid dtoId,
CONVERT(p2dto.generation, CHAR) dtoGeneration,
dto.name dtoClass
FROM
target
JOIN t2tc ON t2tc.target_id = target.id
JOIN protein ON protein.id = t2tc.protein_id
LEFT OUTER JOIN p2dto ON p2dto.protein_id = protein.id
LEFT OUTER JOIN dto ON dto.dtoid = p2dto.dtoid
ORDER BY
target.id, protein.id, p2dto.generation DESC
"""
logging.debug(sql)
df = pd.read_sql(sql, dbcon, coerce_float=False)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"rows: {df.shape[0]}")
logging.info(f"Target count: {df.tcrdTargetId.nunique()}")
logging.info(f"Protein count: {df.tcrdProteinId.nunique()}")
logging.info(f"Uniprot count: {df.uniprotId.nunique()}")
logging.info(f"GeneSymbol count: {df.tcrdGeneSymbol.nunique()}")
logging.info(f"DTO_ID count: {df.dtoId.nunique()}")
return df
#############################################################################
def GetTargetPage(dbcon, tid, fout=None):
df = GetTargets(dbcon, [tid], "TID", None)
target = df.to_dict(orient='records')
fout.write(json.dumps(target, indent=2))
#############################################################################
def GetTargets(dbcon, ids, idtype, fout=None):
cols=[
'target.id',
'target.name',
'target.description',
'target.fam',
'target.tdl',
'target.ttype',
'protein.id',
'protein.sym',
'protein.uniprot',
'protein.geneid',
'protein.name',
'protein.description',
'protein.family',
'protein.chr']
sql = f"""\
SELECT
{(','.join(map(lambda s: (s+" "+s.replace('.', '_')), cols)))}
FROM
target
LEFT OUTER JOIN
t2tc ON target.id = t2tc.target_id
LEFT OUTER JOIN
protein ON protein.id = t2tc.protein_id
"""
n_id=0; n_hit=0; df=pd.DataFrame();
for id_this in ids:
n_id+=1
wheres=[]
if idtype.lower()=='tid':
wheres.append(f"target.id = {id_this}")
elif idtype.lower()=='uniprot':
wheres.append(f"protein.uniprot = '{id_this}'")
elif idtype.lower() in ('gid', 'geneid'):
wheres.append(f"protein.geneid = '{id_this}'")
elif idtype.lower() in ('genesymb', 'genesymbol'):
wheres.append(f"protein.sym = '{id_this}'")
elif idtype.lower() == 'ensp':
wheres.append(f"protein.stringid = '{id_this}'")
else:
logging.error(f'Unknown ID type: {idtype}')
return
sql_this = sql+(' WHERE '+(' AND '.join(wheres)))
logging.debug(f'SQL: "{sql_this}"')
logging.debug(f'ID: {idtype} = "{id_this}"')
df_this = pd.read_sql(sql_this, dbcon)
if df_this.shape[0]>0: n_hit+=1
df = pd.concat([df, df_this])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_id: {n_id}; n_hit: {n_hit}")
return df
#############################################################################
def GetTargetsByXref(dbcon, ids, xreftypes, fout=None):
if xreftypes and type(xreftypes) not in (list, tuple):
xreftypes = [xreftypes]
cols=[ 'target.description',
'target.id',
'target.fam',
'target.name',
'target.tdl',
'target.ttype',
'protein.chr',
'protein.description',
'protein.family',
'protein.geneid',
'protein.id',
'protein.name',
'protein.sym',
'protein.uniprot',
'xref.xtype',
'xref.value']
sql = f"""\
SELECT
  {(','.join(map(lambda s: s+" "+s.replace('.', '_'), cols)))}
FROM
target
JOIN
t2tc ON target.id = t2tc.target_id
JOIN
protein ON protein.id = t2tc.protein_id
JOIN
xref ON xref.protein_id = protein.id
"""
n_id=0; n_hit=0; df=pd.DataFrame();
for id_this in ids:
n_id+=1
wheres=[]
wheres.append(f"""xref.xtype IN ({("'"+("','".join(xreftypes))+"'")})""")
wheres.append(f"xref.value = '{id_this}'")
sql_this = sql+('WHERE '+(' AND '.join(wheres)))
logging.debug(f'SQL: "{sql_this}"')
df_this = pd.read_sql(sql_this, dbcon)
if df_this.shape[0]>0:
n_hit+=1
df = pd.concat([df, df_this])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_id: {n_id}; n_hit: {n_hit}")
return df
#############################################################################
def GetPathways(dbcon, ids, fout=None):
n_id=0; n_hit=0; df=pd.DataFrame(); pids_all=set();
cols=[ 't2p.target_id',
't2p.id',
't2p.source',
't2p.id_in_source',
't2p.name',
't2p.description',
't2p.url' ]
sql = f"""SELECT {(','.join(map(lambda s: s+" "+s.replace('.', '_')), cols))} FROM target2pathway t2p JOIN target t ON t.id = t2p.target_id"""
for id_this in ids:
n_id+=1
sql_this = sql+(f"WHERE t.id = {id_this}")
logging.debug(f'SQL: "{sql_this}"')
df_this = pd.read_sql(sql_this, dbcon)
if df_this.shape[0]>0: n_hit+=1
df = pd.concat([df, df_this])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_query: {n_query}; n_hit: {n_hit}")
return df
#############################################################################
def ListDiseases(dbcon, fout=None):
"Ontologies: DOID, UMLS, MESH, AmyCo, OrphaNet, NCBIGene"
sql="""
SELECT
disease.name diseaseName,
disease.did diseaseId,
IF((disease.did REGEXP '^C[0-9]+$'), 'UMLS', SUBSTR(disease.did, 1, LOCATE(':', disease.did)-1)) diseaseOntology,
disease.description diseaseDescription,
disease.dtype associationType,
disease_type.description associationDescription,
disease.source associationSource,
COUNT(disease.protein_id) nTargetAssociations
FROM
disease
LEFT OUTER JOIN disease_type ON disease_type.name = disease.dtype
GROUP BY
disease.dtype,
disease_type.description,
disease.name,
disease.did,
disease.description,
disease.source
"""
df = pd.read_sql(sql, dbcon)
if fout is not None: df.to_csv(fout, "\t", index=False)
logging.info(f"rows: {df.shape[0]}")
logging.info(f"diseaseIDs: {df.diseaseId.nunique()}")
logging.info(f"diseaseNames: {df.diseaseName.nunique()}")
for ontology in sorted(df.diseaseOntology.unique().astype(str).tolist()):
if df[df.diseaseOntology==ontology].diseaseId.nunique()>0:
logging.info(f"[{ontology}] diseaseIDs: {df[df.diseaseOntology==ontology].diseaseId.nunique()}")
return df
#############################################################################
def ListDiseaseTypes(dbcon, fout=None):
df = ListDiseases(dbcon, None)
df = df[["associationType", "associationDescription", "nTargetAssociations"]]
df = df.groupby(["associationType", "associationDescription"]).sum()
df.reset_index(drop=False, inplace=True)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"rows: {df.shape[0]}")
return df
#############################################################################
def GetDiseaseAssociations(dbcon, ids, fout=None):
n_id=0; n_hit=0; df=pd.DataFrame();
sql = """\
SELECT
disease.name diseaseName,
disease.did diseaseId,
IF((disease.did REGEXP '^C[0-9]+$'), 'UMLS', SUBSTR(disease.did, 1, LOCATE(':', disease.did)-1)) diseaseOntology,
disease.description diseaseDescription,
disease.dtype associationType,
disease_type.description associationDescription,
disease.source associationSource,
protein.id tcrdProteinId,
protein.uniprot uniprotId,
protein.sym tcrdGeneSymbol
FROM
disease
LEFT OUTER JOIN disease_type ON disease_type.name = disease.dtype
JOIN protein ON protein.id = disease.protein_id
WHERE
disease.did = '{0}'
"""
for id_this in ids:
n_id+=1
sql_this = sql.format(id_this)
df_this = pd.read_sql(sql_this, dbcon)
if df_this.shape[0]>0: n_hit+=1
if fout is not None: df_this.to_csv(fout, "\t", index=False)
else: df = pd.concat([df, df_this])
logging.info(f"n_id: {n_id}; n_hit: {n_hit}")
if fout is None: return df
#############################################################################
def GetDiseaseAssociationsPage(dbcon, did, fout=None):
df = GetDiseaseAssociations(dbcon, [did], None)
disease = df[["diseaseName", "diseaseId", "diseaseOntology", "diseaseDescription"]].drop_duplicates().to_dict(orient='records')[0]
associations = df[["associationType", "associationDescription", "associationSource", "tcrdProteinId", "uniprotId", "tcrdGeneSymbol"]].to_dict(orient='records')
disease["associations"] = associations
fout.write(json.dumps(disease, indent=2))
#############################################################################
def ListPhenotypes(dbcon, fout=None):
sql="""
SELECT
p.ptype,
CONCAT(IF(INSTR(IFNULL(p.trait, ""), ';')>0, SUBSTR(IFNULL(p.trait, ""), 1, INSTR(IFNULL(p.trait, ""), ';')-1), IFNULL(p.trait, "")),IFNULL(p.term_id, "")) p_identifier,
p.term_name,
p.term_description,
pt.ontology ptype_ontology,
pt.description ptype_description,
COUNT(p.protein_id) n_target_associations
FROM
phenotype p
JOIN phenotype_type pt ON pt.name = p.ptype
GROUP BY
p.ptype,
p_identifier,
p.term_name,
p.term_description,
pt.ontology,
pt.description
"""
df = pd.read_sql(sql, dbcon)
df.loc[((df['ptype']=='OMIM') & ((df['ptype_ontology']=='')|(df['ptype_ontology'].isna()))),'ptype_ontology'] = 'OMIM' # Kludge
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"rows: {df.shape[0]}")
for ptype in df.ptype.unique().tolist():
logging.info(f"[{ptype}] phenotype_IDs: {df[df.ptype==ptype].p_identifier.nunique()}")
return df
#############################################################################
def ListPhenotypeTypes(dbcon, fout=None):
df = ListPhenotypes(dbcon, None)
df = df[["ptype", "ptype_ontology", "n_target_associations"]]
df = df.groupby(["ptype", "ptype_ontology"]).sum()
df.reset_index(drop=False, inplace=True)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"rows: {df.shape[0]}")
return df
#############################################################################
def ListPublications(dbcon, fout=None):
df=None; n_out=0; tq=None; NCHUNK=1000;
quiet = bool(logging.getLogger().getEffectiveLevel()>15)
N_row = pd.read_sql("SELECT COUNT(*) FROM pubmed", dbcon).iloc[0,0]
sql="""
SELECT
id AS pmid,
title,
journal,
date,
authors,
REPLACE(abstract, '\n', ' ') AS abstract
FROM
pubmed
"""
df_itr = pd.read_sql(sql, dbcon, chunksize=NCHUNK)
for df_this in df_itr:
if not quiet and tq is None: tq = tqdm.tqdm(total=N_row, unit="pubs")
if tq is not None: tq.update(NCHUNK)
if fout is not None: df_this.to_csv(fout, "\t", index=False)
else: df = pd.concat([df, df_this])
n_out += df_this.shape[0]
if tq is not None: tq.close()
logging.info(f"rows: {n_out}")
if fout is None: return df
#############################################################################
```
#### File: BioClients/jensenlab/Utils.py
```python
import sys,os,re,json,time,logging
import pandas as pd
from ..util import rest
#
API_HOST='api.jensenlab.org'
API_BASE_PATH=''
BASE_URL='https://'+API_HOST+API_BASE_PATH
#
##############################################################################
def GetDiseaseGenes(channel, ids, nmax, base_url=BASE_URL, fout=None):
tags=[]; df=pd.DataFrame();
for id_this in ids:
rval = rest.Utils.GetURL(base_url+f'/{channel}?type1=-26&id1={id_this}&type2=9606&limit={nmax}&format=json', parse_json=True)
genes = rval[0] #dict
ensgs = list(genes.keys())
flag = rval[1] #?
for ensg in ensgs:
gene = genes[ensg]
logging.debug(json.dumps(gene, indent=2))
if not tags: tags = list(gene.keys())
df = pd.concat([df, pd.DataFrame({tags[j]:[gene[tags[j]]] for j in range(len(tags))})])
if fout: df.to_csv(fout, "\t", index=False)
logging.info("n_out: {}".format(df.shape[0]))
return df
##############################################################################
def GetPubmedComentionGenes(ids, base_url=BASE_URL, fout=None):
"""Search by co-mentioned terms."""
tags=[]; df=pd.DataFrame();
for id_this in ids:
rval = rest.Utils.GetURL(base_url+f'/Textmining?query={id_this}[tiab]&type2=9606&limit=10&format=json', parse_json=True)
genes = rval[0] #dict
ensgs = list(genes.keys())
flag = rval[1] #?
for ensg in ensgs:
gene = genes[ensg]
logging.debug(json.dumps(gene, indent=2))
if not tags: tags = list(gene.keys())
df = pd.concat([df, pd.DataFrame({tags[j]:[gene[tags[j]]] for j in range(len(tags))})])
if fout: df.to_csv(fout, "\t", index=False)
logging.info("n_out: {}".format(df.shape[0]))
return df
##############################################################################
```
#### File: BioClients/mygene/Utils.py
```python
import sys,os
import pandas as pd
import mygene as mg
#
FIELDS = 'HGNC,symbol,name,taxid,entrezgene,ensemblgene'
NCHUNK=100;
#
#############################################################################
def GetGenes(ids, fields=FIELDS, fout=None):
"""Get genes by Entrez or Ensembl gene ID."""
df=pd.DataFrame();
mgi = mg.MyGeneInfo()
ichunk=0;
while ichunk*NCHUNK<len(ids):
df_this = mgi.getgenes(ids[ichunk*NCHUNK:((ichunk+1)*NCHUNK)], fields, as_dataframe=True)
df = pd.concat([df, df_this])
ichunk+=1
if fout: df.to_csv(fout, "\t", index=False)
return df
#############################################################################
def SearchGenes(queries, species, fout=None):
"""Search genes by symbol, etc. using MyGene syntax."""
df=pd.DataFrame();
mgi = mg.MyGeneInfo()
for qry in queries:
df_this = mgi.query(qry, species=species, as_dataframe=True)
df = pd.concat([df, df_this])
if fout: df.to_csv(fout, "\t", index=False)
return df
#############################################################################
```
#### File: pubchem/ftp/pubchem_ftp_gini_index.py
```python
import os,sys,re,getopt,gzip,tempfile
try:
import gdbm
except:
import dbm as gdbm
from ... import pubchem
from ...util import gini_utils
PROG=os.path.basename(sys.argv[0])
DATADIR="/pangolin_home/data/pubchem/bioassay/csv/data"
#############################################################################
if __name__=='__main__':
usage='''
%(PROG)s - Gini selectivity analysis from assay CSV files
options:
--indir=<DIR> ... dir w/ csv.gz assay data [%(DATADIR)s]
or
--infile=<FILE> ... input csv.gz assay data
--aids=<AIDS> ... list of AIDs to select within --indir dir
--aidfile=<AIDFILE> ... file of AIDs to select within --indir dir
--sids=<SIDS> ... list of SIDs to select
--sidfile=<SIDFILE> ... file of SIDs to select (ignore others)
--out_gini=<outfile> ... SIDs w/ activity counts and Gini
--out_raw=<outfile> ... SIDs w/ scores per assay
--use_cids ... use CIDs instead of SIDs (all i/o)
--resume_gdbm_file=<gdbm_file> ... resume job with existing gdbm file
--v ... verbose
--vv ... very verbose
--h ... this help
'''%{'PROG':PROG,'DATADIR':DATADIR}
def ErrorExit(msg):
print >>sys.stderr,msg
sys.exit(1)
indir=DATADIR;
infile=None; verbose=0;
out_gini_file=None;
out_raw_file=None;
aids=None; aidfile=None; sids=None; sidfile=None;
use_cids=False; resume_gdbm_file=None;
opts,pargs = getopt.getopt(sys.argv[1:],'',['h','v','vv','indir=',
'aids=','aidfile=','infile=','out_gini=','resume_gdbm_file=',
'out_raw=',
'sids=','sidfile=','use_cids'])
if not opts: ErrorExit(usage)
for (opt,val) in opts:
if opt=='--h': ErrorExit(usage)
elif opt=='--indir': indir=val
elif opt=='--infile': infile=val
elif opt=='--out_gini': out_gini_file=val
elif opt=='--out_raw': out_raw_file=val
elif opt=='--aids': aids=val
elif opt=='--aidfile': aidfile=val
elif opt=='--sids': sids=val
elif opt=='--sidfile': sidfile=val
elif opt=='--use_cids': use_cids=True
elif opt=='--resume_gdbm_file': resume_gdbm_file=val
elif opt=='--vv': verbose=2
elif opt=='--v': verbose=1
else: ErrorExit('Illegal option: %s'%val)
idtag='SID'
if use_cids: idtag='CID'
if not (indir or infile):
ErrorExit('input dir or file required\n'+usage)
aidset=[]
if aids:
for val in re.split('[\s,]+',aids):
if not val.strip(): continue
aidset.append(int(val.strip()))
elif aidfile:
f=open(aidfile)
if not f:
ErrorExit('cannot open: %s'%aidfile)
lines=f.readlines()
for line in lines:
if not line.strip(): continue
aidset.append(int(line.strip()))
sidset={} ## use a hash for speed (could be many sids)
if sids:
for val in re.split('[\s,]+',sids):
if not val.strip(): continue
sidset[int(val.strip())]=True
elif sidfile:
f=open(sidfile)
if not f:
ErrorExit('cannot open: %s'%sidfile)
lines=f.readlines()
for line in lines:
if not line.strip(): continue
line=re.sub('\s.*$','',line)
sidset[int(line.strip())]=True
if len(sidset.keys())==0: sidset=None
else:
print >>sys.stderr, '%s: total %ss selected: %d'%(PROG,idtag,len(sidset.keys()))
files=[]
if infile:
try:
aid=int(re.sub('\.csv\.gz','',os.path.basename(infile)))
except:
ErrorExit('cannot parse AID: "%s"'%infile)
files.append((infile,aid))
else:
aidset_foundfile=[]
for file in os.listdir(indir):
if not re.search('\.csv\.gz',file): continue
try:
aid=int(re.sub('\.csv\.gz','',file))
except:
print >>sys.stderr, 'cannot parse AID: "%s"'%file
continue
if aidset and (aid not in aidset): continue
fpath=indir+'/'+file
files.append((fpath,aid))
aidset_foundfile.append(aid)
if aidset:
for aid in aidset:
if aid not in aidset_foundfile:
print >>sys.stderr, 'ERROR: no file for AID: "%d"'%aid
fout_gini=None
if out_gini_file:
fout_gini=open(out_gini_file,'w')
if not fout_gini:
ErrorExit('cannot open: %s'%out_gini_file)
fout_raw=None
if out_raw_file:
fout_raw=open(out_raw_file,'w')
if not fout_raw:
ErrorExit('cannot open: %s'%out_raw_file)
if resume_gdbm_file:
sids_db_path=resume_gdbm_file
sids_db=gdbm.open(sids_db_path,'w')
else:
tempfile.tempdir=os.environ['HOME']+'/Downloads'
fd,sids_db_path = tempfile.mkstemp('_sids_db',PROG)
os.close(fd)
sids_db=gdbm.open(sids_db_path,'n')
sids_list=[]
if fout_gini:
fout_gini.write('SID\tn_inactive\tn_active\tn_inconclusive\tn_unspecified\tn_discrepant\tn_tested\tgini\n')
sids_active=[]; sids_inactive=[]; sids_inconclusive=[]; sids_unspecified=[];
sids_discrepant=[]; sids_tested=[];
n_datapoints_total=0
i_file=0
for fpath,aid in files:
i_file+=1
print >>sys.stderr, '%d. [%d]: %s'%(i_file,aid,fpath)
if sids_db.has_key('AID_DONE_FLAG_%d'%aid):
continue
f=gzip.open(fpath)
ftxt=f.read()
sids_this=pubchem.ftp.Utils.ExtractOutcomes(ftxt,sidset,use_cids)
n_active=0; n_inactive=0; n_inconclusive=0; n_unspecified=0; n_discrepant=0;
for sid in sids_this.keys():
if not sids_db.has_key('%d'%sid):
sids_db['%d'%sid]=''
sids_list.append(sid)
outcome=sids_this[sid]['outcome']
sids_db['%d_%d'%(sid,aid)]=('%d'%outcome)
aids_this=pubchem.ftp.Utils.Str2Ints(sids_db['%d'%(sid)])
if aid not in aids_this:
aids_this.append(aid)
sids_db['%d'%(sid)]=pubchem.ftp.Utils.Ints2Str(aids_this)
n_datapoints_total+=1
if outcome==2: n_active+=1
elif outcome==1: n_inactive+=1
elif outcome==3: n_inconclusive+=1
elif outcome==4: n_unspecified+=1
elif outcome==5: n_discrepant+=1
else: print >>sys.stderr, 'ERROR: outcome=%d'%(sids_this[sid]['outcome'])
f.close()
n_total=n_active+n_inactive+n_inconclusive+n_unspecified+n_discrepant
n_tested=n_total-n_unspecified
if verbose>1:
print >>sys.stderr, '\t active: %3d'%(n_active)
print >>sys.stderr, '\t inactive: %3d'%(n_inactive)
print >>sys.stderr, '\t inconclusive: %3d'%(n_inconclusive)
print >>sys.stderr, '\t discrepant: %3d'%(n_discrepant)
print >>sys.stderr, '\t(total tested: %3d)'%(n_tested)
print >>sys.stderr, '\t unspecified: %3d'%(n_unspecified)
print >>sys.stderr, '\t total: %3d'%(n_total)
sids_db['AID_DONE_FLAG_%d'%aid]='done' #file done; for resume
n_ginis=0;
for sid in sids_list:
if fout_raw:
fout_raw.write('%d'%(sid))
aids_this=pubchem.ftp.Utils.Str2Ints(sids_db['%d'%(sid)])
if not aids_this:
pass #?
scores=[]
n_total=0; n_active=0; n_inactive=0; n_inconclusive=0;
n_unspecified=0; n_discrepant=0;
for aid in aids_this:
outcome=sids_db['%d_%d'%(sid,aid)]
if outcome=='1':
score=0.0 # 1:inactive
n_inactive+=1
elif outcome=='2':
score=1.0 # 2:active
n_active+=1
elif outcome=='3':
score=0.5 # 3:inconclusive
n_inconclusive+=1
      elif outcome=='4':
        n_unspecified+=1
        continue # 4:unspecified; not tested, no score assigned
elif outcome=='5':
score=0.5 # 5:discrepant
n_discrepant+=1
else: continue #error
scores.append(score)
if fout_raw:
fout_raw.write(',%d:%.1f'%(aid,score))
if fout_raw:
fout_raw.write('\n')
n_total=n_active+n_inactive+n_inconclusive+n_unspecified+n_discrepant
n_tested=n_total-n_unspecified
    if not scores:
      continue # no scored outcomes for this SID; Gini undefined
gini=gini_utils.GiniProcessor(scores)
n_ginis+=1
if fout_gini:
fout_gini.write('%d\t%d\t%d\t%d\t%d\t%d\t%d\t%.2f\n'%(sid,n_inactive,n_active,n_inconclusive,n_unspecified,n_discrepant,n_tested,gini))
if fout_gini: fout_gini.close()
if fout_raw: fout_raw.close()
os.unlink(sids_db_path)
print >>sys.stderr, 'DEBUG: gdbm file: %s'%(sids_db_path)
print >>sys.stderr, '%s: number of assay files: %d'%(PROG,len(files))
print >>sys.stderr, '%s: total %ss: %d'%(PROG,idtag,len(sids_list))
print >>sys.stderr, '%s: total Gini Indices: %d'%(PROG,n_ginis)
print >>sys.stderr, '%s: total datapoints: %d'%(PROG,n_datapoints_total)
```
#### File: pubchem/rdf/Client.py
```python
import sys,os,re,getopt,types,codecs
#
from ...util import rest
#
PROG=os.path.basename(sys.argv[0])
#
API_HOST='rdf.ncbi.nlm.nih.gov'
API_BASE_PATH='/pubchem'
#
OFMTS={
'CSV': {'ext':'csv', 'mime':'text/csv'},
'HTML': {'ext':'html', 'mime':'text/html'},
'JSON': {'ext':'json', 'mime':'application/json'},
'RDF': {'ext':'rdf', 'mime':'text/rdf'},
'RDFXML': {'ext':'rdfxml', 'mime':'application/rdf+xml'},
'TURTLE': {'ext':'ttl', 'mime':'application/x-turtle'}
}
#
# 'NTRIPLES': {'ext':'ntriples', 'mime':'text/plain'},
# 'N3': {'ext':'n3', 'mime':'text/rdf+n3'},
# 'RDFXML-ABBREV': {'ext':'rdfxml-abbrev','mime':'application/rdf+xml+abbrev'},
#############################################################################
### Problem: since each request returns headers, how do we request RDF
### for multiple IDs? (Hence kludge here.)
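### GetData() below applies the kludge: prefix/header lines (those starting
### with '@') are written only for the first response and skipped for
### subsequent IDs, so the concatenated RDF output keeps one set of prefixes.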
#
def GetData(base_uri,pfx,ids,ofmt,fout,verbose):
i=0;
for id_query in ids:
url=(base_uri+'/%s%d.%s'%(pfx,id_query,OFMTS[ofmt.upper()]['ext']))
res=rest.Utils.GetURL(url,headers={'Accept':'%s'%OFMTS[ofmt.upper()]['mime']},verbose=verbose)
if not res:
break
elif type(res) in types.StringTypes:
if i>0: #Kludge
for line in res.splitlines():
if not line.startswith('@'):
fout.write(line+'\n')
else:
fout.write(res)
i+=1
else:
print >>sys.stderr, 'ERROR: "%s"'%str(res)
#############################################################################
def GetCompound(base_uri,ids,ofmt,fout,verbose):
GetData(base_uri,'compound/CID',ids,ofmt,fout,verbose)
#############################################################################
def GetSubstance(base_uri,ids,ofmt,fout,verbose):
GetData(base_uri,'substance/SID',ids,ofmt,fout,verbose)
#############################################################################
def GetProtein(base_uri,ids,ofmt,fout,verbose):
GetData(base_uri,'protein/GI',ids,ofmt,fout,verbose)
#############################################################################
def GetGene(base_uri,ids,ofmt,fout,verbose):
GetData(base_uri,'gene/GID',ids,ofmt,fout,verbose)
#############################################################################
def GetBiosystem(base_uri,ids,ofmt,fout,verbose):
GetData(base_uri,'biosystem/BSID',ids,ofmt,fout,verbose)
#############################################################################
def GetBioassay(base_uri,ids,ofmt,fout,verbose):
GetData(base_uri,'bioassay/AID',ids,ofmt,fout,verbose)
#############################################################################
def GetEndpoint(base_uri,sids,aids,ofmt,fout,verbose):
i=0;
for sid in sids:
for aid in aids:
url=(base_uri+'/endpoint/SID%d_AID%d.%s'%(sid,aid,OFMTS[ofmt.upper()]['ext']))
res=rest.Utils.GetURL(url,headers={'Accept':'%s'%OFMTS[ofmt.upper()]['mime']},verbose=verbose)
if not res:
break
elif type(res) in types.StringTypes:
if i>0: #Kludge
for line in res.splitlines():
if not line.startswith('@'):
fout.write(line+'\n')
else:
fout.write(res)
i+=1
else:
print >>sys.stderr, 'ERROR: "%s"'%str(res)
##############################################################################
if __name__=='__main__':
ofmt='RDF';
USAGE='''\
%(PROG)s - (REST client)
required (one of):
--get_compound ..................(requires CIDs)
--get_substance .................(requires SIDs)
--get_protein ...................(requires GIs)
--get_gene ......................(requires GIDs)
--get_biosystem .................(requires BSIDs)
--get_bioassay ..................(requires AIDs)
--get_endpoint ..................(requires SIDs, AIDs)
options:
--i IFILE ....................... input IDs file
--id ID ......................... input ID
--aidfile AIDFILE ............... input AIDs file
--aid AID ....................... input AID
--o OFILE ....................... output file [stdout]
--ofmt FMT ...................... output formats [%(OFMT)s]
--api_host HOST ................. [%(API_HOST)s]
--api_base_path PATH ............ [%(API_BASE_PATH)s]
--skip N ........................ skip (input IDs)
--nmax N ........................ max count (input IDs)
--v ............................. verbose
--h ............................. this help
Example IDs:
2244 (compound)
144206602 (substance)
124375976 (protein)
367 (gene)
82991 (biosystem)
1117354 (bioassay)
Output formats:
%(OFMTS)s
'''%{
'PROG':PROG,
'API_HOST':API_HOST,
'API_BASE_PATH':API_BASE_PATH,
'OFMT':ofmt,
'OFMTS':('|'.join(OFMTS.keys()))
}
def ErrorExit(msg):
print >>sys.stderr,msg
sys.exit(1)
id_query=None; ifile=None;
aid_query=None; aidfile=None;
name_query=None;
ofile='';
verbose=0;
get_compound=False;
get_substance=False;
get_protein=False;
get_gene=False;
get_biosystem=False;
get_bioassay=False;
get_endpoint=False;
skip=None; nmax=None;
dbusr=None; dbpw=None;
opts,pargs=getopt.getopt(sys.argv[1:],'',['i=','o=',
'id=',
'aid=',
'aidfile=',
'ofmt=',
'get_compound',
'get_substance',
'get_protein',
'get_gene',
'get_biosystem',
'get_bioassay',
'get_endpoint',
'api_host=','api_base_path=','dbusr=','dbpw=',
'skip=', 'nmax=','version=','help','v','vv','vvv'])
if not opts: ErrorExit(USAGE)
for (opt,val) in opts:
if opt=='--help': ErrorExit(USAGE)
elif opt=='--o': ofile=val
elif opt=='--i': ifile=val
elif opt=='--id': id_query=int(val)
elif opt=='--aidfile': aidfile=val
elif opt=='--aid': aid_query=int(val)
elif opt=='--ofmt': ofmt=val
elif opt=='--dbusr': dbusr=val
    elif opt=='--dbpw': dbpw=val
elif opt=='--get_compound': get_compound=True
elif opt=='--get_substance': get_substance=True
elif opt=='--get_protein': get_protein=True
elif opt=='--get_gene': get_gene=True
elif opt=='--get_biosystem': get_biosystem=True
elif opt=='--get_bioassay': get_bioassay=True
elif opt=='--get_endpoint': get_endpoint=True
elif opt=='--api_host': API_HOST=val
elif opt=='--api_base_path': API_BASE_PATH=val
elif opt=='--skip': skip=int(val)
elif opt=='--nmax': nmax=int(val)
elif opt=='--v': verbose=1
elif opt=='--vv': verbose=2
elif opt=='--vvv': verbose=3
else: ErrorExit('Illegal option: %s'%(opt))
BASE_URL='http://'+API_HOST+API_BASE_PATH
if ofile:
fout=open(ofile,"w+")
#fout=codecs.open(ofile,"w","utf8","replace")
if not fout: ErrorExit('ERROR: cannot open outfile: %s'%ofile)
else:
fout=sys.stdout
#fout=codecs.getwriter('utf8')(sys.stdout,errors="replace")
if ofmt.upper() not in OFMTS.keys():
    ErrorExit('ERROR: --ofmt "%s" not allowed.'%ofmt)
ids=[]
if ifile:
fin=open(ifile)
if not fin: ErrorExit('ERROR: cannot open ifile: %s'%ifile)
while True:
line=fin.readline()
if not line: break
try:
ids.append(int(line.rstrip()))
except:
print >>sys.stderr, 'ERROR: bad input ID: %s'%line
continue
if verbose:
print >>sys.stderr, '%s: input IDs: %d'%(PROG,len(ids))
fin.close()
id_query=ids[0]
elif id_query:
ids=[id_query]
aids=[]
if aidfile:
fin=open(aidfile)
if not fin: ErrorExit('ERROR: cannot open aidfile: %s'%aidfile)
while True:
line=fin.readline()
if not line: break
try:
aids.append(int(line.rstrip()))
except:
print >>sys.stderr, 'ERROR: bad input AID: %s'%line
continue
if verbose:
print >>sys.stderr, '%s: input AIDs: %d'%(PROG,len(aids))
fin.close()
aid_query=aids[0]
elif aid_query:
aids=[aid_query]
if get_compound:
if not ids: ErrorExit('ERROR: ID required\n'+USAGE)
GetCompound(BASE_URL,ids,ofmt,fout,verbose)
elif get_substance:
if not ids: ErrorExit('ERROR: ID required\n'+USAGE)
GetSubstance(BASE_URL,ids,ofmt,fout,verbose)
elif get_protein:
if not ids: ErrorExit('ERROR: ID required\n'+USAGE)
GetProtein(BASE_URL,ids,ofmt,fout,verbose)
elif get_gene:
if not ids: ErrorExit('ERROR: ID required\n'+USAGE)
GetGene(BASE_URL,ids,ofmt,fout,verbose)
elif get_biosystem:
if not ids: ErrorExit('ERROR: ID required\n'+USAGE)
GetBiosystem(BASE_URL,ids,ofmt,fout,verbose)
elif get_bioassay:
if not aids: ErrorExit('ERROR: AID required\n'+USAGE)
GetBioassay(BASE_URL,aids,ofmt,fout,verbose)
elif get_endpoint:
if not ids: ErrorExit('ERROR: ID required\n'+USAGE)
if not aids: ErrorExit('ERROR: AID required\n'+USAGE)
GetEndpoint(BASE_URL,ids,aids,ofmt,fout,verbose)
else:
ErrorExit('ERROR: no operation specified.\n'+USAGE)
```
#### File: pubchem/soap/pug_aids2assays.py
```python
import sys,os,re,urllib,time,getopt,tempfile,gzip
import pug_utils
PROG=os.path.basename(sys.argv[0])
PUGURL='http://pubchem.ncbi.nlm.nih.gov/pug/pug.cgi'
POLL_WAIT=10
MAX_WAIT=600
#############################################################################
def QueryPug_fetchassays(aids,format='csv',verbose=0):
qxml='''\
<PCT-Data><PCT-Data_input><PCT-InputData>
<PCT-InputData_query><PCT-Query><PCT-Query_type><PCT-QueryType>
<PCT-QueryType_bas><PCT-QueryAssayData>
'''
if format=='xml':
qxml+='<PCT-QueryAssayData_output value="assay-xml">1</PCT-QueryAssayData_output>'
else:
qxml+='<PCT-QueryAssayData_output value="csv">4</PCT-QueryAssayData_output>'
qxml+='''\
<PCT-QueryAssayData_aids><PCT-QueryUids><PCT-QueryUids_ids>
<PCT-ID-List>
<PCT-ID-List_db>pcassay</PCT-ID-List_db>
<PCT-ID-List_uids>
'''
for aid in aids:
qxml+=('<PCT-ID-List_uids_E>%d</PCT-ID-List_uids_E>\n'%aid)
qxml+='''\
</PCT-ID-List_uids>
</PCT-ID-List>
</PCT-QueryUids_ids></PCT-QueryUids></PCT-QueryAssayData_aids>
<PCT-QueryAssayData_dataset value="complete">0</PCT-QueryAssayData_dataset>
<PCT-QueryAssayData_focus>
<PCT-Assay-FocusOption>
<PCT-Assay-FocusOption_group-results-by value="substance">4</PCT-Assay-FocusOption_group-results-by>
</PCT-Assay-FocusOption>
</PCT-QueryAssayData_focus>
</PCT-QueryAssayData></PCT-QueryType_bas>
</PCT-QueryType></PCT-Query_type></PCT-Query></PCT-InputData_query>
</PCT-InputData></PCT-Data_input></PCT-Data>
'''
# <PCT-Assay-FocusOption_group-results-by value="compound">0</PCT-Assay-FocusOption_group-results-by>
if verbose:
print >>sys.stderr,'connecting %s...'%PUGURL
print >>sys.stderr,'submitting query for %d bioassays...'%len(aids)
f=pug_utils.UrlOpenTry(PUGURL,qxml)
pugxml=f.read()
f.close()
status,reqid,url,qkey,wenv,error=pug_utils.ParsePugXml(pugxml)
return status,reqid,url,error
#############################################################################
if __name__=='__main__':
n_chunk=1000;
usage='''
%(PROG)s - fetch PubChem bioassay files (CSV or XML) for AIDs
required:
--i=<infile> ... input IDs file
or
--aids=<IDs> ... input IDs, comma-separated
and
--o=<outfile> ... output file
options:
--format=<FMT> ... csv|xml [default via filename]
--nmax=<N> ... maximum N IDs
--skip=<N> ... skip 1st N IDs in file
--n_chunk=<N> ... IDs per PUG request [%(N_CHUNK)d]
--gz ... output gzipped [default via filename]
--v ... verbose
--vv ... very verbose
--h ... this help
'''%{'PROG':PROG,'N_CHUNK':n_chunk}
ifile=None; iaids=None; ofile=None; verbose=0; skip=0; nmax=0;
format=None; gz=False;
opts,pargs = getopt.getopt(sys.argv[1:],'',['h','v','vv','i=','in=',
'aids=','o=','out=','fmt=','format=','gz','gz','skip=','nmax=','n=',
'n_chunk='])
if not opts: pug_utils.ErrorExit(usage)
for (opt,val) in opts:
if opt=='--h': pug_utils.ErrorExit(usage)
elif opt in ('--i','--in'): ifile=val
elif opt=='--aids': iaids=val
elif opt in ('--o','--out'): ofile=val
elif opt in ('--n','--nmax'): nmax=int(val)
elif opt in ('--format','--fmt'): format=val
elif opt in ('--gzip','--gz'): gz=True
elif opt=='--skip': skip=int(val)
elif opt=='--n_chunk': n_chunk=int(val)
elif opt=='--vv': verbose=2
elif opt=='--v': verbose=1
else: pug_utils.ErrorExit('Illegal option: %s'%val)
if not ofile:
pug_utils.ErrorExit('--o required\n'+usage)
if not (ifile or iaids):
pug_utils.ErrorExit('--i or --aids required\n'+usage)
if (ofile[-3:]=='.gz'):
gz=True
ext=re.sub('^.*\.','',ofile[:-3])
else:
ext=re.sub('^.*\.','',ofile)
if not format: format=ext
if format not in ('csv','xml'):
pug_utils.ErrorExit('format "csv" or "xml" required\n'+usage)
if ifile:
faids=file(ifile)
if not faids:
pug_utils.ErrorExit('cannot open: %s\n%s'%(ifile,usage))
i=0; aids=[];
while True:
line=faids.readline()
if not line: break
line=line.strip()
if not line: continue
i+=1
if skip and i<=skip: continue
if nmax and i>nmax: break
try:
field=re.sub('\s.*$','',line) ## may be addl field
aid=int(field)
aids.append(aid)
except:
print >>sys.stderr,'cannot parse aid: "%s"'%line
continue
else:
buff=iaids
try:
buff=re.sub('\s','',buff)
aids=map(lambda x:int(x),re.split(',',buff))
except:
pug_utils.ErrorExit('cannot parse "--aids %s"'%iaids)
print >>sys.stderr,'aids read: %d'%len(aids)
if not gz:
fd,ofile_tmp=tempfile.mkstemp('.'+format+'.gz',PROG)
os.close(fd)
fout=file(ofile_tmp,'w')
else:
fout=file(ofile,'w')
if not fout:
pug_utils.ErrorExit('cannot open: %s\n%s'%(ofile,usage))
n=0; i_query=0; nbytes_total=0; n_query_total=0;
while n<len(aids):
i_query+=1
n_query=len(aids[n:n+n_chunk])
print >>sys.stderr,'Q%d. [%d-%d] of %d'%(i_query,n+1,n+n_query,len(aids))
status,reqid,url,error=QueryPug_fetchassays(aids[n:n+n_chunk],format,verbose)
pug_utils.CheckStatus(status,error)
if verbose:
print >>sys.stderr,'query submit status: %s'%status
if not url and not reqid:
pug_utils.ErrorExit('query submit ok but no ID; quitting.')
i_poll=0
while not url:
if i_poll*POLL_WAIT>MAX_WAIT:
pug_utils.PollPug(reqid,'cancel',verbose)
pug_utils.ErrorExit('max wait exceeded (%d sec); quitting.'%MAX_WAIT)
print >>sys.stderr,'polling PUG [ID=%d]...'%reqid
status,reqid,url,qkey,wenv,error = pug_utils.PollPug(reqid,'status',verbose)
pug_utils.CheckStatus(status,error)
if url: break
if verbose:
print >>sys.stderr,'%d sec wait (status=%s)...'%(POLL_WAIT,status)
time.sleep(POLL_WAIT)
i_poll+=1
if verbose:
print >>sys.stderr,'URL: %s'%url
print >>sys.stderr,'downloading to %s...'%ofile
nbytes=pug_utils.DownloadUrl(url,fout)
nbytes_total+=nbytes;
print >>sys.stderr,'assays: %d (%.2fMB)'%(n_query,nbytes/1e6)
n_query_total+=n_query
n+=n_chunk
fout.close()
if not gz:
ftmp=gzip.open(ofile_tmp,'rb')
fout=file(ofile,'w')
fout.write(ftmp.read())
ftmp.close()
fout.close()
os.unlink(ofile_tmp)
print >>sys.stderr,'%s: total assays: %d (%.2fMB)'%(PROG,n_query_total,nbytes_total/1e6)
sys.stderr.write('%s: format: %s'%(PROG,format))
if gz: sys.stderr.write('(gzipped)')
sys.stderr.write('\n')
```
#### File: pubchem/soap/pug_substance_search.py
```python
import sys,os,re,urllib,time,getopt,tempfile
import pug_utils
PROG=os.path.basename(sys.argv[0])
PUGURL='http://pubchem.ncbi.nlm.nih.gov/pug/pug.cgi'
POLL_WAIT=10
MAX_WAIT=300
SIM_CUTOFF=0.80
NMAX=0
#############################################################################
def QueryPug_qkey(qkey,webenv,fmt='smiles',gzip=False,verbose=0):
if gzip: compres='gzip'
else: compres='none'
qxml='''\
<PCT-Data><PCT-Data_input><PCT-InputData>
<PCT-InputData_download><PCT-Download>
<PCT-Download_uids><PCT-QueryUids>
<PCT-QueryUids_entrez><PCT-Entrez>
<PCT-Entrez_db>pcsubstance</PCT-Entrez_db>
<PCT-Entrez_query-key>%(QKEY)s</PCT-Entrez_query-key>
<PCT-Entrez_webenv>%(WEBENV)s</PCT-Entrez_webenv>
</PCT-Entrez></PCT-QueryUids_entrez>
</PCT-QueryUids></PCT-Download_uids>
<PCT-Download_format value="%(FMT)s"/>
<PCT-Download_compression value="%(COMPRES)s"/>
</PCT-Download></PCT-InputData_download>
</PCT-InputData></PCT-Data_input></PCT-Data>
'''%{'QKEY':qkey,'WEBENV':webenv,'FMT':fmt,'COMPRES':compres}
if verbose:
print >>sys.stderr,'connecting %s...'%PUGURL
print >>sys.stderr,'submitting query for qkey: %s ...'%qkey
f=pug_utils.UrlOpenTry(PUGURL,qxml)
pugxml=f.read()
f.close()
status,reqid,url,qkey,wenv,error=pug_utils.ParsePugXml(pugxml)
return status,reqid,url,qkey,wenv,error
#############################################################################
def QueryPug_struct(smiles,sim=False,sim_cutoff=0.8,active=False,mlsmr=False,nmax=10000,verbose=0):
qxml='''\
<PCT-Data><PCT-Data_input><PCT-InputData><PCT-InputData_query><PCT-Query>
<PCT-Query_type><PCT-QueryType><PCT-QueryType_css>
<PCT-QueryCompoundCS>
<PCT-QueryCompoundCS_query>
<PCT-QueryCompoundCS_query_data>%(SMILES)s</PCT-QueryCompoundCS_query_data>
</PCT-QueryCompoundCS_query>
<PCT-QueryCompoundCS_type>
'''%{'SMILES':smiles}
if sim:
qxml+='''\
<PCT-QueryCompoundCS_type_similar><PCT-CSSimilarity>
<PCT-CSSimilarity_threshold>%(SIM_CUTOFF)d</PCT-CSSimilarity_threshold>
</PCT-CSSimilarity></PCT-QueryCompoundCS_type_similar>
'''%{'SIM_CUTOFF':sim_cutoff*100}
else:
qxml+='''\
<PCT-QueryCompoundCS_type_subss><PCT-CSStructure>
<PCT-CSStructure_isotopes value="false"/>
<PCT-CSStructure_charges value="false"/>
<PCT-CSStructure_tautomers value="false"/>
<PCT-CSStructure_rings value="false"/>
<PCT-CSStructure_bonds value="true"/>
<PCT-CSStructure_chains value="true"/>
<PCT-CSStructure_hydrogen value="false"/>
</PCT-CSStructure></PCT-QueryCompoundCS_type_subss>
'''
qxml+='''\
</PCT-QueryCompoundCS_type>
<PCT-QueryCompoundCS_results>%(NMAX)d</PCT-QueryCompoundCS_results>
</PCT-QueryCompoundCS>
</PCT-QueryType_css></PCT-QueryType>
'''%{'NMAX':nmax}
if active:
qxml+='''\
<PCT-QueryType><PCT-QueryType_cel><PCT-QueryCompoundEL>
<PCT-QueryCompoundEL_activity>
<PCT-CLByBioActivity value="active">2</PCT-CLByBioActivity>
</PCT-QueryCompoundEL_activity>
</PCT-QueryCompoundEL></PCT-QueryType_cel></PCT-QueryType>
'''
if mlsmr:
qxml+='''\
<PCT-QueryType><PCT-QueryType_cel><PCT-QueryCompoundEL>
<PCT-QueryCompoundEL_source><PCT-CLByString>
<PCT-CLByString_qualifier value="must">1</PCT-CLByString_qualifier>
<PCT-CLByString_category>MLSMR</PCT-CLByString_category>
</PCT-CLByString></PCT-QueryCompoundEL_source>
</PCT-QueryCompoundEL></PCT-QueryType_cel></PCT-QueryType>
'''
qxml+='''\
</PCT-Query_type></PCT-Query>
</PCT-InputData_query></PCT-InputData></PCT-Data_input></PCT-Data>
'''
if verbose:
print >>sys.stderr,'connecting %s...'%PUGURL
if sim:
      print >>sys.stderr,'similarity query (cutoff=%.2f): %s ...'%(sim_cutoff,smiles)
else:
print >>sys.stderr,'substructure query: %s ...'%smiles
f=pug_utils.UrlOpenTry(PUGURL,qxml)
pugxml=f.read()
f.close()
status,reqid,url,qkey,wenv,error=pug_utils.ParsePugXml(pugxml)
return status,reqid,qkey,wenv,error
#############################################################################
def ElapsedTime(t_start):
t_elapsed=time.time()-t_start
tsec=t_elapsed%60
tmin=t_elapsed/60
return ("%d:%02d"%(tmin,tsec))
#############################################################################
if __name__=='__main__':
usage='''
%(PROG)s - PubChem substructure or similarity search, fetch smiles
required:
  --smi=<SMILES> ... query SMILES
  --o=<OUTSMIS> ... output smiles file (w/ CIDs)
 options:
  --similarity ... similarity search [default: substructure]
  --sim_cutoff=<C> ... similarity cutoff [%(SIM_CUTOFF)s]
  --fmt=<FMT> ... smiles|sdf [default via filename]
  --gz ... output gzipped [default via filename]
  --out_cids=<OUTCIDS> ... output CIDs (only w/ smiles output)
--active ... active mols only (in any assay)
--mlsmr ... MLSMR mols only
--nmax ... max hits returned
--max_wait ... max wait for query
--v ... verbose
--vv ... very verbose
--h ... this help
'''%{'PROG':PROG,'SIM_CUTOFF':SIM_CUTOFF}
def ErrorExit(msg):
print >>sys.stderr,msg
sys.exit(1)
smiles=None; ofile=None; verbose=0;
out_cids_file=None; similarity=False; active=False; mlsmr=False;
fmt=None; gzip=False;
opts,pargs=getopt.getopt(sys.argv[1:],'',['h','v','vv','smi=','smiles=',
'o=','out=','nmax=','n=','sim_cutoff=','similarity','sim','out_cids=',
'fmt=','gz','max_wait=','active','mlsmr'])
if not opts: ErrorExit(usage)
for (opt,val) in opts:
if opt=='--h': ErrorExit(usage)
elif opt in ('--smiles','--smi'): smiles=val
elif opt in ('--o','--out'): ofile=val
elif opt in ('--fmt','--format'): fmt=val
elif opt=='--out_cids': out_cids_file=val
elif opt in ('--similarity','--sim'): similarity=True
elif opt=='--active': active=True
elif opt=='--mlsmr': mlsmr=True
elif opt=='--sim_cutoff': SIM_CUTOFF=float(val)
elif opt=='--max_wait': MAX_WAIT=int(val)
elif opt in ('--n','--nmax'): NMAX=int(val)
elif opt=='--vv': verbose=2
elif opt=='--v': verbose=1
else: ErrorExit('Illegal option: %s'%val)
if not smiles:
ErrorExit('smiles required\n'+usage)
if verbose:
print >>sys.stderr,'query: "%s"'%smiles
if ofile:
fout=file(ofile,'wb')
if not fout:
ErrorExit('cannot open: %s\n%s'%(ofile,usage))
if (ofile[-3:]=='.gz'):
gzip=True
ext=re.sub('^.*\.','',ofile[:-3])
else:
ext=re.sub('^.*\.','',ofile)
if not fmt: fmt=ext
if fmt=='smi': fmt='smiles'
if fmt not in ('smiles','sdf'):
ErrorExit('format "smiles" or "sdf" required\n'+usage)
if out_cids_file:
if fmt=='sdf':
ErrorExit('--out_cids not allowed with SDF output.\n'+usage)
fout_cids=file(out_cids_file,'wb')
if not fout_cids:
ErrorExit('cannot open: %s\n%s'%(out_cids_file,usage))
t0=time.time() ## start timer
status,reqid,qkey,wenv,error=QueryPug_struct(smiles,similarity,SIM_CUTOFF,active,mlsmr,NMAX,verbose)
if status not in ('success','queued','running'):
ErrorExit('query failed; quitting (status=%s,error="%s").'%(status,error))
if verbose:
print >>sys.stderr,'query status: %s'%status
if not reqid and not qkey:
ErrorExit('query ok but no ID; quitting.')
i_poll=0
while not qkey:
if time.time()-t0>MAX_WAIT:
pug_utils.PollPug(reqid,'cancel',verbose)
ErrorExit('max wait exceeded (%d sec); quitting.'%MAX_WAIT)
print >>sys.stderr,'polling PUG [ID=%d]...'%reqid
status,reqid_new,url,qkey,wenv,error=pug_utils.PollPug(reqid,'status',verbose)
if reqid_new: reqid=reqid_new
if qkey: break
if verbose:
print >>sys.stderr,'%s elapsed; %d sec wait; status=%s'%(ElapsedTime(t0),POLL_WAIT,status)
print >>sys.stderr,'%s'%(error)
if re.search('No result found',error,re.I):
ErrorExit(error)
time.sleep(POLL_WAIT)
i_poll+=1
status,reqid,url,qkey,wenv,error=QueryPug_qkey(qkey,wenv,fmt,gzip,verbose)
i_poll=0
while not url:
if time.time()-t0>MAX_WAIT:
pug_utils.PollPug(reqid,'cancel',verbose)
ErrorExit('max wait exceeded (%d sec); quitting.'%MAX_WAIT)
print >>sys.stderr,'polling PUG [ID=%d]...'%reqid
status,reqid,url,qkey,wenv,error=pug_utils.PollPug(reqid,'status',verbose)
if url: break
if verbose:
print >>sys.stderr,'%s elapsed; %d sec wait; status=%s'%(ElapsedTime(t0),POLL_WAIT,status)
time.sleep(POLL_WAIT)
i_poll+=1
if verbose:
print >>sys.stderr,'query elapsed time: %s'%(ElapsedTime(t0))
if ofile:
if verbose:
print >>sys.stderr,'URL: %s'%url
print >>sys.stderr,'downloading to %s...'%ofile
fd,tmpfile=tempfile.mkstemp('.txt',PROG)
f=os.fdopen(fd,"w")
nbytes=pug_utils.DownloadUrl(url,f)
f.close()
if fmt=='smiles':
ftmp=open(tmpfile)
n=0
while True:
line=ftmp.readline()
if not line: break
fields=line.split()
if ofile: fout.write("%s %s\n"%(fields[1],fields[0]))
if out_cids_file: fout_cids.write("%s\n"%(fields[0]))
n+=1
print >>sys.stderr,'compounds: %d'%(n)
ftmp.close()
else:
os.system("cp %s %s"%(tmpfile,ofile))
os.unlink(tmpfile)
sys.stderr.write('%s: format: %s'%(PROG,fmt))
if gzip: sys.stderr.write('(gzipped)')
sys.stderr.write('\n')
if ofile:
fout.close()
nbytes=os.stat(ofile).st_size
print >>sys.stderr,'%s (%.2fMB)'%(ofile,nbytes/1e6)
if out_cids_file:
fout_cids.close()
nbytes=os.stat(out_cids_file).st_size
print >>sys.stderr,'%s (%.2fMB)'%(out_cids_file,nbytes/1e6)
```
#### File: BioClients/tcga/Utils.py
```python
import sys,os,re,json,time,logging
from ..util import rest
#
##############################################################################
def ListProjects(base_url, skip, nmax, fout):
n_out=0; tags=None;
from_next=skip; size=100;
while True:
url_next = (base_url+'/projects?from={0}&size={1}'.format(from_next, size))
rval = rest.Utils.GetURL(url_next, parse_json=True)
projects = rval["data"]["hits"] if "data" in rval and "hits" in rval["data"] else []
for project in projects:
logging.debug(json.dumps(project, indent=2))
if not tags:
tags = list(project.keys())
fout.write("\t".join(tags)+"\n")
vals = [(str(project[tag]) if tag in project else "") for tag in tags]
fout.write("\t".join(vals)+"\n")
n_out+=1
if n_out>=nmax: break
if n_out>=nmax: break
total = rval["data"]["pagination"]["total"] if "data" in rval and "pagination" in rval["data"] and "total" in rval["data"]["pagination"] else None
count = rval["data"]["pagination"]["count"] if "data" in rval and "pagination" in rval["data"] and "count" in rval["data"]["pagination"] else None
if not count or count<size: break
from_next += count
logging.info("n_out: %d / %d"%(n_out, total))
##############################################################################
def ListCases(base_url, skip, nmax, fout):
n_out=0; tags=None;
from_next=skip; size=100;
while True:
url_next = (base_url+'/cases?from={0}&size={1}'.format(from_next, size))
rval = rest.Utils.GetURL(url_next, parse_json=True)
cases = rval["data"]["hits"] if "data" in rval and "hits" in rval["data"] else []
for case in cases:
logging.debug(json.dumps(case, indent=2))
if not tags:
tags = list(case.keys())
fout.write("\t".join(tags)+"\n")
vals = [(str(case[tag]) if tag in case else "") for tag in tags]
fout.write("\t".join(vals)+"\n")
n_out+=1
if n_out>=nmax: break
if n_out>=nmax: break
total = rval["data"]["pagination"]["total"] if "data" in rval and "pagination" in rval["data"] and "total" in rval["data"]["pagination"] else None
count = rval["data"]["pagination"]["count"] if "data" in rval and "pagination" in rval["data"] and "count" in rval["data"]["pagination"] else None
if not count or count<size: break
from_next += count
logging.info("n_out: %d / %d"%(n_out, total))
##############################################################################
def ListFiles(base_url, skip, nmax, fout):
n_out=0; tags=None;
from_next=skip; size=100;
while True:
url_next = (base_url+'/files?from={0}&size={1}'.format(from_next, size))
rval = rest.Utils.GetURL(url_next, parse_json=True)
files = rval["data"]["hits"] if "data" in rval and "hits" in rval["data"] else []
for file_this in files:
logging.debug(json.dumps(file_this, indent=2))
if not tags:
tags = list(file_this.keys())
fout.write("\t".join(tags)+"\n")
vals = [(str(file_this[tag]) if tag in file_this else "") for tag in tags]
fout.write("\t".join(vals)+"\n")
n_out+=1
if n_out>=nmax: break
if n_out>=nmax: break
total = rval["data"]["pagination"]["total"] if "data" in rval and "pagination" in rval["data"] and "total" in rval["data"]["pagination"] else None
count = rval["data"]["pagination"]["count"] if "data" in rval and "pagination" in rval["data"] and "count" in rval["data"]["pagination"] else None
if not count or count<size: break
from_next += count
logging.info("n_out: %d / %d"%(n_out, total))
##############################################################################
def ListAnnotations(base_url, skip, nmax, fout):
n_out=0; tags=None;
from_next=skip; size=100;
while True:
url_next = (base_url+'/annotations?from={0}&size={1}'.format(from_next, size))
rval = rest.Utils.GetURL(url_next, parse_json=True)
annos = rval["data"]["hits"] if "data" in rval and "hits" in rval["data"] else []
for anno in annos:
logging.debug(json.dumps(anno, indent=2))
if not tags:
tags = list(anno.keys())
fout.write("\t".join(tags)+"\n")
vals = [(str(anno[tag]) if tag in anno else "") for tag in tags]
fout.write("\t".join(vals)+"\n")
n_out+=1
if n_out>=nmax: break
if n_out>=nmax: break
total = rval["data"]["pagination"]["total"] if "data" in rval and "pagination" in rval["data"] and "total" in rval["data"]["pagination"] else None
count = rval["data"]["pagination"]["count"] if "data" in rval and "pagination" in rval["data"] and "count" in rval["data"]["pagination"] else None
if not count or count<size: break
from_next += count
logging.info("n_out: %d / %d"%(n_out, total))
##############################################################################
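# Editorial sketch (not part of the original module): the four listing functions
# above share the same pagination loop; a generic helper along these lines could
# replace them, assuming the same rest.Utils.GetURL behavior. The helper name
# _list_resources is hypothetical.
def _list_resources(base_url, endpoint, skip, nmax, fout):
  n_out=0; tags=None;
  from_next=skip; size=100;
  while True:
    url_next = (base_url+'/{0}?from={1}&size={2}'.format(endpoint, from_next, size))
    rval = rest.Utils.GetURL(url_next, parse_json=True)
    hits = rval["data"]["hits"] if "data" in rval and "hits" in rval["data"] else []
    for hit in hits:
      logging.debug(json.dumps(hit, indent=2))
      if not tags:
        tags = list(hit.keys())
        fout.write("\t".join(tags)+"\n")
      vals = [(str(hit[tag]) if tag in hit else "") for tag in tags]
      fout.write("\t".join(vals)+"\n")
      n_out+=1
      if n_out>=nmax: break
    if n_out>=nmax: break
    count = rval["data"]["pagination"]["count"] if "data" in rval and "pagination" in rval["data"] and "count" in rval["data"]["pagination"] else None
    if not count or count<size: break
    from_next += count
  logging.info("n_out: %d"%n_out)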
```
|
{
"source": "jeremykenedy/swdc-sublime",
"score": 2
}
|
#### File: swdc-sublime/lib/SoftwareHttp.py
```python
import http
import json
import sublime_plugin, sublime
from .SoftwareSettings import *
USER_AGENT = 'Code Time Sublime Plugin'
lastMsg = None
windowView = None
def httpLog(message):
if (getValue("software_logging_on", True)):
print(message)
def redispayStatus():
global lastMsg
showStatus(lastMsg)
def toggleStatus():
global lastMsg
showStatusVal = getValue("show_code_time_status", True)
if (showStatusVal is True):
showStatus(lastMsg)
else:
# show clock icon unicode
showStatus("⏱")
# update the status bar message
def showStatus(msg):
global lastMsg
try:
active_window = sublime.active_window()
showStatusVal = getValue("show_code_time_status", True)
if (showStatusVal is False):
msg = "⏱"
else:
lastMsg = msg
if (active_window is not None):
for view in active_window.views():
if (view is not None):
view.set_status('software.com', msg)
except RuntimeError:
httpLog(msg)
def isResponsOk(response):
if (response is not None and int(response.status) < 300):
return True
return False
def isUnauthenticated(response):
if (response is not None and int(response.status) == 401):
return True
return False
# send the request.
def requestIt(method, api, payload, jwt):
api_endpoint = getValue("software_api_endpoint", "api.software.com")
telemetry = getValue("software_telemetry_on", True)
if (telemetry is False):
# httpLog("Code Time: telemetry is currently paused. To see your coding data in Software.com, enable software telemetry.")
return None
# try to update kpm data.
try:
connection = None
# create the connection
if ('localhost' in api_endpoint):
connection = http.client.HTTPConnection(api_endpoint)
else:
connection = http.client.HTTPSConnection(api_endpoint)
headers = {'Content-Type': 'application/json', 'User-Agent': USER_AGENT}
if (jwt is not None):
headers['Authorization'] = jwt
        elif (method == 'POST' and jwt is None):
httpLog("Code Time: no auth token available to post kpm data: %s" % payload)
return None
# make the request
if (payload is None):
payload = {}
httpLog("Code Time: Requesting [" + method + ": " + api_endpoint + "" + api + "]")
else:
httpLog("Code Time: Sending [" + method + ": " + api_endpoint + "" + api + ", headers: " + json.dumps(headers) + "] payload: %s" % payload)
# send the request
connection.request(method, api, payload, headers)
response = connection.getresponse()
# httpLog("Code Time: " + api_endpoint + "" + api + " Response (%d)" % response.status)
return response
except Exception as ex:
print("Code Time: " + api + " Network error: %s" % ex)
return None
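# Hypothetical usage sketch (the endpoint path below is illustrative only, not a
# documented API route):
#   response = requestIt('GET', '/ping', None, jwt)
#   if isResponsOk(response):
#       httpLog("Code Time: ping ok")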
```
|
{
"source": "jeremykerr/equanimous-tribble",
"score": 3
}
|
#### File: equanimous-tribble/examples/hello.py
```python
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello_world():
return "Hello, world."
if __name__ == "__main__":
# Debug should be set to False on production machines. Setting Debug mode to True allows for a security
# issue that can be used to execute arbitrary code. See the Flask documentation for more information.
app.debug = False
app.run(host='0.0.0.0')
```
|
{
"source": "jeremykid/Algorithm_project",
"score": 4
}
|
#### File: jeremykid/Algorithm_project/euler_trivial.py
```python
import math
import time
def primeGenerate(number):
largest = number
# largest = int(math.sqrt(number)+1)
prime_list = largest*[1]
if (number<4):
return [2,3]
prime_list[1] = 0
for i in range(0,largest,2):
prime_list[i] = 0
prime_list[2] = 1
for i in range(3,largest,2):
if (prime_list[i] == 1):
for j in range(2*i,largest,i):
                prime_list[j] = 0
result = []
for i in range(0,number):
if(prime_list[i] == 1):
result.append(i)
return result
def trial_division(n):
"""Return a list of the prime factors for a natural number."""
if n < 2:
return []
prime_factors_list = []
temp = primeGenerate(int(n**0.5) + 1)
for p in temp:
if p*p > n:
break
while n % p == 0:
prime_factors_list.append(p)
n //= p
if n > 1:
prime_factors_list.append(n)
return prime_factors_list
def gcd(a,b):
while (b != 0):
temp = a%b
a = b
b = temp
return a
def perfectsquare(n):
return n % n**0.5 == 0
def find2sqrtroot(number):
largest = int(math.sqrt(number//2))
result = []
length = 0
# print number,largest
for i in range(largest+1):
temp2 = number - i*i
if (perfectsquare(temp2)):
result.append(i)
result.append(math.sqrt(temp2))
length+=1
if (length == 2):
break
return result
def euler(number):
timeC = time.time()
result = find2sqrtroot(number)
timeD = time.time()
global timeinroot2
timeinroot2 = timeinroot2 + timeD - timeC
if (len(result) != 4):
# print (number)
temp = trial_division(number)
# print temp
prime_factors.extend(temp)
return
else:
b = result[0]
a = result[1]
d = result[2]
c = result[3]
k = gcd(a-c,d-b)
h = gcd(a+c,d+b)
l = (a-c)/k
m = (d-b)/k
# m = gcd(a+c,d-b)
# l = gcd(a-c,d+b)
factor1 = (k/2)**2 + (h/2)**2
factor2 = l**2 + m**2
# print (a,b,c,d)
# print (l,m,k,h)
# print factor1*factor2
euler(factor1)
euler(factor2)
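# Worked example of Euler's factorization (hand-checked, not from the original source):
# 1000009 = 1000^2 + 3^2 = 972^2 + 235^2, so with b=3, a=1000, d=235, c=972:
#   k = gcd(a-c, d-b) = gcd(28, 232) = 4
#   h = gcd(a+c, d+b) = gcd(1972, 238) = 34
#   l = (a-c)/k = 7, m = (d-b)/k = 58
#   factor1 = (k/2)^2 + (h/2)^2 = 4 + 289 = 293
#   factor2 = l^2 + m^2 = 49 + 3364 = 3413
#   293 * 3413 = 1000009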
def runner():
global prime_factors
global timeinroot2
testcases = int(input("How many Testcases: "))
for i in range(testcases):
prime_factors = []
timeinroot2 = 0
timeA = time.time()
number = int(input("number:"))
# print find2sqrtroot(number)
euler(number)
print (prime_factors)
timeB = time.time()
print (timeinroot2)
print (timeB - timeA - timeinroot2)
```
|
{
"source": "jeremykid/FunAlgorithm",
"score": 2
}
|
#### File: FunAlgorithm/faceRegonization/views.py
```python
from django.http import HttpResponse
def my_view(request):
    if request.method == 'POST':
        import set_gpio
        # Do your stuff, calling whatever you want from set_gpio.py
    return HttpResponse("1")  # normally an HttpResponse when using Django
```
#### File: FunAlgorithm/Hankson/Hankson.py
```python
import math
def findFactor(b0):
temp = b0
tempSqrt = math.sqrt(temp)
returnArray = []
i = 2
while (i<temp):
if temp%i == 0:
temp = temp/i
returnArray.append(i)
tempSqrt = math.sqrt(temp)
i = 2
else:
i += 1
if temp != 1:
returnArray.append(temp)
return returnArray
def findDivision(value):
valueSqrt = math.sqrt(value)
i = 2
returnArray = [value,1]
while i<valueSqrt:
if value%i == 0:
returnArray.append(i)
returnArray.append(value/i)
i += 1
return returnArray
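# Examples (hand-checked): findFactor(12) -> [2, 2, 3] (prime factors with multiplicity);
# findDivision(12) -> [12, 1, 2, 6, 3, 4] (all divisors, unordered).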
def gcd(a, b):
while b:
a, b = b, a%b
return a
def main():
times = int(input(""))
for i in range(times):
temp = raw_input("")
tempArray = temp.split(" ")
a0,a1,b0,b1 = int(tempArray[0]),int(tempArray[1]),int(tempArray[2]),int(tempArray[3])
returnArray = findFactor(b0)
resultFactor = b1/b0
newb0 = b0
gcdB = 1
for i in findFactor(resultFactor):
resultPoints = returnArray.count(i)
gcdB *= i**resultPoints
newb0= newb0/(i**resultPoints)
returnDevision = findDivision(newb0)
returnArray = [i*gcdB*resultFactor for i in returnDevision]
returnArray = [i for i in returnArray if gcd(i,a0)==a1]
print len(returnArray)
main()
```
#### File: FunAlgorithm/leetcode/1700_Number_of_Students_Unable_to_Eat_Lunch_Easy.py
```python
class Solution:
def countStudents(self, students: List[int], sandwiches: List[int]) -> int:
# method 1
count = 0
while students:
if students[0] == sandwiches[0]:
students.pop(0)
sandwiches.pop(0)
count = 0
            elif count >= len(students): # a full pass with no student taking the front sandwich
                break
            else:
                x = students.pop(0) # student goes to the end of the line
                students.append(x)
                count += 1 # one more student who doesn't want this sandwich
        return len(students) # students still in line cannot eat
```
#### File: FunAlgorithm/leetcode/1_Two_Sum_Easy.py
```python
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
# method with sort alogrithm
'''
nums.sort()
start = 0
end = len(nums) - 1
while start < end:
if (nums[start] + nums[end] == target):
return [start, end]
elif (nums[start] + nums[end] > target):
end -= 1
else: #(nums[start] + nums[end] < target):
start += 1
'''
# method O(n)
dir = {}
for i in range(len(nums)):
difference = target - nums[i]
if difference in dir:
return [dir[difference], i]
dir[nums[i]] = i
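        # Example (hand-checked): twoSum([2, 7, 11, 15], 9) returns [0, 1], since
        # nums[0] + nums[1] == 9; the dict maps each value to its index so the
        # complement lookup is O(1).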
```
#### File: FunAlgorithm/leetcode/23_Merge_k_Sorted_Lists_Hard.py
```python
from heapq import heappush, heappop  # heappush/heappop are called unqualified below
# from Queue import PriorityQueue
class Solution:
# method 2
def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:
result = point = ListNode(0)
min_heap = []
for list_index in range(len(lists)):
list_node = lists[list_index]
if list_node:
heappush(min_heap, (list_node.val, list_index))
while len(min_heap) > 0:
val, list_index = heappop(min_heap)
point.next = ListNode(val)
point = point.next
list_node = lists[list_index]
lists[list_index] = list_node = list_node.next
if list_node:
heappush(min_heap, (list_node.val, list_index))
return result.next
# method 1
'''
def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:
if len(lists) == 0:
return None
# Create a min heap
min_heap = []
for list_node in lists:
if list_node != None:
heappush(min_heap, list_node.val)
if len(min_heap) == 0:
return None
# inital min heap with the first element in each listnode
min_value = heappop(min_heap)
for list_index in range(len(lists)):
list_node = lists[list_index]
if list_node != None and min_value == list_node.val:
lists[list_index] = list_node.next
if lists[list_index] != None:
heappush(min_heap, lists[list_index].val)
break
result_listnode = ListNode(min_value)
self.recursive_mergeKLists(lists, min_heap, result_listnode)
return result_listnode
def recursive_mergeKLists(self, lists, min_heap, result_listnode):
if len(min_heap) == 0:
return
min_value = heappop(min_heap)
for list_index in range(len(lists)):
list_node = lists[list_index]
if list_node != None and min_value == list_node.val:
lists[list_index] = list_node.next
if lists[list_index] != None:
heappush(min_heap, lists[list_index].val)
break
result_listnode.next = ListNode(min_value)
result_listnode = result_listnode.next
return self.recursive_mergeKLists(lists, min_heap, result_listnode)
'''
```
#### File: FunAlgorithm/leetcode/2_Add_Two_Numbers_Medium.py
```python
class Solution:
def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:
return self.recursive_add_numbers(l1, l2, 0)
def recursive_add_numbers(self,
l1: Optional[ListNode],
l2: Optional[ListNode],
carry: int) -> Optional[ListNode]:
if l1 == None and l2 == None:
if carry == 1:
return ListNode(1)
else:
return None
elif l1 == None:
l2.val += carry
if l2.val//10 == 1:
result = ListNode(val = l2.val%10)
result.next = self.recursive_add_numbers(l1, l2.next, 1)
return result
else:
return l2
elif l2 == None:
l1.val += carry
if l1.val//10 == 1:
result = ListNode(val = l1.val%10)
result.next = self.recursive_add_numbers(l1.next, l2, 1)
return result
else:
return l1
else:
total = l1.val + l2.val + carry
carry = total//10
# print (total, carry)
result = ListNode(val = total%10)
result.next = self.recursive_add_numbers(l1.next, l2.next, carry)
return result
```
#### File: FunAlgorithm/leetcode/530_Minimum_Absolute_Difference_in_BST_Easy.py
```python
import math
class Solution:
def getMinimumDifference(self, root: Optional[TreeNode]) -> int:
self.prev = float('-inf')
self.diff = float('inf')
self.inorder(root)
return self.diff
def inorder(self, node):
if node:
self.inorder(node.left)
self.diff = min(self.diff,abs(node.val - self.prev))
self.prev = node.val
self.inorder(node.right)
```
#### File: python_practice/data_structure/binary_search.py
```python
def binarySearchList(space_list, target):
space_list.sort()
space_list_length = len(space_list)
    lo = 0
    hi = space_list_length - 1
    while lo <= hi:
        mid = (lo+hi)//2
        if space_list[mid] == target:
            return mid
        elif space_list[mid] > target:
            hi = mid - 1
        else:
            lo = mid + 1
    return -1
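# Example (hand-checked): binarySearchList([9, 2, 5, 1], 5) sorts the list to
# [1, 2, 5, 9] and returns 2, the index of 5 in the sorted list; a missing
# target returns -1.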
```
#### File: data_structure/linked_list/LinkedList.py
```python
from node import Node
class linked_list():
def __init__(self):
self.head = None
def set_head(self, head_node):
self.head = head_node
def size(self):
count = 0
current = self.head
while current:
count += 1
current = current.get_next()
return count
def __str__(self):
current = self.head
output = ""
while current:
output += str(current) + " -> "
current = current.get_next()
output += " End "
return output
def empty(self):
count = 0
if (self.head):
return False
else:
return True
# Pops an item from the front of the list
def pop_front(self):
if self.head:
self.head = self.head.get_next()
else:
raise IndexError("Unable to pop from empty list")
# params: node value
# return: int, index of the node value in the linked list
def value_at(self,index):
count = 0
current = self.head
while (count < index and current):
count += 1
current = current.get_next()
if current:
return current
else:
return "Out of Index"
# params: node value
# return: bool true: exist that value in the linked list false: does not exist
def exist(self, node_value):
result = False
current = self.head
while (result == False and current):
if current.get_data() == node_value:
result = True
current = current.get_next()
return result
# params: node value
# return:
def push_front(self, node_value):
new_node = Node(node_value)
new_node.set_next(self.head)
self.head = new_node
    def push_back(self, node_value):
        new_node = Node(node_value)
        if self.head is None:
            self.head = new_node
            return
        current = self.head
        while current.get_next():
            current = current.get_next()
        current.set_next(new_node)
# delete the last and return the end node value
def pop_back(self):
current = self.head
while current.get_next().get_next():
current = current.get_next()
result = current.get_next()
current.set_next(None)
return result
# return: the front node
def front(self):
return self.head
# return: the end node
    def back(self):
        current = self.head
        while current.get_next():
            current = current.get_next()
        return current
# params: index of the item which need to be erase
#
def erase(self, index):
count = 0
current = self.head
if (index == 0):
self.head = self.head.get_next()
else:
            while (count < index-1 and current):
                count += 1
                current = current.get_next()
if (count == index-1):
current.set_next(current.get_next().get_next())
else:
print "Index out of range"
def insert(self, index, node_value):
if (index == 0):
temp = self.head
            self.head = Node(node_value)
self.head.set_next(temp)
else:
count = 0
current = self.head
            while (count < index-1 and current):
count += 1
current = current.get_next()
if (count == index-1):
temp_next = current.get_next()
                temp = Node(node_value)
current.set_next(temp)
temp.set_next(temp_next)
else:
print "index out of range"
# a->b->c->end
def reverse(self):
current = self.head#a
temp_next = current.get_next()#b
if temp_next == None:
return self
current.set_next(None)#a->end
while temp_next.get_next():
temp_node = temp_next.get_next()
temp_next.set_next(current)
current = temp_next
temp_next = temp_node
temp_next.set_next(current)
self.head = temp_next
return self
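    # Example: for a list 1 -> 2 -> 3 -> End, reverse() relinks the nodes so the
    # list becomes 3 -> 2 -> 1 -> End and returns self.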
    def remove_value(self, value):
        current = self.head
        if current is None:
            return -1
        if current.get_data() == value: # for the case where the head needs to be removed
            self.head = current.get_next()
            return 1
        while (current.get_next() and current.get_next().get_data() != value):
            current = current.get_next()
        if (current.get_next() and current.get_next().get_data() == value):
            temp_next = current.get_next().get_next()
            current.set_next(temp_next)
            return 1
        else:
            return -1
def value_n_from_end(self, n):
linked_list_length = self.size()
return self.value_at(linked_list_length - n - 1)
```
#### File: python_practice/leetCode/leetcode-12.py
```python
class Solution:
def intToRoman(self, num):
"""
:type num: int
:rtype: str
"""
res = ""
while num:
if num >= 1000:
res += (num//1000) * 'M'
num = num%1000
elif num >= 900:
res += 'CM'
num -= 900
elif num >= 500:
res += 'D'
num -= 500
elif num >= 400:
res += 'CD'
num -= 400
elif num >= 100:
res += (num//100) * 'C'
num = num%100
elif num >= 90:
res += 'XC'
num -= 90
elif num >= 50:
res += 'L'
num -= 50
elif num >= 40:
res += 'XL'
num -= 40
elif num >= 10:
res += (num//10) * 'X'
num = num%10
elif num >= 9:
res += 'IX'
num -= 9
elif num >= 5:
res += 'V'
num -= 5
elif num >= 4:
res += 'IV'
num -= 4
elif num >= 1:
res += (num//1) * 'I'
num = 0
return res
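        # Example (hand-checked): intToRoman(1994) -> "MCMXCIV"
        # (1000 -> 'M', 900 -> 'CM', 90 -> 'XC', 4 -> 'IV').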
```
#### File: python_practice/leetCode/leetcode-437.py
```python
class Solution:
    def pathSum(self, root: 'TreeNode', sum: 'int') -> 'int':
return self.hasPathSum(root, sum, sum, 0)
def hasPathSum(self, root: 'TreeNode', sum: 'int', originalSum: 'int', dirty: 'int'):
if root == None:
return 0
elif root.right == None and root.left == None :
return int(sum == root.val)
else:
result = int(sum == root.val) + self.hasPathSum(root.left, sum - root.val, originalSum, 1) + self.hasPathSum(root.right, sum - root.val, originalSum, 1)
if (not dirty):
result = result + self.hasPathSum(root.left, originalSum, originalSum, 0) + self.hasPathSum(root.right, originalSum, originalSum, 0)
return result
```
#### File: FunAlgorithm/python_practice/NearestNeighbor.py
```python
import numpy as np
class NearestNeighbor(object):
def __init__(self):
pass
def train(self, X, y):
""" X is N x D where each row is an example. Y is 1-dimension of size N """
# the nearest neighbor classifier simply remembers all the training data
self.Xtr = X
self.ytr = y
def predict(self, X):
""" X is N x D where each row is an example we wish to predict label for """
num_test = X.shape[0]
# lets make sure that the output type matches the input type
Ypred = np.zeros(num_test, dtype = self.ytr.dtype)
# loop over all test rows
for i in xrange(num_test):
# find the nearest training image to the i'th test image
# using the L1 distance (sum of absolute value differences)
distances = np.sum(np.abs(self.Xtr - X[i,:]), axis = 1)
min_index = np.argmin(distances) # get the index with smallest distance
Ypred[i] = self.ytr[min_index] # predict the label of the nearest example
return Ypred
```
|
{
"source": "jeremykid/HIV_project",
"score": 3
}
|
#### File: jeremykid/HIV_project/calculate_trimer_coordinates.py
```python
import math
import pointsPlot
import linesPlot
import time
from HIV_point import Point
global T_points_file
T_points_file = open("T_points","w")
global T_y_points_array
T_y_points_array = []
global T_points_count
T_points_count = 0
global T_x_points_array
T_x_points_array = []
def distance(point_x, point_y):
'''
To caculate the distance between point_x and point_y
'''
x2 = (point_x.x-point_y.x)**2
y2 = (point_x.y-point_y.y)**2
z2 = (point_x.z-point_y.z)**2
return math.sqrt(x2+y2+z2)
def unit(point_z):
'''
To caculate the unit vector of point_z
'''
# print (point_z)
magnitude = math.sqrt(point_z.x**2 + point_z.y**2 + point_z.z**2)
if magnitude:
point_z.x = point_z.x/magnitude
point_z.y = point_z.y/magnitude
point_z.z = point_z.z/magnitude
return point_z,magnitude
else:
return point_z,magnitude
def rotate120(point_x, point_y):
'''
point_x rotate about point_y
'''
sin120 = math.sqrt(3)/2
cos120 = -0.5
pointz = Point(point_y.x, point_y.y, point_y.z)
newpoint,magnitude = unit(pointz)
x = (cos120+newpoint.x**2*(1-cos120)) * point_x.x +\
(newpoint.x*newpoint.y*(1-cos120) - newpoint.z*sin120) * point_x.y +\
(newpoint.x*newpoint.z*(1-cos120) + newpoint.y*sin120) * point_x.z
y = (newpoint.y*newpoint.x*(1-cos120) + newpoint.z*sin120) * point_x.x +\
(cos120 + newpoint.y**2*(1-cos120)) * point_x.y +\
(newpoint.y*newpoint.z*(1-cos120) - newpoint.x*sin120) * point_x.z
z = (newpoint.z*newpoint.x*(1-cos120) - newpoint.y*sin120) * point_x.x +\
(newpoint.z*newpoint.y*(1-cos120) + newpoint.x*sin120) * point_x.y +\
(cos120 + newpoint.z**2*(1-cos120))*point_x.z
result_point = Point(x,y,z)
return result_point
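# Sanity check (hand-worked, not from the original source): rotating the point
# (1, 0, 0) by 120 degrees about the z-axis, i.e.
#   rotate120(Point(1, 0, 0), Point(0, 0, 1))
# yields approximately (-0.5, 0.866, 0), which matches (cos120, sin120, 0).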
def rotate240(point_x,point_y):
'''
point_x rotate about point_y 240 degree
'''
sin240 = -math.sqrt(3)/2
cos240 = -0.5
pointz = Point(point_y.x, point_y.y, point_y.z)
newpoint,magnitude = unit(pointz)
x = (cos240+newpoint.x**2*(1-cos240)) * point_x.x +\
(newpoint.x*newpoint.y*(1-cos240) - newpoint.z*sin240) * point_x.y +\
(newpoint.x*newpoint.z*(1-cos240) + newpoint.y*sin240) * point_x.z
y = (newpoint.y*newpoint.x*(1-cos240) + newpoint.z*sin240) * point_x.x +\
(cos240 + newpoint.y**2*(1-cos240)) * point_x.y +\
(newpoint.y*newpoint.z*(1-cos240) - newpoint.x*sin240) * point_x.z
z = (newpoint.z*newpoint.x*(1-cos240) - newpoint.y*sin240) * point_x.x +\
(newpoint.z*newpoint.y*(1-cos240) + newpoint.x*sin240) * point_x.y +\
(cos240 + newpoint.z**2*(1-cos240))*point_x.z
result_point = Point(x,y,z)
return result_point
def get_root_index(index):
'''
According to algorithm, the index x is the root of branch 2x+1 and 2x+2
'''
if index == 2:
return 1
elif index%2:
return (index-1)/2
else:
return (index-2)/2
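# Examples: get_root_index(2) -> 1, get_root_index(5) -> 2, get_root_index(6) -> 2
# (children 2x+1 and 2x+2 both map back to parent x).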
def check_distance(r,point_x,point_y,check_T_points=0):
'''
return the distance between point_x and point_y
'''
distance = math.sqrt((point_x.x-point_y.x)**2+(point_x.y-point_y.y)**2+(point_x.z-point_y.z)**2)
#print (distance)# 52.076992648 52.0769926525
if distance >= (r-0.001):
if check_T_points:
if distance < 52.1 and (point_y.index-1)//2 != point_x.index:
global T_points_file
T_points_file.write(str(point_y)+"\n")
T_points_file.write(str(point_x)+"\n")
global T_y_points_array
T_y_points_array.append(point_y)
global T_x_points_array
T_x_points_array.append(point_x)
global T_points_count
T_points_count += 1
else:
return True
else:
return False
def check_overlap(r,point_x,point_y):
'''
Check the distance between two center of trimer is less than the r prime. r prime = 71.4
'''
distance = math.sqrt((point_x.x-point_y.x)**2+(point_x.y-point_y.y)**2+(point_x.z-point_y.z)**2)
if distance <= 71.4:
return 1
else:
return 0
def check_everydistance(r,point_x,K_keepers):
'''
check the distances between each points in K_keepers and point_x
'''
for i in K_keepers:
if not check_distance(r,i,point_x):
return False
for i in K_keepers:
check_distance(r,i, point_x, check_T_points=1)
count = 0
for i in K_keepers:
count += check_overlap(r,i, point_x)
if count != 1:
return False
return True
def algorithm(R, r, writeInFile=True):
# The IO for researcher to input the R
# r = 52.0769942809
cosphi = (2*R**2-r**2)/(2*R**2)
sinphi = math.sqrt(1-cosphi**2)
K_dict = {}
K_dict[1] = Point(0,0,R,1)
K_dict[2] = Point(sinphi*R,0,cosphi*R)
K_dict[2].editIndex(2)
K_dict[3] = rotate120(K_dict[2], K_dict[1])
K_dict[3].editIndex(3)
K_dict[4] = rotate240(K_dict[2], K_dict[1])
K_dict[4].editIndex(4)
K_queue = [2,3,4]
K_keepers = [K_dict[1],K_dict[2],K_dict[3],K_dict[4]]
K_index = 5
K_count = 4
# open the file to write, w is write the points' coordinations
if writeInFile:
w = open("points","w")
w_plot = open("lines","w")
# To write the first 4 points' coordination the file "points",
for i in K_keepers:
w.write(str(i))
w.write("\n")
# To write the first 3 lines 2 ends' points coordinations in the file "lines"
w_plot.write(str(K_keepers[0])+" "+str(K_keepers[1])+"\n")
w_plot.write(str(K_keepers[0])+" "+str(K_keepers[2])+"\n")
w_plot.write(str(K_keepers[0])+" "+str(K_keepers[3])+"\n")
# Repeat the step which finds the coordinations
# according to the points in the K_queue
# and push the new points in the K_queue
# Until we use all the points in the K_queue
while len(K_queue) != 0:
father = K_queue.pop(0)
grandfather = get_root_index(father)
if K_dict.has_key(father):
new_point = rotate120(K_dict[grandfather],K_dict[father])
K_index = father*2+1
new_point.editIndex(K_index)
if check_everydistance(r,new_point,K_keepers):
K_keepers.append(new_point)
K_dict[K_index] = new_point
K_queue.append(K_index)
K_count += 1
if writeInFile:
w.write(str(new_point)+"\n")
w_plot.write(str(new_point)+" "+str(K_dict[father])+"\n")
K_index = father*2+2
new_point = rotate240(K_dict[grandfather],K_dict[father])
new_point.editIndex(K_index)
if check_everydistance(r,new_point,K_keepers):
K_keepers.append(new_point)
K_dict[K_index] = new_point
K_queue.append(K_index)
K_count += 1
if writeInFile:
w.write(str(new_point)+"\n")
w_plot.write(str(new_point)+" "+str(K_dict[father])+"\n")
if writeInFile:
print "There are "+str(K_count)+" points."
print "There are "+str(T_points_count)+" T_points."
w_plot.close
w.close
return K_count
def plot():
# Ask researcher for showing the model and save the png file.
print "1 - show and save the lines plot"
print "2 - show and save the points model"
print "3 - quit"
option = int(input("Input your choice: "))
while option < 3:
global T_x_points_array
global T_y_points_array
if option == 2:
T_x_points_array.extend(T_y_points_array)
print (T_x_points_array)
pointsPlot.drawPoints(T_x_points_array)
else:
linesPlot.drawLines(T_x_points_array,T_y_points_array)
option = int(input("Input your choice: "))
return 0
def input_radius():
R = float(input('Input the R: '))
option = raw_input('Do you want to use default r = 52.0769942809? (y or n): ')
if option == 'y':
r = 52.0769942809
else:
r = float(input('Input the r: '))
return R,r
if __name__ == '__main__':
R, r = input_radius()
algorithm(R,r)
plot()
```
#### File: jeremykid/HIV_project/linesPlot.py
```python
import importlib
importlib.import_module('mpl_toolkits.mplot3d').Axes3D
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def drawLines(T_x_points_array,T_y_points_array):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
with open("lines") as f:
content = f.read().splitlines()
count = 0
for i in content:
line_list = i.split(' ')
count += 1
try:
ax.plot([float(line_list[0]), float(line_list[4])],\
[float(line_list[1]),float(line_list[5])],\
zs=[float(line_list[2]),float(line_list[6])],\
c = "r")
except:
print count
for i in range(len(T_y_points_array)):
ax.plot([T_x_points_array[i].x,T_y_points_array[i].x],\
[T_x_points_array[i].y,T_y_points_array[i].y],\
zs = [T_x_points_array[i].z,T_y_points_array[i].z],\
c = "b")
plt.savefig('lines.png')
plt.show()
```
|
{
"source": "jeremykid/Kattis",
"score": 3
}
|
#### File: jeremykid/Kattis/numbertree.py
```python
def main():
l = raw_input('')
l = l.split(' ')
H = int(l[0])+1
try:
direction = list(l[1])
temp = 1
index = 0
for i in direction:
index += 1
if i == "L":
temp *= 2
if i == "R":
temp *= 2
temp += 1
result =2**H - temp
print result
except:
print 2**H-1
main()
```
|
{
"source": "jeremykid/ProteinDataResearch",
"score": 2
}
|
#### File: jeremykid/ProteinDataResearch/proteinData.py
```python
class ProteinData:
def __init__(self, raw_data, data):
self.raw_data = raw_data
self.data = data
    def add_missing_data(self):
        # TODO: fill in missing fields (body not provided in the original)
        pass
    def matching_data(self):
        # TODO: match raw_data records against data (body not provided in the original)
        pass
```
|
{
"source": "jeremykohn/markform",
"score": 4
}
|
#### File: src/markform/convert_text.py
```python
from helpers import create_html
from helpers.parse_line import parse_line
def convert_text(markform_text):
input_lines = markform_text.split("\n")
output_segments = []
# Convert document line by line.
for line in input_lines:
# Call function to split line into components.
parsed_line = parse_line(line)
# Get individual components of line.
pre = parsed_line["pre_element_content"]
post = parsed_line["post_element_content"]
inner = parsed_line["inner_content"]
identifier = parsed_line["opening_identifier"]
# Use different functions to convert an element based on its opening identifier.
# Form start.
if identifier == "+":
output = create_html.form_start(pre_element_content=pre, post_element_content=post, inner_content=inner)
# Form end.
elif identifier == "-":
output = create_html.form_end(pre_element_content=pre, post_element_content=post, inner_content=inner)
# Various types of <input> elements.
# Text input.
elif identifier == "_":
output = create_html.input_element(input_type="text", pre_element_content=pre, post_element_content=post, inner_content=inner)
# Email input.
elif identifier == "@":
output = create_html.input_element(input_type="email", pre_element_content=pre, post_element_content=post, inner_content=inner)
# Password input.
elif identifier == "*":
output = create_html.input_element(input_type="password", pre_element_content=pre, post_element_content=post, inner_content=inner)
# Number input.
elif identifier == "$":
output = create_html.input_element(input_type="number", pre_element_content=pre, post_element_content=post, inner_content=inner)
# Range input.
elif identifier == "%":
output = create_html.input_element(input_type="range", pre_element_content=pre, post_element_content=post, inner_content=inner)
# File input.
elif identifier == "^":
output = create_html.input_element(input_type="file", pre_element_content=pre, post_element_content=post, inner_content=inner)
# Textarea.
elif identifier == "[":
output = create_html.textarea_element(pre_element_content=pre, post_element_content=post, inner_content=inner)
# Submit button.
elif identifier == "(":
output = create_html.submit_button_element(pre_element_content=pre, post_element_content=post, inner_content=inner)
# Element group.
elif identifier == "{":
output = create_html.element_group(pre_element_content=pre, post_element_content=post, inner_content=inner)
# No identifier returned by parser. Not a Markform element.
else:
# Don't convert line, just append to output.
output = line
# Add output from this element
output_segments.append(output)
# Finally, combine output segments.
complete_output = "\n".join(output_segments)
return complete_output
```
#### File: markform/helpers/create_html.py
```python
import re
import cgi
# Helpers.
# Create element ID by assembling specified text segments.
def combine_into_element_id(id_segments):
# Remove empty segments.
id_segments = [segment for segment in id_segments if segment]
# Join with '-', replace nonword characters with '-', and convert all to lowercase.
combined_text = ('-').join(id_segments)
combined_text = re.sub(r'\W+', '-', combined_text)
combined_text = combined_text.lower()
return combined_text
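# Example (hand-checked): combine_into_element_id(['markform', 'text', 'input', 'Your name:', ''])
# drops the empty segment and returns 'markform-text-input-your-name-'
# (the space and the ':' each become '-').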
def create_label(element_id, label_content):
escaped_label_content = cgi.escape(label_content, quote=True)
label_html = '<label for="{}">{}</label>'.format(element_id, escaped_label_content)
return label_html
def create_button_text(content):
button_text = content.strip()
button_text = cgi.escape(button_text)
return button_text
# Later, might add more features.
# Like, convert `Name of form: [+]` into <form><fieldset><legend>Name of form:</legend>...<fieldset></form>
# Or, convert `[+ method="post" action="action.php" +]` into <form method="post" action="action.php">
def form_start(pre_element_content, post_element_content, inner_content):
return "<form>"
def form_end(pre_element_content, post_element_content, inner_content):
return "</form>"
def input_element(input_type, pre_element_content, post_element_content, inner_content):
output_html_lines = []
# Output is surrounded by div tags.
# First line is an opening div tag.
output_html_lines.append('<div>')
# Trim whitespace around pre- and post-element content.
pre_element_content = pre_element_content.strip()
post_element_content = post_element_content.strip()
# Create HTML element ID by joining element type with pre/post element content.
element_id = combine_into_element_id(['markform', input_type, 'input', pre_element_content, post_element_content])
# Generate HTML for label before element.
if pre_element_content:
pre_element_label = create_label(element_id, pre_element_content)
output_html_lines.append(pre_element_label)
# Generate HTML for input.
input_tag = '<input id="{}" type="{}">'.format(element_id, input_type)
output_html_lines.append(input_tag)
# Generate HTML for label after element.
if post_element_content:
post_element_label = create_label(element_id, post_element_content)
output_html_lines.append(post_element_label)
# Last line is a closing div tag.
output_html_lines.append('</div>')
# Output final HTML.
output_html = "\n".join(output_html_lines)
return output_html
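# Example output (hand-derived from the code above, with pre_element_content='Name:'
# and empty post/inner content): input_element('text', 'Name:', '', '') returns
#   <div>
#   <label for="markform-text-input-name-">Name:</label>
#   <input id="markform-text-input-name-" type="text">
#   </div>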
# Later, might convert textarea with [[ Inner Text ]] either to placeholder or to pre-filled text.
def textarea_element(pre_element_content, post_element_content, inner_content):
output_html_lines = []
# Output is surrounded by div tags.
# First line is an opening div tag.
output_html_lines.append('<div>')
# Trim whitespace around pre- and post-element content.
pre_element_content = pre_element_content.strip()
post_element_content = post_element_content.strip()
# Create HTML element ID by joining element type with pre/post element content.
element_id = combine_into_element_id(['markform', 'textarea', pre_element_content, post_element_content])
# Generate HTML for label before element.
if pre_element_content:
pre_element_label = create_label(element_id, pre_element_content)
output_html_lines.append(pre_element_label)
# Generate HTML for textarea.
textarea_tags = '<textarea id="{}"></textarea>'.format(element_id)
output_html_lines.append(textarea_tags)
# Generate HTML for label after element.
if post_element_content:
post_element_label = create_label(element_id, post_element_content)
output_html_lines.append(post_element_label)
# Last line is a closing div tag.
output_html_lines.append('</div>')
# Output final HTML.
output_html = "\n".join(output_html_lines)
return output_html
def submit_button_element(pre_element_content, post_element_content, inner_content):
output_html_lines = []
# Output is surrounded by div tags.
# First line is an opening div tag.
output_html_lines.append('<div>')
# Trim whitespace around pre- and post-element content.
pre_element_content = pre_element_content.strip()
post_element_content = post_element_content.strip()
# Create HTML element ID by joining element type with inner content and pre/post element content.
element_id = combine_into_element_id(['markform', 'submit-button', pre_element_content, inner_content, post_element_content])
# Generate HTML for label before element.
if pre_element_content:
pre_element_label = create_label(element_id, pre_element_content)
output_html_lines.append(pre_element_label)
# Generate button text from inner content.
button_text = create_button_text(inner_content)
# Generate HTML for submit button.
submit_button_tags = '<button id="{}" type="submit">{}</button>'.format(element_id, button_text)
output_html_lines.append(submit_button_tags)
# Generate HTML for label after element.
if post_element_content:
post_element_label = create_label(element_id, post_element_content)
output_html_lines.append(post_element_label)
# Last line is a closing div tag.
output_html_lines.append('</div>')
# Output final HTML.
output_html = "\n".join(output_html_lines)
return output_html
"""
def parse_element_group
def checkbox_group( ... )
def radio_button_group( ... )
def dropdown_menu( ... )
def element_group(pre_element_content, post_element_content, inner_content):
# Turn inner content into array of inner elements
# Divide using pipe characters `|`
# Detect type of the first inner element
# Detect type of the other inner elements;
# parse as 'same type' if they're of the same type,
# or parse as just strings if they aren't of the same type
# Parse inner content of element group.
parsed_inner_elements = []
# That will include element type, prechecked/preselected, pre-identifier label/content, post-identifier label/content
inner_element_list = []
# Just a list of separate inner elements.
pos = 0
current_character = None
previous_character = None
next_character = None
current_inner_element_start = 0
# current_inner_element_end = None
# Parse inner content, divide into elements.
while pos < len(inner_content):
current_character = inner_content[pos]
if pos > 0:
previous_character = inner_content[pos - 1]
if pos < len(inner_content) - 1:
next_character = inner_content[pos + 1]
# Check for unescaped pipe character. This character divides inner elements from each other.
if current_character == '|' and previous_character != '\\':
# End of inner element, beginning of next element.
# Add current inner element to list.
current_inner_element = inner_content[current_inner_element_start:pos]
inner_element_list.append(current_inner_element)
# Update the start position of next inner element.
current_inner_element_start = pos + 1
# Next character.
pos += 1
# Remember to remove empty/blank inner elements from inner_element_list? Or treat them as empty labels?
# Process each inner element.
# Return pre inner identifier content, post inner identifier content, element type, and checked/unchecked or selected/unselected.
# Then, provide that as input to the respective function --
# checkbox_group()
# radio_button_group()
# dropdown_menu()
# (And within each of those, have functions to parse inner elements into pre-identifier content, post-identifier content, and identifier?)
# Element group.
elif identifier == "{":
# Parse the inner content first, to get components and determine valid type of element group. If any.
parsed_element_group = parse_element_group(inner)
group_type = parsed_element_group["group_type"]
inner_element_list = parsed_element_group["inner_element_list"] # Each inner element includes a [ ], [x], ( ), (o), or >, but not | characters
# Checkbox group.
if group_type == "checkbox":
output = create_html.checkbox_group(pre_element_content=pre, post_element_content=post, inner_element_list=inner_element_list)
# Radio button group.
elif group_type == "radio":
output = create_html.radio_group(pre_element_content=pre, post_element_content=post, inner_element_list=inner_element_list)
# Select / dropdown menu.
elif group_type == "dropdown":
output = create_html.dropdown_menu(pre_element_content=pre, post_element_content=post, inner_element_list=inner_element_list)
else:
# Don't convert element group, just append to output.
output = line
"""
```
|
{
"source": "jeremyko/let-me-test",
"score": 2
}
|
#### File: let-me-test/module_core/lmt_runner.py
```python
import sys
import os
import glob
import datetime
import subprocess
import traceback
import configparser
import shutil
import logging
from tspec_cmd_impl import *
from module_core import *
_g_runner_self = None # XXX
#///////////////////////////////////////////////////////////////////////////////
class PkgTestRunner:
logger = None
logger_summary = None
__args = None
__ini_config = None
__group_dirs = [] # name only
__group_dirs_full = [] # full path
__test_dirs_per_group = []
__test_dirs_per_group_full = []
__total_test_cnt = 0
__total_tspec_cnt = 0
__failed_tests = []
__succeeded_tests = []
__failed_test_cnt = 0
__succeeded_test_cnt = 0
temp_internal_use_only_dir = None
temp_internal_use_only_dir_remote = None
__log_level = None
#DB ----------------
ora_conn_str = None
mysql_host = None
mysql_user = None
mysql_passwd = <PASSWORD>
mysql_db_name= None
# ----------------
pkg_dir = None
package_id = None
package_name = None
system_name = None
service_name = None
xml_cfg_path = None
xml_db_path = None
input_path = None
work_path = None
stat_path = None
output_path = None
pfnm_userid = None
pfnm_passwd = None
cli_name = None
db_type = None
log_base_path= None
cur_ctx_test_path= None
is_xml_config_changed = False
start_all_prc_per_tspec = 'Y'
ini_config_path_full = None
simul_name = None
cur_indent = None
simul_gsn_binding_ip = None
simul_gsn_binding_port = None
simul_gtp_ip = None
simul_gtp_port = None
simul_dm_base_path = None # XXX diameter simul only XXX
#simul_tps = 0
pid_save = []
info_repo = {}
    cleanup_cli_cmds = [] # CLI commands to run after a single tspec finishes and the xml db / xml config have been restored
change_xml_dbs = {} # table_name:is_changed
dm_sim_cfg_backup_files = [] # dm simul only. client.list, tas01.ini ...
PKG_CFG_NAME ='per_pkg.ini'
TSPEC_FILE_EXT ='.tspec'
TSPEC_DIR_NAME = 'tspecs'
GROUP_DIR_PREFIX ='group_'
xml_data_string =''
ems_ip = None
ems_id = None
ems_passwd = <PASSWORD>
ems_xml_cfg_path = None
ems_is_xml_config_changed = False
ems_cli_cmd = None
ems_package_name = None
ems_system_name = None
ems_service_name = None
ems_policy_path = None
start_timestamp = None
test_result_dir = None
#==================================================================
def __init__(self,logger,logger_summary,timestamp,args):
self.__ini_config = configparser.ConfigParser()
self.pkg_dir = args.pkg_dir
self.logger = logger;
self.logger_summary = logger_summary;
self.start_timestamp = timestamp
self.test_result_dir = self.pkg_dir + "pkg_test_result"
self.test_result_dir = os.path.join(self.test_result_dir, "{}_RESULT_DATA".format(timestamp) )
self.__args = args
self.pkg_dir = os.path.abspath(self.pkg_dir) + os.sep
global _g_runner_self
_g_runner_self = self
self.reset_variables()
#==================================================================
def reset_variables(self):
self.cur_ctx_test_path = None
self.__group_dirs = []
self.__group_dirs_full = []
self.__test_dirs_per_group = []
self.__test_dirs_per_group_full = []
self.__total_test_cnt = 0
self.__total_tspec_cnt = 0
self.__failed_tests = []
self.__succeeded_tests = []
self.__failed_test_cnt = 0
self.__succeeded_test_cnt = 0
self.__failed_tspec_cnt = 0
self.__succeeded_tspec_cnt = 0
self.pid_save = []
self.info_repo = {}
self.cleanup_cli_cmds = []
self.change_xml_dbs = {}
self.dm_sim_cfg_backup_files =[]
self.cur_indent = ' '
#==================================================================
def run_teardown_if_any(self):
self.logger.info("\n\n")
setup_path = os.path.join(self.pkg_dir,'pkg_teardown.tspec')
if os.path.isfile(setup_path):
self.logger.info(">> run pkg common tspec (teardown.tspec)")
            exec(open(setup_path).read())  # execfile() is Python 2-only
#==================================================================
def display_run_result(self):
if self.__total_tspec_cnt == 0 :
self.logger.error("invalid run command. check cmd arguments")
return;
self.logger.info(" ")
self.logger.info(" ")
self.logger.info("---------------------------------------------------------")
msg ="TOTAL {} TSPEC".format(self.__total_tspec_cnt)
self.logger.info (msg)
self.logger_summary.info (msg)
msg =" {} OK".format(self.__succeeded_tspec_cnt)
self.logger.info (msg)
self.logger_summary.info (msg)
for succeeded in self.__succeeded_tests :
msg = " succeeded : {}".format(succeeded)
self.logger.info (msg)
self.logger_summary.info(msg)
if(self.__failed_test_cnt >0 ):
msg =" {} FAILED".format(self.__failed_tspec_cnt)
self.logger.error (msg)
self.logger_summary.error(msg)
for failed in self.__failed_tests :
msg = " failed : {}".format(failed)
self.logger.error (msg)
self.logger_summary.error(msg)
#==================================================================
# run test
#==================================================================
def run_test(self):
self.logger.info("pkg dir [{}]".format(self.pkg_dir))
self.read_pkg_ini()
self.reset_variables()
#create temporary work directory
if(os.path.isdir(self.temp_internal_use_only_dir)==False):
self.logger.debug("create internal use only dir : {}".
format(self.temp_internal_use_only_dir))
os.mkdir(self.temp_internal_use_only_dir)
#create result directory
self.__result_base_dir = self.pkg_dir + 'pkg_test_result'
if(os.path.isdir(self.__result_base_dir)==False):
self.logger.debug("create result dir : {}".
format(self.__result_base_dir))
os.mkdir(self.__result_base_dir)
self.set_log_level()
self.get_groups ()
if(self.__group_dirs):
#-------------------------------------------
# XXX : run setup.tspec if any.
setup_path = os.path.join(self.pkg_dir,'pkg_setup.tspec')
if os.path.isfile(setup_path):
self.logger.info("\n\n")
self.logger.info(">> run pkg common tspec (setup.tspec)")
                exec(open(setup_path).read())  # execfile() is Python 2-only
#-------------------------------------------
self.logger.info("run groups : {}".format(self.__group_dirs))
self.run_groups()
else:
err_msg ="invalid pkg dir or usage {}".format(self.pkg_dir)
self.logger.error(err_msg)
self.logger_summary.error(err_msg)
return False
return True
#==================================================================
def strip_all_members(self):
self.xml_cfg_path = self.xml_cfg_path.strip()
self.xml_db_path = self.xml_db_path .strip()
self.package_id = self.package_id.strip()
self.package_name = self.package_name.strip()
self.system_name = self.system_name .strip()
self.service_name = self.service_name.strip()
self.cli_name = self.cli_name .strip()
self.log_base_path= self.log_base_path.strip()
self.pfnm_userid = self.pfnm_userid.strip()
self.pfnm_passwd = self.pfnm_passwd.strip()
self.db_type = self.db_type.strip()
self.start_all_prc_per_tspec = self.start_all_prc_per_tspec.strip()
self.input_path = self.input_path.strip()
self.work_path = self.work_path.strip()
self.output_path = self.output_path.strip()
self.stat_path = self.stat_path.strip()
self.ems_xml_cfg_path = self.ems_xml_cfg_path.strip()
self.ems_policy_path = self.ems_policy_path.strip()
if(self.simul_dm_base_path):
self.simul_dm_base_path = self.simul_dm_base_path.strip()
#==================================================================
def remove_tailing_os_sep (self):
        # XXX A trailing '/' is not needed on any configured path value.
        # If one was set by mistake, strip the trailing '/'.
"""
'/LOG/' --> '/LOG'
"""
if self.xml_cfg_path.endswith(os.sep):
self.xml_cfg_path = self.xml_cfg_path[:-1]
if self.xml_db_path.endswith(os.sep):
self.xml_db_path = self.xml_db_path[:-1]
if self.log_base_path.endswith(os.sep):
self.log_base_path = self.log_base_path[:-1]
if(self.simul_dm_base_path):
if self.simul_dm_base_path.endswith(os.sep):
self.simul_dm_base_path = self.simul_dm_base_path[:-1]
if self.input_path.endswith(os.sep):
self.input_path = self.input_path[:-1]
if self.work_path.endswith(os.sep):
self.work_path = self.work_path[:-1]
if self.output_path.endswith(os.sep):
self.output_path = self.output_path[:-1]
if self.stat_path.endswith(os.sep):
self.stat_path = self.stat_path[:-1]
if self.ems_xml_cfg_path.endswith(os.sep):
self.ems_xml_cfg_path = self.ems_xml_cfg_path[:-1]
if self.ems_policy_path.endswith(os.sep):
self.ems_policy_path = self.ems_policy_path[:-1]
#==================================================================
def read_pkg_ini(self):
# read per_pkg.ini, pkg_dir ends with os.sep
self.ini_config_path_full = self.pkg_dir+self.PKG_CFG_NAME
if (os.path.isfile(self.ini_config_path_full) == False):
err_msg ="invalid cfg path {}".format(self.ini_config_path_full)
self.logger.error(err_msg)
return False
self.__ini_config.read(self.ini_config_path_full)
self.package_id = self.__ini_config['COMMON']['PACKAGE_ID']
self.package_name = self.__ini_config['COMMON']['PACKAGE_NAME']
self.system_name = self.__ini_config['COMMON']['SYSTEM_NAME']
self.service_name = self.__ini_config['COMMON']['SERVICE_NAME']
self.xml_cfg_path = self.__ini_config['COMMON']['CONFIG_PATH']
self.xml_db_path = self.__ini_config['COMMON']['XML_DB_PATH']
self.input_path = self.__ini_config['COMMON']['INPUT_PATH']
self.work_path = self.__ini_config['COMMON']['WORK_PATH']
self.output_path = self.__ini_config['COMMON']['OUTPUT_PATH']
self.stat_path = self.__ini_config['COMMON']['STAT_PATH']
self.cli_name = self.__ini_config['COMMON']['CLI_NAME']
self.start_all_prc_per_tspec = self.__ini_config['COMMON']['START_ALL_PRC_PER_TSPEC']
self.__log_level = self.__ini_config['LOG']['LOG_LEVEL']
self.log_base_path= self.__ini_config['LOG']['LOG_BASE_PATH']
self.pfnm_userid = self.__ini_config['PFNM']['USER']
self.pfnm_passwd = self.__ini_config['PFNM']['<PASSWORD>']
self.db_type = self.__ini_config['DB']['DB_TYPE']
if(self.__ini_config.has_option('DB', 'ORA_CONN')):
self.ora_conn_str = self.__ini_config['DB']['ORA_CONN']
if(self.__ini_config.has_option('DB', 'MYSQL_HOST')):
self.mysql_host = self.__ini_config['DB']['MYSQL_HOST']
self.mysql_user = self.__ini_config['DB']['MYSQL_USER']
self.mysql_passwd = self.__ini_config['DB']['MYSQL_PASSWD']
self.mysql_db_name= self.__ini_config['DB']['MYSQL_DB_NAME']
if(self.db_type =='ORACLE' and self.ora_conn_str ==None):
err_msg ="oracle connection string not set"
self.logger.error(err_msg)
return False
if self.db_type =='MYSQL' :
if self.mysql_host ==None or self.mysql_user ==None or \
self.mysql_passwd ==None or self.mysql_db_name ==None :
err_msg ="mysql db info not all set"
self.logger.error(err_msg)
return False
if(self.__ini_config.has_option('SIMUL', 'SIMUL_NAME')):
self.simul_name = self.__ini_config['SIMUL']['SIMUL_NAME']
if(self.__ini_config.has_option('GSN_CONFIG', 'GSN_BINDING_IP')):
# XXX gtp only !
            # Must exist in T_GTP_NODE_INFO. GTP only receives from predefined GSNs.
            # When GTP receives a message, it looks up the sender's ip and port.
self.simul_gsn_binding_ip = self.__ini_config['GSN_CONFIG']['GSN_BINDING_IP']
self.simul_gsn_binding_port = self.__ini_config['GSN_CONFIG']['GSN_BINDING_PORT']
            # GTP info : the ip and port that the GTP process binds to.
            # Must exist in T_GTP_DEF; this is the server-side info used when GTP starts.
self.simul_gtp_ip = self.__ini_config['GTP_CONFIG']['GTP_IP']
self.simul_gtp_port = self.__ini_config['GTP_CONFIG']['GTP_PORT']
#self.simul_tps = self.__ini_config['TPS_CONFIG']['SEND_PER_SECOND']
            # Not needed here: the simulator reads this setting directly and handles TPS itself.
if(self.__ini_config.has_option('SIMUL', 'SUMUL_DM_BASE_PATH')):
# diameter simul only !!
# XXX : diameter simul use relative path !!!!
# /CG/OFCS_SIM/IMS_SIM/config --> client.list
# /CG/OFCS_SIM/IMS_SIM/raw --> tas01.ini
self.simul_dm_base_path = self.__ini_config['SIMUL']['SUMUL_DM_BASE_PATH']
if(self.__ini_config.has_option('EMS', 'IP')==False):
self.logger.error("- EMS CONFIG NOT EXISTS")
return False
self.ems_ip = self.__ini_config['EMS']['IP']
self.ems_id = self.__ini_config['EMS']['ID']
self.ems_passwd = self.__ini_config['EMS']['PASSWD']
self.ems_xml_cfg_path = self.__ini_config['EMS']['CONFIG_PATH']
self.ems_cli_name = self.__ini_config['EMS']['CLI_NAME']
self.ems_package_name = self.__ini_config['EMS']['PACKAGE_NAME']
self.ems_system_name = self.__ini_config['EMS']['SYSTEM_NAME']
self.ems_service_name = self.__ini_config['EMS']['SERVICE_NAME']
self.ems_policy_path = self.__ini_config['EMS']['POLICY_PATH']
self.strip_all_members()
self.remove_tailing_os_sep()
self.logger.info("- xml_cfg_path [{}]".format(self.xml_cfg_path))
self.logger.info("- xml_db_path [{}]".format(self.xml_db_path))
self.logger.info("- input_path [{}]".format(self.input_path))
self.logger.info("- work_path [{}]".format(self.work_path))
self.logger.info("- stat_path [{}]".format(self.stat_path))
self.logger.info("- output_path [{}]".format(self.output_path))
self.logger.info("- package_id [{}]".format(self.package_id))
self.logger.info("- package_name [{}]".format(self.package_name))
self.logger.info("- system_name [{}]".format(self.system_name ))
self.logger.info("- service_name [{}]".format(self.service_name ))
self.logger.info("- cli_name [{}]".format(self.cli_name ))
self.logger.info("- log_level [{}]".format(self.__log_level ))
self.logger.info("- log_base_path[{}]".format(self.log_base_path ))
self.logger.info("- db_type [{}]".format(self.db_type ))
if self.db_type =='ORACLE' :
self.logger.info("- conn str [{}]".format(self.ora_conn_str))
if self.db_type =='MYSQL' :
self.logger.info("- host [{}]".format(self.mysql_host ))
self.logger.info("- user [{}]".format(self.mysql_user ))
#self.logger.info("- passwd [{}]".format(self.mysql_passwd ))
self.logger.info("- db name [{}]".format(self.mysql_db_name))
self.logger.info("- simul_name [{}]".format(self.simul_name ))
self.logger.info("- simul_gsn_binding_ip [{}]".format(self.simul_gsn_binding_ip ))
self.logger.info("- simul_gsn_binding_port[{}]".format(self.simul_gsn_binding_port ))
self.logger.info("- simul_gtp_ip [{}]".format(self.simul_gtp_ip ))
self.logger.info("- simul_gtp_ip [{}]".format(self.simul_gtp_port ))
#self.logger.info("- simul_tps [{}]".format(self.simul_tps ))
self.logger.info("- start_all_prc_per_tspec [{}]".format(self.start_all_prc_per_tspec ))
self.temp_internal_use_only_dir = self.pkg_dir + 'do_not_delete_internal_use'
self.logger.debug("- internal_use_only_dir [{}]".
format(self.temp_internal_use_only_dir))
if(self.simul_dm_base_path):
self.logger.info("- simul_dm_base_path [{}]".format(self.simul_dm_base_path ))
self.temp_internal_use_only_dir_remote = self.temp_internal_use_only_dir + '/remote'
self.logger.info("- test result dir [{}]".format(self.test_result_dir ))
#==================================================================
def set_log_level(self):
#print ("log level is {}".format(self.__log_level))
if(self.__log_level == 'DEBUG'):
self.logger.setLevel(logging.DEBUG)
elif(self.__log_level == 'INFO'):
self.logger.setLevel(logging.INFO)
elif(self.__log_level == 'WARNING'):
self.logger.setLevel(logging.WARNING)
elif(self.__log_level == 'ERROR'):
self.logger.setLevel(logging.ERROR)
elif(self.__log_level == 'CRITICAL'):
self.logger.setLevel(logging.CRITICAL)
else:
self.logger.setLevel(logging.INFO)
#==================================================================
#remove temp directory : self.temp_internal_use_only_dir
def clean_all(self):
self.logger.debug("clean all temporary files")
if(os.path.isdir(self.temp_internal_use_only_dir)):
self.logger.debug("remove internal use only dir : {}".
format(self.temp_internal_use_only_dir))
shutil.rmtree(self.temp_internal_use_only_dir) #XXX dangerous
return True
#==================================================================
def get_groups(self):
grp_dirs = os.listdir(self.pkg_dir)
grp_dirs.sort()
self.__group_dirs = []
self.__group_dirs_full = []
for grp_dir_name in grp_dirs :
if(grp_dir_name.startswith(self.GROUP_DIR_PREFIX)) :
# this is group directory
if(self.__args.group is not None):
#TODO dup check
for grp_arg in self.__args.group:
if(grp_dir_name == grp_arg):
self.logger.info("[run specific group] {}".format(grp_dir_name))
self.__group_dirs_full.append(self.pkg_dir+grp_dir_name)
self.__group_dirs.append(grp_dir_name)
else:
self.__group_dirs_full.append(self.pkg_dir+grp_dir_name)
# pkg_dir ends with os.sep
self.__group_dirs.append(grp_dir_name)
#==================================================================
def run_groups(self):
index =0
for grp_dir_name in self.__group_dirs :
self.logger.debug(" ")
self.logger.debug("================================= group {}".
format(self.__group_dirs_full[index]))
# TODO run grp_setup.py / grp_teardown.py for each group
#----------------------------
self.get_test_per_group(self.__group_dirs_full[index],grp_dir_name)
#----------------------------
index += 1
if(self.__test_dirs_per_group_full):
start_dtime = datetime.datetime.now()
#-------------------------------------
self.run_tests_per_group(grp_dir_name)
#-------------------------------------
end_dtime = datetime.datetime.now()
elapsed = end_dtime - start_dtime
self.logger.info(" ")
if(len(self.__failed_tests)>0):
self.logger.error("[FAILED] {} -> elapsed : {} secs".
format(grp_dir_name, elapsed.total_seconds()))
else:
self.logger.info("[PASSED] {} -> elapsed : {} secs".
format(grp_dir_name, elapsed.total_seconds()))
#==================================================================
def get_test_per_group(self, group_dir_full, grp_dir_name):
self.logger.debug("grp_dir_name = {}".format(grp_dir_name))
self.__test_dirs_per_group_full = []
self.__test_dirs_per_group = []
test_dirs = os.listdir(group_dir_full)
test_dirs.sort()
for test_dir_name in test_dirs :
if(not os.path.isdir(os.path.join(group_dir_full, test_dir_name))):
self.logger.debug("not directory skip = {}".format(test_dir_name))
continue
self.logger.debug("test name = {}".format(test_dir_name))
if(self.__args.test_id is not None):
#run specific test : -t [group name].[test name]
# ex) 'group_001.grp001_test001'
filter_name = grp_dir_name + "." + test_dir_name
self.logger.debug("filter_name = {}".format(filter_name))
for grp_and_test in self.__args.test_id:
if(filter_name == grp_and_test):
self.logger.debug("[run specific test] {}".format(grp_and_test))
self.__test_dirs_per_group_full.append(group_dir_full+os.sep+test_dir_name)
self.__test_dirs_per_group.append(test_dir_name)
elif(self.__args.spec is not None): #XXX specific tspec
for spec in self.__args.spec :
if grp_dir_name in spec :
if test_dir_name in spec :
test_dir = group_dir_full+os.sep+test_dir_name
if(test_dir not in self.__test_dirs_per_group_full):
self.__test_dirs_per_group_full.append(group_dir_full+os.sep+test_dir_name)
self.__test_dirs_per_group.append(test_dir_name)
self.logger.debug("[run test] {}, {}".format(test_dir_name, spec))
else:
self.logger.debug("[run whole test] {}".format(test_dir_name))
self.__test_dirs_per_group_full.append(group_dir_full+os.sep+test_dir_name)
self.__test_dirs_per_group.append(test_dir_name)
#==================================================================
def run_tests_per_group(self, grp_dir_name):
if(self.__args.spec is not None): #XXX specific tspec
is_found = False
for spec in self.__args.spec :
if (grp_dir_name in spec) :
self.logger.debug("GROUP={} : FOUND".format(grp_dir_name))
is_found = True
break
if (is_found ==False):
self.logger.debug("GROUP={} : SKIP".format(grp_dir_name))
return True
self.logger.info(" ")
self.logger.info("////////////////////////////////////////////////////////////////////////////////")
self.logger.info("[GROUP] {}".format(grp_dir_name))
#self.logger.debug(" --> TEST DIRS : {}".format(self.__test_dirs_per_group_full))
index = 0
for test_dir_full in self.__test_dirs_per_group_full :
self.cur_ctx_test_path = os.path.abspath(test_dir_full) # absolute path
start_dtime = datetime.datetime.now()
self.__total_test_cnt += 1
self.logger.debug(" --> test dir {}".format(test_dir_full))
test_name = self.__test_dirs_per_group[index]
self.logger.info(" ")
self.logger.info(" ")
self.logger.info("=== [TEST] {}".format(test_name))
self.logger.info(" ")
tspecs_dir_full = test_dir_full +os.sep + self.TSPEC_DIR_NAME # tspec directory path
# TODO run test_setup.py / test_teardown.py for each test
is_test_done = False
#----------------------------
if(os.path.isdir(tspecs_dir_full)):
tspec_names = os.listdir(tspecs_dir_full) # all tspec path in tspec dir.
tspec_names.sort()
if(tspec_names):
#-----------------------------------------
is_all_tspec_ok = True
for tspec_name in tspec_names:
if(tspec_name.endswith(self.TSPEC_FILE_EXT)) :
#self.logger.debug("{}[tspec] {}".format(self.cur_indent,tspec_name))
if(self.__args.spec is not None): # specific tspec
self.logger.debug("{}[filter tspec] {}".format(self.cur_indent, self.__args.spec))
#run specific tspec : -s [group name].[test name].[tspec name]
# ex) 'group_test.test001.spec001'
filter_name = grp_dir_name + "." + test_name + "." + tspec_name
self.logger.debug("{}filter_name = {}".format(self.cur_indent,filter_name))
for one_spec in self.__args.spec:
if(filter_name==one_spec + self.TSPEC_FILE_EXT):
self.logger.debug("{}==> tspec_name : {}".format(self.cur_indent, one_spec+ self.TSPEC_FILE_EXT))
self.__total_tspec_cnt += 1
is_test_done = True
#--------------------
tspec_name_no_ext = filter_name[:len(filter_name)-len(self.TSPEC_FILE_EXT)]
if(self.run_one_tspec(tspecs_dir_full+os.sep+tspec_name, tspec_name) == False):
self.logger.error("{}tspec error : {}".format(self.cur_indent, filter_name))
is_all_tspec_ok = False
self.__failed_tspec_cnt += 1
self.__failed_tests.append(tspec_name_no_ext)
else:
self.__succeeded_tests.append(tspec_name_no_ext)
self.__succeeded_tspec_cnt +=1
else:
self.__total_tspec_cnt += 1
#self.logger.debug("{}==> tspec_name : {}".
# format(self.cur_indent, grp_dir_name + "." + test_name + "." + tspec_name))
tspec_name_no_ext = tspec_name[:len(tspec_name)-len(self.TSPEC_FILE_EXT)]
is_test_done = True
if(self.run_one_tspec(tspecs_dir_full+os.sep+tspec_name, tspec_name) == False):
self.logger.error("{}tspec error : {}".format(self.cur_indent,
grp_dir_name + "." + test_name + "." + tspec_name))
is_all_tspec_ok = False
self.__failed_tspec_cnt += 1
self.__failed_tests.append(grp_dir_name + "." + test_name +"."+ tspec_name_no_ext )
else:
self.__succeeded_tests.append(grp_dir_name + "." + test_name +"."+ tspec_name_no_ext)
self.__succeeded_tspec_cnt +=1
#----------------------------------------- end of for
self.logger.debug("{}all tspec end".format(self.cur_indent))
#all tspec end..
#check elapsed
end_dtime = datetime.datetime.now()
elapsed = end_dtime - start_dtime
if is_test_done == False :
self.logger.error("{}no test done".format(self.cur_indent))
return True
if(is_all_tspec_ok == True):
self.__succeeded_test_cnt += 1
self.logger.debug(" [ALL TSPECS OK] ")
self.logger.info(" ")
self.logger.info (" [PASSED] {} -> elapsed : {} sec".
format(self.__test_dirs_per_group[index],
elapsed.total_seconds()))
else:
self.__failed_test_cnt += 1
self.logger.info(" ")
self.logger.error(" [SOME TSPEC FAILED] ")
self.logger.error(" [FAILED] {} -> elapsed : {} sec".
format(self.__test_dirs_per_group[index],
elapsed.total_seconds()))
else:
self.logger.error("tspec dir invalid : {}".format(tspecs_dir_full))
return False
index += 1
#==================================================================
# run a tspec file
#==================================================================
def run_one_tspec(self, tspec_path_full, tspec_name):
self.logger.info(" ")
self.logger.info("======= [TSPEC] {}".format(tspec_name))
self.logger.info(" ")
try:
self.is_xml_config_changed = False
self.ems_is_xml_config_changed = False
self.change_xml_dbs = {}
self.pid_save = []
self.info_repo = {}
self.cleanup_cli_cmds = []
self.dm_sim_cfg_backup_files = []
start_dtime = datetime.datetime.now()
#XXX start all app first ...
if(self.start_all_prc_per_tspec == 'Y'):
lmt_process.run_cli_cmd_no_rollback(_g_runner_self,"STAR-PRC:${PACKAGE_NAME}:${SYSTEM_NAME}:${SERVICE_NAME}")
#------------------------
execfile(tspec_path_full)
#------------------------
except lmt_exception.LmtException as lmt_e:
err_msg = '{}lmt exception : {} '.format(self.cur_indent,lmt_e)
self.logger.error(err_msg)
#traceback.print_exc()
cl, exc, tb = sys.exc_info()
if(tb):
bt_len=len(traceback.extract_tb(tb))
if(bt_len>1):
self.logger.error("{} - tspec => {}". \
format(self.cur_indent,traceback.extract_tb(tb)[1][0]))
self.logger.error("{} - line no => {}". \
format(self.cur_indent,traceback.extract_tb(tb)[1][1]))
self.logger.error("{} - test => {}". \
format(self.cur_indent,traceback.extract_tb(tb)[1][3]))
#--------------
err_stack = traceback.extract_tb(tb)
for bt in err_stack:
self.logger.error("{}{}".format(self.cur_indent,bt))
#--------------
del tb
return False
except Exception as e:
err_msg = '{}error -> {} : {} :{}'. \
format(self.cur_indent,tspec_name,e.__doc__, e.message)
self.logger.error(err_msg)
cl, exc, tb = sys.exc_info()
if(tb):
#traceback.print_exc()
bt_len=len(traceback.extract_tb(tb))
if(bt_len>1):
self.logger.error("{} - tspec => {}". \
format(self.cur_indent,traceback.extract_tb(tb)[1][0]))
self.logger.error("{} - line no => {}". \
format(self.cur_indent,traceback.extract_tb(tb)[1][1]))
self.logger.error("{} - test => {}". \
format(self.cur_indent,traceback.extract_tb(tb)[1][3]))
#--------------
err_stack = traceback.extract_tb(tb)
for bt in err_stack:
self.logger.error("{}{}".format(self.cur_indent,bt))
#--------------
del tb
return False
finally:
#XXX auto rollback --> when tspec file ends..
if(self.is_xml_config_changed == True):
# restore the config only if set_xml_cfg was called
self.logger.debug("{}auto rollback config".format(self.cur_indent))
self.is_xml_config_changed = False
self.rollback_config()
if(self.ems_is_xml_config_changed == True):
# restore the EMS config only if set_xml_cfg_ems was called
self.logger.debug("{}auto rollback ems config".format(self.cur_indent))
self.ems_is_xml_config_changed = False
self.ems_rollback_config()
if(len(self.change_xml_dbs) >0):
# restore the xml db only if set_xml_db was called
self.logger.debug("{}auto rollback xml_db".format(self.cur_indent))
self.rollback_xml_db()
self.change_xml_dbs = {}
if(len(self.dm_sim_cfg_backup_files) > 0):
# dm simul only
#for backup in self.dm_sim_cfg_backup_files:
# self.logger.debug("{}backup : dm simul configs ={}".format(self.cur_indent,backup))
self.logger.debug("{}auto rollback : dm simul configs".format(self.cur_indent))
self.dm_simul_rollback_config()
self.dm_sim_cfg_backup_files =[]
#XXX invoke auto cleanup commands
if(len(self.cleanup_cli_cmds)>0):
self.logger.info("{}*** auto cleanup cli cmd".format(self.cur_indent))
for cleanup_cmd in self.cleanup_cli_cmds:
self.logger.debug("{}auto cleanup cli cmd : {}".format(self.cur_indent,cleanup_cmd))
#lmt_util.run_shell_cmd(self,cleanup_cmd)
os.system(cleanup_cmd) # no error check.. XXX
lmt_time.wait_secs(self,5)
self.cleanup_cli_cmds =[]
#check elapsed
end_dtime = datetime.datetime.now()
elapsed = end_dtime - start_dtime
self.logger.info(" [PASSED] {} , elapsed : {} sec".
format(tspec_name,elapsed.total_seconds()))
return True
#==================================================================
def backup_config(self):
src = self.xml_cfg_path
file_name = os.path.basename(self.xml_cfg_path)
dest = os.path.join(self.temp_internal_use_only_dir, file_name )
self.logger.debug("{}backup config : src ={}".format(self.cur_indent, src))
self.logger.debug("{}backup config : dest ={}".format(self.cur_indent, dest))
# move the original file aside
shutil.move(src, dest)
# copy it back as the working file
shutil.copyfile(dest, src)
return True
#==================================================================
def rollback_config(self):
#self.logger.debug("rollback config")
file_name = os.path.basename(self.xml_cfg_path)
src = os.path.join(self.temp_internal_use_only_dir, file_name )
dest = self.xml_cfg_path
self.logger.debug("{}rollback config : src ={}".format(self.cur_indent, src))
self.logger.debug("{}rollback config : dest ={}".format(self.cur_indent, dest))
# move the original file back
shutil.move(src, dest)
#shutil.copyfile(dest, src)
return True
#==================================================================
def ems_rollback_config(self):
#self.logger.debug("rollback config")
file_name = os.path.basename(self.ems_xml_cfg_path)
src = os.path.join(self.temp_internal_use_only_dir, file_name )
dest = self.ems_xml_cfg_path
self.logger.debug("{}rollback ems config : src ={}".format(self.cur_indent, src))
self.logger.debug("{}rollback ems config : dest ={}".format(self.cur_indent, dest))
# move the original file back
#shutil.move(src, dest)
# copy the working file
#shutil.copyfile(dest, src)
lmt_remote.rollback_remote_file(self, self.ems_ip,self.ems_id, self.ems_passwd, self.ems_xml_cfg_path)
return True
#==================================================================
def backup_xml_db(self, table_path_full, table_name):
self.change_xml_dbs[table_name] = True
dest = os.path.join(self.temp_internal_use_only_dir, table_name)
dest += ".xml"
self.logger.debug("{}backup xml db : dest ={}".format(self.cur_indent, dest))
# move the original file aside
shutil.move(table_path_full, dest)
# copy it back as the working file
shutil.copyfile(dest, table_path_full)
return True
#==================================================================
def rollback_xml_db(self):
self.logger.debug("{}ROLLBACK xml_db".format(self.cur_indent))
self.logger.debug("{}self.change_xml_dbs : len= {}".format(self.cur_indent, len(self.change_xml_dbs)))
for xml_db_file_name in self.change_xml_dbs:
xml_db_file_name += ".xml"
self.logger.debug("{}ROLLBACK : {}".format(self.cur_indent, xml_db_file_name))
src = os.path.join(self.temp_internal_use_only_dir, xml_db_file_name )
dest = os.path.join(self.xml_db_path, xml_db_file_name)
self.logger.debug("{}ROLLBACK xml_db : src ={}".format(self.cur_indent, src))
self.logger.debug("{}ROLLBACK xml_db : dest ={}".format(self.cur_indent, dest))
# move the original file back
shutil.move(src, dest)
#shutil.copyfile(dest, src)
return True
#==================================================================
# restore the config files used by DIA_SIM
def dm_simul_rollback_config(self):
#XXX dm simul only
self.logger.debug("{}ROLLBACK dm simul config [{}]".format(self.cur_indent, self.simul_dm_base_path))
# /CG/OFCS_SIM/IMS_SIM/config --> client.list
# /CG/OFCS_SIM/IMS_SIM/raw --> tas01.ini
#SIMUL_NAME = DIA_SIM
#SUMUL_BASE_PATH=/CG/OFCS_SIM/IMS_SIM
try:
for dm_sim_cfg_file in self.dm_sim_cfg_backup_files:
self.logger.info("{}dm_sim_cfg_file [{}] ".format(self.cur_indent, dm_sim_cfg_file))
src = os.path.join(self.temp_internal_use_only_dir, dm_sim_cfg_file )
dest = ''
if(dm_sim_cfg_file == 'client.list'):
dest = os.path.join(self.simul_dm_base_path,'config',dm_sim_cfg_file)
elif('.ini' in dm_sim_cfg_file ):
dest = os.path.join(self.simul_dm_base_path,'raw', dm_sim_cfg_file)
self.logger.debug("{}ROLLBACK move client list : {} ==> {} ".format(self.cur_indent, src,dest))
shutil.move(src, dest)
except Exception as e:
err_msg = '{}error -> {} :{}'. \
format(self.cur_indent,e.__doc__, e.message)
self.logger.error(err_msg)
#--------------
cl, exc, tb = sys.exc_info()
err_stack = traceback.extract_tb(tb)
for bt in err_stack:
self.logger.error("{}{}".format(self.cur_indent,bt))
#--------------
del tb
return False
return True
#///////////////////////////////////////////////////////////////////////////////
#///////////////////////////////////////////////////////////////////////////////
#///////////////////////////////////////////////////////////////////////////////
# forward tspec commands to the functions of the corresponding modules
#///////////////////////////////////////////////////////////////////////////////
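#///////////////////////////////////////////////////////////////////////////////
# Illustrative sketch only (not part of the framework): a tspec file is plain
# Python executed via execfile() by run_one_tspec(), so it calls the module-level
# wrappers defined below directly. The values used here are made up; only the
# wrapper names and the ${...} symbols are real.
#
#   # example.tspec (hypothetical)
#   write_report_msg("change a config value and check the service stays up")
#   set_xml_cfg("COMMON/DB_CONNECT_INFO/USER_ID", "testuser")  # rolled back automatically at tspec end
#   run_cli_cmd("STAR-PRC:${PACKAGE_NAME}:${SYSTEM_NAME}:${SERVICE_NAME}")
#   wait_secs(5)
#   assert_prc_running("SOME_PROCESS")  # hypothetical process name
#///////////////////////////////////////////////////////////////////////////////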
# lmt_time.py
#///////////////////////////////////////////////////////////////////////////////
def wait_secs(secs):
lmt_time.wait_secs(_g_runner_self, secs)
#///////////////////////////////////////////////////////////////////////////////
# lmt_shell.py
#///////////////////////////////////////////////////////////////////////////////
def run_shell_cmd(cmd):
out = lmt_util.run_shell_cmd(_g_runner_self,cmd)
return out
def run_shell_cmd_background(cmd):
lmt_util.run_shell_cmd_background(_g_runner_self,cmd)
def do_interactive_cmd(run_cmd, dic_expect_val):
lmt_shell.do_interactive_cmd(_g_runner_self,run_cmd,dic_expect_val)
#///////////////////////////////////////////////////////////////////////////////
# lmt_xml_config.py
#///////////////////////////////////////////////////////////////////////////////
def set_xml_cfg(xpath, val):
lmt_xml_config.set_xml_cfg(_g_runner_self, xpath, val)
return True
def get_xml_cfg(xpath):
output = lmt_xml_config.get_xml_cfg(_g_runner_self, xpath)
return output
def set_xml_cfg_ems(xpath, val):
lmt_xml_config.set_xml_cfg_ems(_g_runner_self, xpath, val)
return True
def get_xml_cfg_ems(xpath):
output = lmt_xml_config.get_xml_cfg_ems(_g_runner_self, xpath)
return output
#///////////////////////////////////////////////////////////////////////////////
# lmt_xml_db.py
#///////////////////////////////////////////////////////////////////////////////
def replace_xml_db_file (testing_file_path_full, table_name):
lmt_xml_db.replace_xml_db_file(_g_runner_self, testing_file_path_full, table_name)
return True
def assert_eq_xml_db_fields(table, dic_asserts, dic_conditons):
lmt_xml_db.assert_eq_xml_db_fields(_g_runner_self, table, dic_asserts, dic_conditons)
return True
def set_xml_db(table, dic_field_vals, dic_conditons):
# table, field, val, kwargs
lmt_xml_db.set_xml_db(_g_runner_self, table, dic_field_vals, dic_conditons)
return True
""" not in use
def assert_eq_xml_db_field(*args, **kwargs):
# table, field, val, kwargs
lmt_xml_db.assert_eq_xml_db_field(_g_runner_self, args, kwargs)
return True
"""
#///////////////////////////////////////////////////////////////////////////////
# lmt_assert.py
#///////////////////////////////////////////////////////////////////////////////
def test_eq(a,b):
lmt_assert.test_eq(_g_runner_self,a,b)
def assert_app_running(service_name, process_name, process_count):
lmt_assert.assert_app_running(_g_runner_self,service_name, process_name, process_count)
def assert_prc_running(proc_name):
lmt_assert.assert_prc_running(_g_runner_self,proc_name)
def assert_file_grep(to_find_str, file_name, max_wait_secs=0):
lmt_assert.assert_file_grep( _g_runner_self,to_find_str, file_name, max_wait_secs)
def assert_file_grep_sequentially(to_find_str, file_name, max_wait_secs=0 ):
lmt_assert.assert_file_grep_sequentially( _g_runner_self,to_find_str, file_name, max_wait_secs)
def assert_file_grep_sequentially_ems(to_find_str, file_name):
lmt_assert.assert_file_grep_sequentially_ems( _g_runner_self,to_find_str, file_name, 0)
# for the 'not include' check, run wait_secs first and call this last
def assert_file_grep_sequentially_not_include(to_find_str, file_name):
lmt_assert.assert_file_grep_sequentially_not_include( _g_runner_self,to_find_str, file_name, 0)
def assert_prc_same_pid(process_name):
lmt_assert.assert_prc_same_pid(_g_runner_self,process_name)
def assert_alarm_exists(alarm_code):
lmt_assert.assert_alarm_exists(_g_runner_self,alarm_code)
def assert_alarm_cleared(alarm_code):
lmt_assert.assert_alarm_cleared(_g_runner_self,alarm_code)
def assert_mes_q_full(log_file_path):
lmt_assert.assert_mes_q_full(_g_runner_self,log_file_path)
def assert_mes_q_not_full(log_file_path):
lmt_assert.assert_mes_q_not_full(_g_runner_self,log_file_path)
def test_run_ok(cmd) :
lmt_assert.test_run_ok(_g_runner_self, cmd)
def test_run_err(cmd) :
lmt_assert.test_run_err(_g_runner_self,cmd)
def assert_eq_cmd_output (cmd, expect_val):
lmt_assert.assert_eq_cmd_output (_g_runner_self, cmd, expect_val)
def assert_cmd_output_include_string(cmd, expect_val):
lmt_assert.assert_cmd_output_include_string(_g_runner_self, cmd, expect_val)
def assert_poll_cmd_output_include_string(cmd, expect_val, max_wait_secs=600):
lmt_assert.assert_poll_cmd_output_include_string(_g_runner_self, cmd, expect_val, max_wait_secs)
#///////////////////////////////////////////////////////////////////////////////
# lmt_file.py
#///////////////////////////////////////////////////////////////////////////////
#def save_cur_file_line_cnt(file_name):
# lmt_file.save_cur_file_line_cnt(_g_runner_self,file_name)
def remove_app_file_log(file_name):
lmt_file.remove_app_file_log(_g_runner_self,file_name)
def remove_stat_file(file_name):
lmt_file.remove_stat_file(_g_runner_self, file_name)
def remove_pfnm_file_log(file_name):
lmt_file.remove_pfnm_file_log(_g_runner_self,file_name)
def remove_all_input_files() :
"""
The input folder contains DATA and INFO directories; these must not be deleted (they are not recreated automatically).
Only the files inside DATA and INFO are removed.
[DELL01:/POFCS/INPUT]cd /POFCS/INPUT/GTP_PGW
[DELL01:/POFCS/INPUT/GTP_PGW]ll
drwxrwxr-x 2 pofcs pofcs 4096 7월 22 13:50 DATA01
drwxrwxr-x 2 pofcs pofcs 6 6월 24 20:18 DATA02
drwxrwxr-x 2 pofcs pofcs 12288 7월 22 13:51 INFO01
drwxrwxr-x 2 pofcs pofcs 6 6월 24 20:18 INFO02
"""
del_input_dir = os.path.join( _g_runner_self.input_path , _g_runner_self.service_name )
#get DATA, INFO directory list and remove files in it.
del_dir_path = os.path.join(del_input_dir , "*" )
dir_list=glob.glob(del_dir_path)
for del_dir in dir_list:
lmt_file.remove_all_files(_g_runner_self, del_dir )
return True
def remove_all_work_files() :
#XXX TODO do not delete *_LAST_INFO_MODIFIED used by DT (deleting is only safe if DT is restarted)
del_work_dir = os.path.join( _g_runner_self.work_path , _g_runner_self.service_name )
lmt_file.remove_all_files(_g_runner_self, del_work_dir )
return True
# ex) delete the files in every folder under /POFCS/OUTPUT/GTP_PGW/IPMD/
def remove_all_ipmd_output_files() :
ipmd_dir_base = os.path.join( _g_runner_self.output_path , _g_runner_self.service_name ,"IPMD")
lmt_file.remove_all_files(_g_runner_self, ipmd_dir_base)
return True
# ex) delete the files in every folder under /OUTPUT/IMS/ERR/
def remove_all_err_output_files() :
err_dir_base = os.path.join( _g_runner_self.output_path , _g_runner_self.service_name ,"ERR")
lmt_file.remove_all_files(_g_runner_self, err_dir_base)
return True
# delete the files and directories inside the given folder; the folder passed as an argument is kept
def remove_all_files(del_in_this_dir):
lmt_file.remove_all_files(_g_runner_self, del_in_this_dir)
return True
def clear_file_db(proc_name='ALL') :
lmt_file.clear_file_db(_g_runner_self,proc_name)
#///////////////////////////////////////////////////////////////////////////////
# lmt_oracle.py
# lmt_mysql.py
#///////////////////////////////////////////////////////////////////////////////
def assert_db_exists(table, where):
where = lmt_util.replace_all_symbols(_g_runner_self,where)
if(_g_runner_self.db_type == 'ORACLE'):
lmt_oracle.assert_oracle_exists(_g_runner_self,table, where)
elif(_g_runner_self.db_type == 'MYSQL'):
lmt_mysql.assert_mysql_exists(_g_runner_self,table, where)
else:
err_msg ="invalid DB type : {}".format(_g_runner_self.db_type)
_g_runner_self.logger.error(err_msg)
return False
return True
def execute_sql(full_sql):
full_sql = lmt_util.replace_all_symbols(_g_runner_self,full_sql)
if(_g_runner_self.db_type == 'ORACLE'):
lmt_oracle.execute_sql(_g_runner_self,full_sql)
elif(_g_runner_self.db_type == 'MYSQL'):
lmt_mysql.execute_sql(_g_runner_self,full_sql)
else:
err_msg ="invalid DB type : {}".format(_g_runner_self.db_type)
_g_runner_self.logger.error(err_msg)
return False
return True
#///////////////////////////////////////////////////////////////////////////////
# lmt_memory.py
#///////////////////////////////////////////////////////////////////////////////
def make_swap():
lmt_memory.make_swap(_g_runner_self)
def clear_mes_queue_restart_pfnm ():
#clear_all_shared_memory
lmt_memory.clear_mes_queue_restart_pfnm(_g_runner_self)
#///////////////////////////////////////////////////////////////////////////////
# lmt_process.py
#///////////////////////////////////////////////////////////////////////////////
def run_cli_cmd(cli_cmd):
lmt_process.run_cli_cmd(_g_runner_self,cli_cmd)
def run_prc(run_cmd):
lmt_process.run_prc(_g_runner_self,run_cmd)
def terminate_prc(proc_name):
lmt_process.terminate_prc(_g_runner_self,proc_name)
def kill_prc(proc_name):
lmt_process.kill_prc(_g_runner_self,proc_name)
def save_prc_pid(process_name):
lmt_process.save_prc_pid(_g_runner_self,process_name)
# check that the thread count of the process matches the expected value
def assert_process_thread_count_matching(name, expected_val):
lmt_process.assert_process_thread_count_matching(_g_runner_self, name, expected_val)
#///////////////////////////////////////////////////////////////////////////////
# lmt_report.py
#///////////////////////////////////////////////////////////////////////////////
def write_report_msg (msg):
lmt_report.write_report_msg (_g_runner_self,msg)
#///////////////////////////////////////////////////////////////////////////////
# lmt_simul.py
#///////////////////////////////////////////////////////////////////////////////
def send_simul_gtp (cdr_dir, config=None):
config_path = config
if config_path is None:
lmt_simul.send_simul_gtp (_g_runner_self,cdr_dir,_g_runner_self.ini_config_path_full)
else:
lmt_simul.send_simul_gtp (_g_runner_self,cdr_dir, config_path)
# XXX pass the client.list path and tas01.ini path as arguments; the files are copied for use and then restored
# ex) send_simul_dm ("${TEST_DATA_DIR}/client_list/02.NotSpace", "${TEST_DATA_DIR}/raws/02.NotSpace")
def send_simul_dm (client_list_path, raw_ini_path) :
lmt_simul.send_simul_dm(_g_runner_self,client_list_path, raw_ini_path)
#///////////////////////////////////////////////////////////////////////////////
# lmt_network.py
#///////////////////////////////////////////////////////////////////////////////
def start_tcpdump(cmd_opt, dump_file_name):
lmt_network.start_tcpdump(_g_runner_self,cmd_opt, dump_file_name)
def stop_tcpdump(cmd_opt):
lmt_network.stop_tcpdump(_g_runner_self,cmd_opt)
#///////////////////////////////////////////////////////////////////////////////
# lmt_rms.py
#///////////////////////////////////////////////////////////////////////////////
def rms_mem_info_check():
lmt_rms.rms_mem_info_check(_g_runner_self)
#///////////////////////////////////////////////////////////////////////////////
# lmt_remote.py
#///////////////////////////////////////////////////////////////////////////////
def assert_ems_zero_file_not_exist(path, file_name):
lmt_remote.assert_ems_zero_file_not_exist(_g_runner_self, path, file_name)
def ems_run_shell_cmd(cmd):
lmt_remote.ems_run_shell_cmd(_g_runner_self, cmd)
def ems_run_cli_cmd(cmd):
lmt_remote.ems_run_cli_cmd(_g_runner_self, cmd)
def get_ems_file(file_name):
lmt_remote.get_ems_file(_g_runner_self,file_name)
def put_ems_file(remote_path, file_name):
lmt_remote.put_ems_file(_g_runner_self,remote_path, file_name)
def get_remote_file(host,user,password, file_name):
lmt_remote.get_remote_file(_g_runner_self,host,user,password, file_name)
def put_remote_file(host,user,password,remote_path, local_path, file_name):
lmt_remote.put_remote_file(_g_runner_self,host,user,password,remote_path, local_path, file_name, file_name)
def backup_ems_config(file_name_path):
lmt_remote.backup_ems_config(_g_runner_self, file_name_path)
def rollback_ems_config(file_name_path):
lmt_remote.rollback_ems_config(_g_runner_self, file_name_path)
def backup_remote_file(host,user,password, file_name_path):
lmt_remote.backup_remote_file(_g_runner_self, host,user,password, file_name_path)
def psm_set_config_and_restart(after_min):
lmt_remote.psm_set_config_and_restart(_g_runner_self, after_min)
def put_policy_files_to_ems():
lmt_remote.put_policy_files_to_ems(_g_runner_self)
def test(name):
lmt_process.test(_g_runner_self,name)
```
#### File: let-me-test/module_core/lmt_util.py
```python
import os
import re
import sys
import datetime
import subprocess
import shlex
import pexpect
import psutil
from module_core import lmt_exception
#///////////////////////////////////////////////////////////////////////////////
def is_file_contains(file_path, pattern):
with open(file_path) as f:
for line in f:
if re.search(pattern, line):
return True
return False
#///////////////////////////////////////////////////////////////////////////////
# 1. The return value (command output) must be retrievable.
# 2. Errors from invalid commands must be detectable.
# 3. Commands containing a pipe ('|') must be handled.
# 4. Very large command output must be handled.
# 5. Commands that start background processes must be handled. TODO
# (ex: pfmRun.sh all 01 -> currently only possible via os.system)
#///////////////////////////////////////////////////////////////////////////////
#///////////////////////////////////////////////////////////////////////////////
# wait until the command finishes; if it starts a process, wait until the child exits
def run_shell_cmd(runner_ctx,cmd):
cmd = replace_all_symbols(runner_ctx,cmd)
try:
runner_ctx.logger.info("{}cmd : {}".format(runner_ctx.cur_indent,cmd))
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True,universal_newlines=True)
# If the return code was non-zero it raises a CalledProcessError.
# The CalledProcessError object will have the return code in the returncode attribute
# and any output in the output attribute.
if(output):
out_list = output.split("\n")
if(out_list):
for outval in out_list:
if(len(outval)>0 and outval != 'grep: write error' ):
runner_ctx.logger.info("{}{}".format(runner_ctx.cur_indent,outval))
return output
except subprocess.CalledProcessError as e:
err_msg = 'error -> {} : return code = {}'.format(e.output, e.returncode )
raise lmt_exception.LmtException(err_msg)
except Exception as e:
err_msg = 'error -> {} : {} '. format( e.__doc__, e.message)
runner_ctx.logger.error("{}error: {}".format(runner_ctx.cur_indent,err_msg))
raise lmt_exception.LmtException(err_msg)
runner_ctx.logger.info("{}cmd done".format(runner_ctx.cur_indent))
return None
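#///////////////////////////////////////////////////////////////////////////////
# Usage sketch (illustrative; the command string is made up): because the command
# runs through the shell and check_output captures the combined stdout/stderr,
# pipes and large outputs are handled, and a non-zero exit code surfaces as an
# LmtException.
#
#   out = run_shell_cmd(runner_ctx, "ls ${LOG_BASE_PATH} | wc -l")
#   runner_ctx.logger.info("line count = {}".format(out.strip() if out else "0"))
#///////////////////////////////////////////////////////////////////////////////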
#///////////////////////////////////////////////////////////////////////////////
# run the command in the background and return immediately (unlike run_shell_cmd above)
def run_shell_cmd_background(runner_ctx,cmd):
cmd = replace_all_symbols(runner_ctx,cmd)
runner_ctx.logger.info("{}cmd : {}".format(runner_ctx.cur_indent, cmd))
#child = pexpect.spawn('/bin/bash', ['-c', cmd], timeout=120,maxread=1)
#child.logfile = sys.stdout
#child.expect(pexpect.EOF)
#XXX close_fds=True XXX
subprocess.Popen(cmd, shell=True,universal_newlines=True, close_fds=True)
#os.spawnl(os.P_NOWAIT, cmd)
#///////////////////////////////////////////////////////////////////////////////
def replace_all_symbols(runner_ctx, user_str):
runner_ctx.logger.debug("{}before replace : {}".format(runner_ctx.cur_indent,user_str))
today = datetime.datetime.now().strftime('%Y%m%d')
today_yyyymmddhh = datetime.datetime.now().strftime('%Y%m%d%H')
curr_hh = datetime.datetime.now().strftime('%H') # string
#runner_ctx.logger.debug("{}curr hh : {}".format(runner_ctx.cur_indent, curr_hh))
resolved = user_str.replace("${PACKAGE_ID}", runner_ctx.package_id)
resolved = resolved.replace("${PACKAGE_NAME}", runner_ctx.package_name)
resolved = resolved.replace("${SYSTEM_NAME}" , runner_ctx.system_name)
resolved = resolved.replace("${SERVICE_NAME}" , runner_ctx.service_name)
resolved = resolved.replace("${CUR_YYYYMMDD}", today)
resolved = resolved.replace("${CUR_YYYYMMDDHH}", today_yyyymmddhh)
if runner_ctx.cur_ctx_test_path :
resolved = resolved.replace("${TEST_DATA_DIR}", runner_ctx.cur_ctx_test_path+os.sep+"data")
resolved = resolved.replace("${XML_DB_PATH}", runner_ctx.xml_db_path)
resolved = resolved.replace("${OUTPUT_IPMD}", runner_ctx.output_path+os.sep +
runner_ctx.service_name + os.sep + "IPMD")
resolved = resolved.replace("${OUTPUT_ERR}", runner_ctx.output_path+os.sep +
runner_ctx.service_name + os.sep + "ERR")
resolved = resolved.replace("${LOG_BASE_PATH}" , runner_ctx.log_base_path)
curr_hh_plus_1_str = str(int(curr_hh) + 1)
#runner_ctx.logger.debug("{}curr_hh_plus_1_str : {}".format(runner_ctx.cur_indent, curr_hh_plus_1_str))
resolved = resolved.replace("${CURR_HH}" , curr_hh )
resolved = resolved.replace("${CURR_HH+1}" ,curr_hh_plus_1_str )
resolved = resolved.replace("${EMS_PACKAGE_NAME}", runner_ctx.ems_package_name)
resolved = resolved.replace("${EMS_SYSTEM_NAME}" , runner_ctx.ems_system_name)
resolved = resolved.replace("${EMS_SERVICE_NAME}" , runner_ctx.ems_service_name)
resolved = resolved.replace("${STAT_PATH}" , runner_ctx.stat_path)
resolved = resolved.replace("${WORK_PATH}" , runner_ctx.work_path)
if "${TEST_RESULT_DIR}" in user_str :
if(os.path.isdir(runner_ctx.test_result_dir)==False):
runner_ctx.logger.info("{}create dir : {}".format(runner_ctx.cur_indent,runner_ctx.test_result_dir))
os.mkdir(runner_ctx.test_result_dir)
resolved = resolved.replace("${TEST_RESULT_DIR}" , runner_ctx.test_result_dir)
runner_ctx.logger.debug("{}resolved : {}".format(runner_ctx.cur_indent,resolved))
return resolved
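#///////////////////////////////////////////////////////////////////////////////
# Substitution sketch (hypothetical values; only the ${...} symbol names are real):
# with service_name="GTP_PGW", log_base_path="/POFCS/LOG" and today 20210722,
#   "tail ${LOG_BASE_PATH}/${SERVICE_NAME}/${CUR_YYYYMMDD}.log"
# would resolve to
#   "tail /POFCS/LOG/GTP_PGW/20210722.log"
# Unknown ${...} tokens are left in the string untouched.
#///////////////////////////////////////////////////////////////////////////////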
#///////////////////////////////////////////////////////////////////////////////
# XXX ipcs -m | grep `whoami` exits with $? == 1 when there is no data, so an error is raised !!!
def clear_all_shared_memory(runner_ctx):
runner_ctx.logger.info("{}start removing all shared memory".format(runner_ctx.cur_indent))
runner_ctx.logger.info("{}current shared memory status".format(runner_ctx.cur_indent))
run_shell_cmd(runner_ctx,"ipcs -m | grep `whoami`")
runner_ctx.logger.info("{}removing shared memory segments".format(runner_ctx.cur_indent))
run_shell_cmd(runner_ctx,"ipcs -m | grep `whoami` | awk '{ print $2 }' | xargs -n1 ipcrm -m ")
runner_ctx.logger.info("{}shared memory removal complete".format(runner_ctx.cur_indent))
run_shell_cmd(runner_ctx,"ipcs -m ")
#///////////////////////////////////////////////////////////////////////////////
def is_runnig(script_name):
running_cnt = 0
for q in psutil.process_iter():
if q.name() == 'python':
#print q.cmdline()
if len(q.cmdline())>1 and script_name in q.cmdline()[1]:
running_cnt += 1
if(running_cnt > 1):
return True, running_cnt
return False, 0
```
#### File: let-me-test/tspec_cmd_impl/lmt_report.py
```python
from module_core import lmt_exception
#202007 kojh create
#///////////////////////////////////////////////////////////////////////////////
def write_report_msg (runner_ctx,msg):
#runner_ctx.logger.info(" ")
runner_ctx.logger.info("{}{}".format(runner_ctx.cur_indent, msg))
runner_ctx.logger.info("{}--------------------------------------------------------------------".
format(runner_ctx.cur_indent))
return True
```
#### File: let-me-test/tspec_cmd_impl/lmt_shell.py
```python
import pexpect
from module_core import lmt_exception
from module_core import lmt_util
#///////////////////////////////////////////////////////////////////////////////
def do_interactive_cmd(runner_ctx, run_cmd, dic_expect_val):
# TODO
"""
timeout_sec = 30
#-------------------------------------
child = pexpect.spawn('/bin/bash -c "cd /CG/OFCS_SIM/IMS_SIM/SRC; ./DIA_SIM"')
child.logfile = sys.stdout
#-------------------------------------
child.setwinsize(100, 100)
child.expect('CLOSE_NOT_READY', timeout=timeout_sec)
runner_ctx.logger.info("CLOSE_NOT_READY")
#-------------------------------------
child.sendline('init 1\n')
child.expect('OPEN_NOT_READY', timeout=timeout_sec)
runner_ctx.logger.info("OPEN_NOT_READY")
#-------------------------------------
child.sendline('load 1\n')
child.expect('OPEN_READY', timeout=timeout_sec)
runner_ctx.logger.info("OPEN_READY")
#-------------------------------------
child.sendline('start 1\n')
child.expect('ALL_SENT', timeout=120)
runner_ctx.logger.info("ALL_SENT")
#-------------------------------------
child.sendline('quit')
child.expect('GOOD BYE!', timeout=timeout_sec)
runner_ctx.logger.info("GOOD BYE!")
#-------------------------------------
#child.terminate()
child.close()
"""
return True
```
#### File: let-me-test/tspec_cmd_impl/lmt_time.py
```python
import time
#from tqdm import trange
from alive_progress import alive_bar, config_handler
from module_core import lmt_exception
#///////////////////////////////////////////////////////////////////////////////
def wait_secs(runner_ctx,secs):
runner_ctx.logger.debug("{}wait_secs start : args = {}".
format(runner_ctx.cur_indent,secs))
config_handler.set_global(length=20, theme='ascii', spinner='fish_bouncing')
items = range(secs)
with alive_bar(secs) as bar:
for item in items:
time.sleep(1)
bar()
return True
```
#### File: let-me-test/tspec_cmd_impl/lmt_xml_config.py
```python
try:
import xml.etree.cElementTree as ET
except ImportError:
print ("ImportError")
import xml.etree.ElementTree as ET
import os
from pexpect import pxssh
from module_core import lmt_exception
from module_core import lmt_util
from tspec_cmd_impl import lmt_remote
#///////////////////////////////////////////////////////////////////////////////
# auto backup --> when one tspec begins.
# auto rollback --> when one tspec ends..
#///////////////////////////////////////////////////////////////////////////////
def set_xml_cfg_ems(runner_ctx, xpath, val):
local_xml_path = "{}/{}".format(runner_ctx.temp_internal_use_only_dir_remote, os.path.basename(runner_ctx.ems_xml_cfg_path))
if(runner_ctx.ems_is_xml_config_changed == False):
# handle set_xml_cfg being called multiple times:
# -> perform the backup only once, on the first set_xml_cfg call
runner_ctx.logger.debug("{}BACKUP ems xml cfg".format(runner_ctx.cur_indent))
#runner_ctx.backup_config()
lmt_remote.backup_remote_file(runner_ctx, runner_ctx.ems_ip,runner_ctx.ems_id, runner_ctx.ems_passwd, runner_ctx.ems_xml_cfg_path)
runner_ctx.ems_is_xml_config_changed = True
runner_ctx.logger.info("{}ems config path = {}".format(runner_ctx.cur_indent, runner_ctx.ems_xml_cfg_path))
lmt_remote.get_remote_file(runner_ctx,runner_ctx.ems_ip,runner_ctx.ems_id,runner_ctx.ems_passwd, runner_ctx.ems_xml_cfg_path)
set_xml_cfg_this_path(runner_ctx, local_xml_path, xpath, val)
remote_path = os.path.dirname(runner_ctx.ems_xml_cfg_path)
lmt_remote.put_remote_file(runner_ctx,runner_ctx.ems_ip,runner_ctx.ems_id,runner_ctx.ems_passwd, remote_path,
runner_ctx.temp_internal_use_only_dir_remote, os.path.basename(local_xml_path), os.path.basename(local_xml_path))
#///////////////////////////////////////////////////////////////////////////////
def set_xml_cfg(runner_ctx, xpath, val):
if(runner_ctx.is_xml_config_changed == False):
# handle set_xml_cfg being called multiple times:
# -> perform the backup only once, on the first set_xml_cfg call
runner_ctx.logger.debug("{}BACKUP xml cfg".format(runner_ctx.cur_indent))
runner_ctx.backup_config()
runner_ctx.is_xml_config_changed = True
set_xml_cfg_this_path(runner_ctx, runner_ctx.xml_cfg_path, xpath, val)
#///////////////////////////////////////////////////////////////////////////////
def set_xml_cfg_this_path(runner_ctx, file_path, xpath, val):
xpath = lmt_util.replace_all_symbols(runner_ctx,xpath)
runner_ctx.logger.debug("{}xml_cfg_path = {}".format(runner_ctx.cur_indent,file_path))
runner_ctx.logger.debug("{}xpath = {}".format(runner_ctx.cur_indent,xpath))
try:
#------------------------------
doc = ET.parse(file_path)
#------------------------------
#doc = ET.parse("error.xml")
except Exception as e:
err_msg = 'xml parse failed {} :{}'.format(e.__doc__, e.message)
runner_ctx.logger.error("{}{}".format(runner_ctx.cur_indent,err_msg))
raise lmt_exception.LmtException(err_msg)
if(doc is None):
runner_ctx.logger.error("{}parse failed ".format(runner_ctx.cur_indent))
return False
try:
xml_root = doc.getroot()
if(xml_root is None):
err_msg = "xml getroot failed"
runner_ctx.logger.error(err_msg)
raise lmt_exception.LmtException(err_msg)
except Exception as e:
err_msg = 'xml getroot failed : {} :{}'.format(e.__doc__, e.message)
runner_ctx.logger.error("{}{}".format(runner_ctx.cur_indent,err_msg))
raise lmt_exception.LmtException(err_msg)
tmp_xpath = './' + xpath # root + xpath
#tmp_xpath = './/' + 'DB_CONNECT_INFO/USER_ID' # OK
#tmp_xpath = './' + 'COMMON/DB_CONNECT_INFO/USER_ID' # OK
try:
xml_nodes = xml_root.findall(tmp_xpath)
if xml_nodes is None:
err_msg = "findall failed = {}".format(tmp_xpath)
runner_ctx.logger.error("{}{}".format(runner_ctx.cur_indent,tmp_xpath))
raise lmt_exception.LmtException(err_msg)
if len(xml_nodes) == 0:
err_msg = "invalid xpath = {}".format(tmp_xpath)
runner_ctx.logger.error("{}{}".format(runner_ctx.cur_indent,err_msg))
raise lmt_exception.LmtException(err_msg)
#print xml_nodes
config_val = xml_nodes[0].text
runner_ctx.logger.debug("{}old value = {}".format(runner_ctx.cur_indent,config_val))
runner_ctx.logger.debug("{}new value = {}".format(runner_ctx.cur_indent,val))
#------------------------
# XXX change value XXX
xml_nodes[0].text = val
#------------------------
#last_updated = ET.SubElement(xml_nodes[0], "test_new")
#last_updated.text = 'TEST'
#write file
#doc.write(runner_ctx.xml_cfg_path, encoding="utf-8", xml_declaration=True)
doc.write(file_path)
except Exception as e:
err_msg = 'error : {} :{}'.format(e.__doc__, e.message)
runner_ctx.logger.error("{}{}".format(runner_ctx.cur_indent,err_msg))
raise
except (SyntaxError, AttributeError):
err_msg = 'Syntax or Attribute error '
runner_ctx.logger.error("{}{}".format(runner_ctx.cur_indent,err_msg))
raise
return True
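#///////////////////////////////////////////////////////////////////////////////
# XPath sketch (hypothetical document; the xpath convention is the real one):
# the xpath argument is taken relative to the root element, so for a file like
#   <CONFIG><COMMON><DB_CONNECT_INFO><USER_ID>old</USER_ID></DB_CONNECT_INFO></COMMON></CONFIG>
# calling set_xml_cfg_this_path(ctx, path, "COMMON/DB_CONNECT_INFO/USER_ID", "new")
# changes the text of the first matching node to "new" and rewrites the file in place.
#///////////////////////////////////////////////////////////////////////////////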
#///////////////////////////////////////////////////////////////////////////////
def get_xml_cfg_ems(runner_ctx, xpath):
xml_path = "{}/{}".format(runner_ctx.temp_internal_use_only_dir_remote, os.path.basename(runner_ctx.ems_xml_cfg_path))
lmt_remote.get_remote_file(runner_ctx,runner_ctx.ems_ip,runner_ctx.ems_id,runner_ctx.ems_passwd, runner_ctx.ems_xml_cfg_path)
out = get_xml_cfg_this_path(runner_ctx, xml_path, xpath)
return out
#///////////////////////////////////////////////////////////////////////////////
def get_xml_cfg(runner_ctx, xpath):
out = get_xml_cfg_this_path(runner_ctx, runner_ctx.xml_cfg_path, xpath)
return out
#///////////////////////////////////////////////////////////////////////////////
def get_xml_cfg_this_path(runner_ctx, file_path, xpath):
xpath = lmt_util.replace_all_symbols(runner_ctx,xpath)
runner_ctx.logger.debug("{}xml_cfg_path = {}".format(runner_ctx.cur_indent,file_path))
runner_ctx.logger.debug("{}xpath = {}".format(runner_ctx.cur_indent,xpath))
try:
#------------------------------
doc = ET.parse(file_path)
#------------------------------
except Exception as e:
err_msg = 'xml parse failed :{} -> {} :{}'.format(file_path, e.__doc__, e.message)
runner_ctx.logger.error("{}{}".format(runner_ctx.cur_indent,err_msg))
raise lmt_exception.LmtException(err_msg)
if(doc is None):
runner_ctx.logger.error("parse failed ")
return None
try:
xml_root = doc.getroot()
if(xml_root is None):
err_msg = "xml getroot failed"
runner_ctx.logger.error(err_msg)
raise lmt_exception.LmtException(err_msg)
except Exception as e:
err_msg = 'xml getroot failed : {} :{}'.format(e.__doc__, e.message)
runner_ctx.logger.error("{}{}".format(runner_ctx.cur_indent,err_msg))
raise lmt_exception.LmtException(err_msg)
tmp_xpath = './' + xpath # root + xpath
#tmp_xpath = './/' + 'DB_CONNECT_INFO/USER_ID' # OK
#tmp_xpath = './' + 'COMMON/DB_CONNECT_INFO/USER_ID' # OK
try:
xml_nodes = xml_root.findall(tmp_xpath)
if xml_nodes is None:
err_msg = "findall failed = {}".format(tmp_xpath)
runner_ctx.logger.error("{}{}".format(runner_ctx.cur_indent,tmp_xpath))
return None # no error ! this is just get function
#raise lmt_exception.LmtException(err_msg)
if len(xml_nodes) == 0:
err_msg = "invalid xpath = [{}] get just failed.".format(tmp_xpath)
runner_ctx.logger.warning("{}{}".format(runner_ctx.cur_indent,err_msg))
return None # no error ! this is just get function
#raise lmt_exception.LmtException(err_msg)
config_val = xml_nodes[0].text
runner_ctx.logger.info("{}{}={}".format(runner_ctx.cur_indent,xpath, config_val))
return config_val
except Exception as e:
err_msg = 'error : {} :{}'.format(e.__doc__, e.message)
runner_ctx.logger.error("{}".format(err_msg))
raise
except (SyntaxError, AttributeError):
err_msg = 'Syntax or Attribute error '
runner_ctx.logger.error("{}".format(err_msg))
raise
return None
```
|
{
"source": "JeremyKTH/hk_ros_2021",
"score": 3
}
|
#### File: hk_ros_2021/Scripts/GeometricDet.py
```python
import numpy as np
import cv2
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
rospy.init_node('geometry_detect', anonymous=False)
def openCV(image_message):
bridge = CvBridge()
img = bridge.imgmsg_to_cv2(image_message, desired_encoding='passthrough')
#imagename = 'shape3.PNG'
#img = cv2.imread(imagename)
# Convert BGR to HSV
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# define range of green color in HSV
lower_green = np.array([70,100,40])
upper_green = np.array([110,190,100]) #150
# Threshold the HSV image to get only green colors
mask = cv2.inRange(hsv, lower_green, upper_green)
mask = (255-mask)
ret , thresh = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY_INV)
_, contours , _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
for contour in contours:
approx = cv2.approxPolyDP(contour, 0.05*cv2.arcLength(contour, True), True)
cv2.drawContours(img, [approx], 0, (0, 250, 0), 5)
#x = approx.ravel()[0]
#y = approx.ravel()[1]
Area = cv2.contourArea(contour)
xy = np.mean(approx, axis=0)[0]
x = round(xy[0])
y = round(xy[1])
lines = len(approx)
if (Area > 475 and Area < 700 and y>180):
#print("Area = ", Area)
if lines == 3:
#cv2.putText(img, "Triangle", (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0) )
print("Triangle")
elif lines == 4 :
x, y , w, h = cv2.boundingRect(approx)
aspectRatio = float(w)/h
# print(aspectRatio)
if aspectRatio >= 0.95 and aspectRatio < 1.05:
#cv2.putText(img, "square", (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))
print("square")
else:
#cv2.putText(img, "rectangle", (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))
print("rectangle")
elif lines == 5:
#cv2.putText(img, "pentagon", (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))
print("pentagon")
#elif lines == 10 :
#cv2.putText(mask, "star", (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))
#print("star")
#if (lines>2 and lines<11):
#print(approx)
#print("-\n x = ",x, "y = ",y, "-\n")
#print("New contour")
#else:
# cv2.putText(img, "circle", (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))
def listener():
rospy.Subscriber('/raspicam_node/image', Image, openCV)
#rate = rospy.Rate(10) # 10hz
#while not rospy.is_shutdown():
#rospy.loginfo(hello_str) # print
rospy.spin()
if __name__ == '__main__':
try:
listener()
except rospy.ROSInterruptException:
pass
```
|
{
"source": "jeremykuhnash/dcos-jenkins-service",
"score": 3
}
|
#### File: tests/scale/test_load.py
```python
import logging
import time
from threading import Thread, Lock
from typing import List, Set
from xml.etree import ElementTree
import config
import jenkins
import pytest
import sdk_dcos
import sdk_marathon
import sdk_quota
import sdk_security
import sdk_utils
import shakedown
import json
from tools import dcos_login
from sdk_dcos import DCOS_SECURITY
log = logging.getLogger(__name__)
SHARED_ROLE = "jenkins-role"
DOCKER_IMAGE="mesosphere/jenkins-dind:scale"
# initial timeout waiting on deployments
DEPLOY_TIMEOUT = 30 * 60 # 30 mins
JOB_RUN_TIMEOUT = 10 * 60 # 10 mins
SERVICE_ACCOUNT_TIMEOUT = 15 * 60 # 15 mins
LOCK = Lock()
TIMINGS = {"deployments": {}, "serviceaccounts": {}}
ACCOUNTS = {}
class ResultThread(Thread):
"""A thread that stores the result of the run command."""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._result = None
self._event = None
@property
def result(self) -> bool:
"""Run result
Returns: True if completed successfully.
"""
return bool(self._result)
@property
def event(self):
return self._event
@event.setter
def event(self, event):
self._event = event
def run(self) -> None:
start = time.time()
try:
super().run()
self._result = True
except Exception as e:
self._result = False
finally:
end = time.time()
if self.event:
TIMINGS[self.event][self.name] = end - start
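# Usage sketch (hypothetical target function; _spawn_threads below builds these
# threads the same way):
#   t = ResultThread(target=some_install_fn, name="jenkins0", args=("jenkins0",))
#   t.event = "deployments"   # elapsed time is recorded in TIMINGS["deployments"]["jenkins0"]
#   t.start(); t.join()
#   assert t.result           # True only if the target returned without raising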
@pytest.mark.scale
def test_scaling_load(master_count,
job_count,
single_use: bool,
run_delay,
cpu_quota,
memory_quota,
work_duration,
mom,
external_volume: bool,
scenario,
min_index,
max_index,
batch_size) -> None:
"""Launch a load test scenario. This does not verify the results
of the test, but does ensure the instances and jobs were created.
The installation is run in threads, but the job creation and
launch is then done serially after all Jenkins instances have
completed installation.
Args:
master_count: Number of Jenkins masters or instances
job_count: Number of Jobs on each Jenkins master
single_use: Mesos Single-Use Agent on (true) or off (false)
run_delay: Jobs should run every X minute(s)
cpu_quota: CPU quota (0.0 to disable)
work_duration: Time, in seconds, for generated jobs to sleep
mom: Marathon on Marathon instance name
external_volume: External volume on rexray (true) or local volume (false)
min_index: minimum index to begin jenkins suffixes at
max_index: maximum index to end jenkins suffixes at
batch_size: batch size to deploy jenkins instances in
"""
security_mode = sdk_dcos.get_security_mode()
if mom and cpu_quota != 0.0 and memory_quota != 0.0:
with shakedown.marathon_on_marathon(mom):
_setup_quota(SHARED_ROLE, cpu_quota, memory_quota)
# create marathon client
if mom:
with shakedown.marathon_on_marathon(mom):
marathon_client = shakedown.marathon.create_client()
else:
marathon_client = shakedown.marathon.create_client()
masters = []
if min_index == -1 or max_index == -1:
masters = ["jenkins{}".format(sdk_utils.random_string()) for _ in
range(0, int(master_count))]
else:
#max and min indexes are specified
#NOTE: using min/max will override master count
masters = ["jenkins{}".format(index) for index in
range(min_index, max_index)]
# create service accounts in parallel
sdk_security.install_enterprise_cli()
if mom:
_configure_admin_router(mom, SHARED_ROLE)
current = 0
end = len(masters)
for current in range(0, end, batch_size):
batched_masters = masters[current:current+batch_size]
service_account_threads = _spawn_threads(batched_masters,
_create_service_accounts,
security=security_mode)
thread_failures = _wait_and_get_failures(service_account_threads,
timeout=SERVICE_ACCOUNT_TIMEOUT)
current = current + batch_size
# launch Jenkins services
current = 0
end = max_index - min_index
for current in range(0, end, batch_size):
log.info("Re-authenticating current batch load of jenkins{} - jenkins{} "
"to prevent auth-timeouts on scale cluster.".format(current, current+batch_size))
dcos_login.login_session()
batched_masters = masters[current:current+batch_size]
install_threads = _spawn_threads(batched_masters,
_install_jenkins,
event='deployments',
client=marathon_client,
external_volume=external_volume,
security=security_mode,
daemon=True,
mom=mom)
thread_failures = _wait_and_get_failures(install_threads,
timeout=DEPLOY_TIMEOUT)
thread_names = [x.name for x in thread_failures]
# the rest of the commands require a running Jenkins instance
deployed_masters = [x for x in batched_masters if x not in thread_names]
job_threads = _spawn_threads(deployed_masters,
_create_jobs,
jobs=job_count,
single=single_use,
delay=run_delay,
duration=work_duration,
scenario=scenario)
_wait_on_threads(job_threads, JOB_RUN_TIMEOUT)
r = json.dumps(TIMINGS)
print(r)
current = current + batch_size
@pytest.mark.scalecleanup
def test_cleanup_scale(mom, min_index, max_index, service_id_list) -> None:
"""
Args:
mom: Marathon on Marathon instance name
min_index: minimum index to begin jenkins suffixes at
max_index: maximum index to end jenkins suffixes at
service_id_list: list of service ids to delete
Blanket clean-up of jenkins instances on a DC/OS cluster.
1. Delete the list of service ids if specified
2. Otherwise delete the range between min and max index if specified
3. Otherwise query MoM for all apps matching the "jenkins" prefix
4. Delete all jobs on the running Jenkins instances
5. Uninstall all found Jenkins installs
"""
service_ids = list()
if service_id_list != '':
service_ids = service_id_list.split(",")
elif min_index != -1 and max_index != -1:
service_ids = ["/jenkins{}".format(index) for index in
range(min_index, max_index)]
else:
r = sdk_marathon.filter_apps_by_id('jenkins', mom)
jenkins_apps = r.json()['apps']
jenkins_ids = [x['id'] for x in jenkins_apps]
for service_id in jenkins_ids:
if service_id.startswith('/'):
service_id = service_id[1:]
# skip over '/jenkins' instance - not setup by tests
if service_id == 'jenkins':
continue
service_ids.append(service_id)
cleanup_threads = _spawn_threads(service_ids,
_cleanup_jenkins_install,
mom=mom,
daemon=False)
_wait_and_get_failures(cleanup_threads, timeout=JOB_RUN_TIMEOUT)
def _setup_quota(role, cpus, memory):
current_quotas = sdk_quota.list_quotas()
if "infos" not in current_quotas:
_set_quota(role, cpus, memory)
return
match = False
for quota in current_quotas["infos"]:
if quota["role"] == role:
match = True
break
if match:
sdk_quota.remove_quota(role)
_set_quota(role, cpus, memory)
def _set_quota(role, cpus, memory):
sdk_quota.create_quota(role, cpus=cpus, mem=memory)
def _spawn_threads(names, target, daemon=False, event=None, **kwargs) -> List[ResultThread]:
"""Create and start threads running target. This will pass
the thread name to the target as the first argument.
Args:
names: Thread names
target: Function to run in thread
**kwargs: Keyword args for target
Returns:
List of threads handling target.
"""
thread_list = list()
for service_name in names:
# setDaemon allows the main thread to exit even if
# these threads are still running.
t = ResultThread(target=target,
daemon=daemon,
name=service_name,
args=(service_name,),
kwargs=kwargs)
t.event = event
thread_list.append(t)
t.start()
return thread_list
def _create_service_accounts(service_name, security=None):
if security == DCOS_SECURITY.strict:
try:
start = time.time()
log.info("Creating service accounts for '{}'"
.format(service_name))
sa_name = "{}-principal".format(service_name)
sa_secret = "jenkins-{}-secret".format(service_name)
sdk_security.create_service_account(
sa_name, sa_secret, service_name)
sdk_security.grant_permissions(
'root', '*', sa_name)
sdk_security.grant_permissions(
'root', SHARED_ROLE, sa_name)
end = time.time()
ACCOUNTS[service_name] = {}
ACCOUNTS[service_name]["sa_name"] = sa_name
ACCOUNTS[service_name]["sa_secret"] = sa_secret
TIMINGS["serviceaccounts"][service_name] = end - start
except Exception as e:
log.warning("Error encountered while creating service account: {}".format(e))
raise e
def _install_jenkins(service_name,
client=None,
security=None,
**kwargs):
"""Install Jenkins service.
Args:
service_name: Service Name or Marathon ID (same thing)
client: Marathon client connection
external_volume: Enable external volumes
"""
def _wait_for_deployment(app_id, client):
with LOCK:
res = len(client.get_deployments(app_id)) == 0
return res
try:
if security == DCOS_SECURITY.strict:
kwargs['strict_settings'] = {
'secret_name': ACCOUNTS[service_name]["sa_secret"],
'mesos_principal': ACCOUNTS[service_name]["sa_name"],
}
kwargs['service_user'] = 'root'
log.info("Installing jenkins '{}'".format(service_name))
jenkins.install(service_name,
client,
role=SHARED_ROLE,
fn=_wait_for_deployment,
**kwargs)
except Exception as e:
log.warning("Error encountered while installing Jenkins: {}".format(e))
raise e
def _cleanup_jenkins_install(service_name, **kwargs):
"""Delete all jobs and uninstall Jenkins instance.
Args:
service_name: Service name or Marathon ID
"""
if service_name.startswith('/'):
service_name = service_name[1:]
try:
log.info("Removing all jobs on {}.".format(service_name))
jenkins.delete_all_jobs(service_name, retry=False)
finally:
log.info("Uninstalling {}.".format(service_name))
jenkins.uninstall(service_name,
package_name=config.PACKAGE_NAME,
**kwargs)
def _create_jobs(service_name, **kwargs):
"""Create jobs on deployed Jenkins instances.
All functionality around creating jobs should go here.
Args:
service_name: Jenkins instance name
"""
m_label = _create_executor_configuration(service_name)
_launch_jobs(service_name, label=m_label, **kwargs)
def _create_executor_configuration(service_name: str) -> str:
"""Create a new Mesos Slave Info configuration with a random name.
Args:
service_name: Jenkins instance to add the label
Returns: Random name of the new config created.
"""
mesos_label = "mesos"
jenkins.create_mesos_slave_node(mesos_label,
service_name=service_name,
dockerImage=DOCKER_IMAGE,
executorCpus=0.3,
executorMem=1800,
idleTerminationMinutes=1,
timeout_seconds=600)
return mesos_label
def _launch_jobs(service_name: str,
jobs: int = 1,
single: bool = False,
delay: int = 3,
duration: int = 600,
label: str = None,
scenario: str = None):
"""Create configured number of jobs with given config on Jenkins
instance identified by `service_name`.
Args:
service_name: Jenkins service name
jobs: Number of jobs to create and run
single: Single Use Mesos agent on (true) or off
delay: A job should run every X minute(s)
duration: Time, in seconds, for the job to sleep
        label: Mesos label for jobs to use
        scenario: Scenario name passed to the seed job as its SCENARIO parameter
    """
job_name = 'generator-job'
single_use_str = '100' if single else '0'
seed_config_xml = jenkins._get_job_fixture('gen-job.xml')
seed_config_str = ElementTree.tostring(
seed_config_xml.getroot(),
encoding='utf8',
method='xml')
jenkins.create_seed_job(service_name, job_name, seed_config_str)
log.info(
"Launching {} jobs every {} minutes with single-use "
"({}).".format(jobs, delay, single))
jenkins.run_job(service_name,
job_name,
timeout_seconds=600,
**{'JOBCOUNT': str(jobs),
'AGENT_LABEL': label,
'SINGLE_USE': single_use_str,
'EVERY_XMIN': str(delay),
'SLEEP_DURATION': str(duration),
'SCENARIO': scenario})
def _wait_on_threads(thread_list: List[Thread],
timeout=DEPLOY_TIMEOUT) -> List[Thread]:
"""Wait on the threads in `install_threads` until a specified time
has elapsed.
Args:
thread_list: List of threads
timeout: Timeout is seconds
Returns:
List of threads that are still running.
"""
start_time = current_time = time.time()
for thread in thread_list:
remaining = timeout - (current_time - start_time)
if remaining < 1:
break
thread.join(timeout=remaining)
current_time = time.time()
    active_threads = [x for x in thread_list if x.is_alive()]
return active_threads
def _wait_and_get_failures(thread_list: List[ResultThread],
**kwargs) -> Set[Thread]:
"""Wait on threads to complete or timeout and log errors.
Args:
thread_list: List of threads to wait on
    Returns: A set of threads that failed or timed out.
"""
timeout_failures = _wait_on_threads(thread_list, **kwargs)
timeout_names = [x.name for x in timeout_failures]
if timeout_names:
log.warning("The following {:d} Jenkins instance(s) failed to "
"complete in {:d} minutes: {}"
.format(len(timeout_names),
DEPLOY_TIMEOUT // 60,
', '.join(timeout_names)))
# the following did not timeout, but failed
run_failures = [x for x in thread_list if not x.result]
run_fail_names = [x.name for x in run_failures]
if run_fail_names:
log.warning("The following {:d} Jenkins instance(s) "
"encountered an error: {}"
.format(len(run_fail_names),
', '.join(run_fail_names)))
return set(timeout_failures + run_failures)
def _configure_admin_router(mom_role, jenkins_role=SHARED_ROLE):
# Admin-Router by default only has permissions to read from '*' and 'slave_public' roles.
# When jenkins is launched on a MoM it runs under the mom role.
    # Here we explicitly grant Admin-Router access to both the jenkins role and the mom role.
ADMIN_ROUTER_SERVICE_ACCOUNT_NAME='dcos_adminrouter'
permissions = [
{
'user': ADMIN_ROUTER_SERVICE_ACCOUNT_NAME,
'acl': "dcos:mesos:master:framework:role:{}".format(jenkins_role),
'description': "Grant Admin-Router access to services with the role {}".format(jenkins_role),
'action': 'read'
},
{
'user': ADMIN_ROUTER_SERVICE_ACCOUNT_NAME,
'acl': "dcos:mesos:master:framework:role:{}".format(mom_role),
'description': "Grant Admin-Router access to services with the role {}".format(mom_role),
'action': 'read'
}
]
log.info("Granting permissions to {}".format(ADMIN_ROUTER_SERVICE_ACCOUNT_NAME))
for permission in permissions:
sdk_security.grant(**permission)
log.info("Permission setup completed for {}".format(ADMIN_ROUTER_SERVICE_ACCOUNT_NAME))
```
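For orientation, here is a minimal driver sketch showing how the helper functions above are intended to compose. It is not part of the source file: the `run_scale_test` name and the pre-built `marathon_client` object are assumptions for illustration only.

```python
# Hypothetical driver (illustration only); assumes the helpers above are in scope
# and that `marathon_client` is an already-constructed Marathon client.
def run_scale_test(service_names, marathon_client, security=None):
    # Service accounts are only actually created in strict security mode.
    for name in service_names:
        _create_service_accounts(name, security=security)
    # Install one Jenkins master per service name, each in its own thread.
    install_threads = _spawn_threads(service_names,
                                     _install_jenkins,
                                     client=marathon_client,
                                     security=security)
    # Wait for installs to finish, then log and collect timeouts/failures.
    failed = _wait_and_get_failures(install_threads)
    healthy = [n for n in service_names if n not in {t.name for t in failed}]
    # Create executor configs and launch jobs only on healthy masters.
    job_threads = _spawn_threads(healthy, _create_jobs, jobs=1, single=False)
    _wait_on_threads(job_threads)
    # Tear everything down.
    _wait_on_threads(_spawn_threads(service_names, _cleanup_jenkins_install))
```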
|
{
"source": "jeremylcarter/pyrtition",
"score": 3
}
|
#### File: pyrtition/topic/topic_coordinator.py
```python
import random
import logging
from queue import Queue
from threading import RLock
from typing import Dict, List, Any, Optional, Callable
from pyrtition.topic.topic_message import TopicMessage
from pyrtition.topic.topic_partition import TopicPartition
from pyrtition.topic.topic_partition_capacity import TopicPartitionCapacity
class TopicCoordinator:
name: str
max_partition_count: int
partitions: Dict[int, TopicPartition]
    producer_cache: Dict[str, int]
_lock: RLock
def __init__(self, name="default", max_partition_count: int = 4):
self.name = name
self.max_partition_count = max_partition_count
self._lock = RLock()
self.__partition()
def __partition(self):
self.partitions = dict()
self.producer_cache = dict()
for i in range(self.max_partition_count):
self.__create_partition(i + 1)
def __create_partition(self, number: int) -> bool:
partition = TopicPartition(self.name, number)
self.partitions[number] = partition
return True
def __assign_new_producer_to_partition(self, producer_name: str) -> int:
self._lock.acquire()
try:
next_available_partition = self.__get_next_available_partition()
if next_available_partition is not None:
partition = self.partitions[next_available_partition]
partition.assign_producer(producer_name)
self.producer_cache[producer_name] = partition.number
return partition.number
finally:
self._lock.release()
def __get_next_available_partition(self):
capacities = self.get_capacity()
try:
            # Sort them in ascending order (fewest producers first)
capacities.sort(key=lambda c: c.producers)
# If there is only 1 available then just return it
if len(capacities) == 1:
return capacities[0].number
# Pick the next available zero capacity partition
next_available_zero_capacity = next((c for c in capacities if c.producers == 0), None)
if next_available_zero_capacity is not None:
return next_available_zero_capacity.number
# Either pick the lowest available partition or a random one
pick_lowest = random.getrandbits(1)
if pick_lowest:
return capacities[0].number
else:
# Pick a random partition
random_index = random.randint(0, (len(capacities) - 1))
if random_index < len(capacities):
return capacities[random_index].number
# As a last resort just return the first partition
return capacities[0].number
except Exception as ex:
logging.exception(ex)
# As a last resort just return the first partition
return capacities[0].number
def get_producer_partition(self, producer_name: str) -> int:
if producer_name in self.producer_cache:
return self.producer_cache[producer_name]
raise Exception(f"Producer {producer_name} is not in topic {self.name}")
def get_or_add_producer_partition(self, producer_name: str) -> int:
if producer_name in self.producer_cache:
return self.producer_cache[producer_name]
else:
return self.__assign_new_producer_to_partition(producer_name)
def publish(self, producer_name: str, data: Optional[Any]) -> bool:
assigned_to = self.get_or_add_producer_partition(producer_name)
if data is not None and assigned_to > 0:
self.partitions[assigned_to].put_value(producer_name, data)
return True
return False
def is_queue_empty(self, partition: int) -> bool:
if self.partitions[partition]:
return self.partitions[partition].is_queue_empty()
def dequeue(self, partition: int) -> Any:
if self.partitions[partition]:
return self.partitions[partition].dequeue()
def get_queue(self, partition: int) -> Queue:
if self.partitions[partition]:
return self.partitions[partition].get_queue()
def get_capacity(self) -> List[TopicPartitionCapacity]:
return list([TopicPartitionCapacity(partition.number, partition.producer_count)
for partition in self.partitions.values()])
def start_consuming(self, on_message: Callable[[TopicMessage, int, int], None] = None, use_signals: bool = False):
for partition in self.partitions.values():
partition.start_consuming(on_message, use_signals)
def stop_consuming(self):
for partition in self.partitions.values():
partition.stop_consuming()
```
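A minimal usage sketch of the coordinator API defined above; the topic name, producer names, and payloads are placeholders for illustration.

```python
# Assumes the pyrtition package above is importable.
from pyrtition.topic.topic_coordinator import TopicCoordinator

coordinator = TopicCoordinator(name="metrics", max_partition_count=4)

# Producers are assigned to a partition lazily on their first publish.
coordinator.publish("sensor-a", {"value": 1})
coordinator.publish("sensor-b", {"value": 2})
coordinator.publish("sensor-a", {"value": 3})

# Drain the partition that "sensor-a" was assigned to.
partition = coordinator.get_producer_partition("sensor-a")
while not coordinator.is_queue_empty(partition):
    message = coordinator.dequeue(partition)
    print(message.producer_name, message.timestamp, message.data)
```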
#### File: pyrtition/topic/topic_partition.py
```python
from datetime import datetime
from queue import Queue
from threading import RLock
from typing import Set, Any, Optional, Callable
from pyrtition.topic.topic_message import TopicMessage
class TopicPartition:
topic_name: str
number: int
producer_count: int
producers: Set[str]
_queue: Queue
_lock: RLock
_consumer_thread: Any = None
def __init__(self, name: str, number: int):
self.topic_name = name
self.number = number
self.producer_count = 0
self.producers = set()
self._queue = Queue()
self._lock = RLock()
def dequeue(self) -> Optional[TopicMessage]:
if not self._queue.empty():
return self._queue.get()
def is_queue_empty(self) -> bool:
return self._queue.empty()
def get_queue(self) -> Queue:
return self._queue
def has_producer(self, producer_name):
return producer_name in self.producers
def assign_producer(self, producer_name) -> int:
self._lock.acquire()
try:
self.producers.add(producer_name)
self.producer_count = len(self.producers)
return self.producer_count
finally:
self._lock.release()
def put_value(self, producer_name: str, data: Any):
if not self.has_producer(producer_name):
raise Exception(f"Producer {producer_name} is not a member of this partition")
message = TopicMessage(producer_name=producer_name, timestamp=datetime.utcnow(), data=data)
self._queue.put(message)
# Notify any consumers
if self._consumer_thread:
self._consumer_thread.notify()
def start_consuming(self, on_message: Callable[[TopicMessage, int, int], None] = None, use_signals: bool = False):
# We can only consume if we have an on_message callable
if not on_message:
return
from pyrtition.consumer.topic_partition_consumer_thread import TopicPartitionConsumerThread
consumer_thread = TopicPartitionConsumerThread(self, on_message, use_signals)
consumer_thread.start()
self._consumer_thread = consumer_thread
def stop_consuming(self):
if self._consumer_thread:
self._consumer_thread.stop()
```
|
{
"source": "jeremyletran/react-flask-boilerplate",
"score": 2
}
|
#### File: react-flask-boilerplate/server/app.py
```python
import os
from os.path import join, dirname
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from dotenv import load_dotenv
from logging import StreamHandler
from sys import stdout
dotenv_path = join(dirname(__file__), '../.env')
load_dotenv(dotenv_path)
db = SQLAlchemy()
def create_app():
from api.kittens import kittens_api
from views.index import index_view
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('DATABASE_URL')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.register_blueprint(kittens_api.blueprint, url_prefix='/api')
app.register_blueprint(index_view)
db.init_app(app)
handler = StreamHandler(stdout)
app.logger.addHandler(handler)
return app
```
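A hedged sketch of how the factory above might be run locally; the boilerplate's real entry point may differ, and the host/port values are arbitrary.

```python
# Assumes this lives next to server/app.py and that ../.env provides DATABASE_URL.
from app import create_app

app = create_app()

if __name__ == "__main__":
    # Development server only; a WSGI server would be used in production.
    app.run(host="127.0.0.1", port=5000, debug=True)
```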
|
{
"source": "jeremyleung521/WE_dap",
"score": 3
}
|
#### File: we-dap/alt/hdf5_consistency.py
```python
def consistent(h5f):
    """Check that each iteration's starting pcoord values are a subset of the
    previous iteration's final pcoord values; report any violations."""
    result = True
    violations = 0
    for i, v in enumerate(h5f['iterations'].values()):
        if i == 0:
            prevset = set([x[-1][0] for x in v['pcoord'][:]])
            print("iteration", i + 1, prevset)
        else:
            newset = set([x[0][0] for x in v['pcoord'][:]])
            if not newset.issubset(prevset):
                print("consistency violation iter", i, prevset, newset)
                result = False
                violations += 1
            prevset = set([x[-1][0] for x in v['pcoord'][:]])
            print("iteration", i + 1, prevset)
    if violations > 0:
        print("N_violations:", violations)
    return result
```
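Example invocation of the check above, assuming a WESTPA-style west.h5 file whose 'iterations' group holds per-iteration 'pcoord' datasets; the file name is a placeholder.

```python
import h5py

with h5py.File("west.h5", mode="r") as h5f:
    if consistent(h5f):
        print("pcoord start/end values are consistent across iterations")
    else:
        print("inconsistencies found; see output above")
```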
#### File: WE_dap/we-dap/h5_plot_main.py
```python
import h5py
import numpy as np
from numpy import inf
import matplotlib.pyplot as plt
from warnings import warn
# Suppress divide-by-zero in log
np.seterr(divide='ignore', invalid='ignore')
def norm_hist(hist, p_units, p_max=None):
""" TODO: add temperature arg, also this function may not be needed.
Parameters
----------
hist : ndarray
Array containing the histogram count values to be normalized.
p_units : str
Can be 'kT' or 'kcal'. kT = -lnP, kcal/mol = -RT(lnP), where RT = 0.5922 at 298K.
Adjust RT accordingly if a different temperature is used.
p_max : int (optional)
The maximum probability limit value.
Returns
-------
hist : ndarray
The hist array is normalized according to the p_units argument.
If p_max, probability values above p_max are adjusted to be inf.
"""
if p_units == "kT":
hist = -np.log(hist / np.max(hist))
elif p_units == "kcal":
hist = -0.5922 * np.log(hist / np.max(hist))
else:
raise ValueError("Invalid p_units value, must be 'kT' or 'kcal'.")
if p_max: # TODO: this may not be necessary, can just set limits in plotting function
hist[hist > p_max] = inf
return hist
def aux_to_pdist(h5, iteration, aux_x, aux_y=None, bins=100, hist_range=None):
"""
Parameters
----------
h5 : str
Path to west.h5 file containing coordinates and aux data from WE simulation.
iteration : int
Desired iteration to extract timeseries info from.
aux_x : str
        Auxiliary data identifier for recorded values in west.h5 to be dimension 0.
    aux_y : str
        Optional auxiliary data identifier for recorded values in west.h5 to be dimension 1.
    bins : int
        Number of bins in histogram data, default 100.
hist_range: tuple (optional)
2 int values for min and max hist range.
Returns
-------
midpoints_x : ndarray
Histogram midpoint bin values for target aux coordinate of dimension 0.
midpoints_y : ndarray
Optional histogram midpoint bin values for target aux coordinate of dimension 1.
histogram : ndarray
Raw histogram count values of each histogram bin. Can be later normalized as -lnP(x).
"""
f = h5py.File(h5, mode="r")
# each row is walker with 1 column that is a tuple of values, the first being the seg weight
seg_weights = np.array(f[f"iterations/iter_{iteration:08d}/seg_index"])
# return 1D aux data: 1D array for histogram and midpoint values
    if aux_y is None:
aux = np.array(f[f"iterations/iter_{iteration:08d}/auxdata/{aux_x}"])
# make an 1-D array to fit the hist values based off of bin count
histogram = np.zeros(shape=(bins))
for seg in range(0, aux.shape[0]):
# can use dynamic hist range based off of dataset or a static value from arg
if hist_range:
counts, bins = np.histogram(aux[seg], bins=bins, range=hist_range)
else:
counts, bins = np.histogram(aux[seg], bins=bins, range=(np.amin(aux), np.amax(aux)))
# multiply counts vector by weight scalar from seg index
counts = np.multiply(counts, seg_weights[seg][0])
# add all of the weighted walkers to total array for the resulting linear combination
histogram = np.add(histogram, counts)
# get bin midpoints
midpoints_x = (bins[:-1] + bins[1:]) / 2
return midpoints_x, histogram
# 2D instant histogram and midpoint values for a single specified WE iteration
if aux_y:
aux_x = np.array(f[f"iterations/iter_{iteration:08d}/auxdata/{aux_x}"])
aux_y = np.array(f[f"iterations/iter_{iteration:08d}/auxdata/{aux_y}"])
# 2D array to store hist counts for each timepoint in both dimensions
histogram = np.zeros(shape=(bins, bins))
for seg in range(0, aux_x.shape[0]):
# can use dynamic hist range based off of dataset or a static value from arg
if hist_range:
counts, bins_x, bins_y = np.histogram2d(aux_x[seg], aux_y[seg], bins=bins, range=hist_range)
else:
counts, bins_x, bins_y = np.histogram2d(aux_x[seg], aux_y[seg], bins=bins,
range=[[np.amin(aux_x), np.amax(aux_x)],
[np.amin(aux_y), np.amax(aux_y)]]
)
# multiply counts vector by weight scalar from seg index
counts = np.multiply(counts, seg_weights[seg][0])
# add all of the weighted walkers to total array for the resulting linear combination
histogram = np.add(histogram, counts)
# get bin midpoints
midpoints_x = (bins_x[:-1] + bins_x[1:]) / 2
midpoints_y = (bins_y[:-1] + bins_y[1:]) / 2
# flip and rotate to correct orientation (alt: TODO, try np.transpose)
histogram = np.rot90(np.flip(histogram, axis=1))
return midpoints_x, midpoints_y, histogram
def get_iter_range(h5, aux, iteration, ext):
"""
Parameters
----------
h5 : str
path to west.h5 file
aux : str
        target auxiliary data for range calculation
iteration : int
iteration to calculate range of
ext : float
percentage extension of range, e.g. 0.05 = 5% range extension
Returns
-------
iter_range : tuple
2 item tuple of min and max bin bounds for hist range of target aux data.
"""
f = h5py.File(h5, mode="r")
aux_at_iter = np.array(f[f"iterations/iter_{iteration:08d}/auxdata/{aux}"])
return (np.amin(aux_at_iter) - (np.amin(aux_at_iter) * ext * 5), # TODO: this works for now... but need a smarter solution
np.amax(aux_at_iter) + (np.amax(aux_at_iter) * ext)
)
def pdist_to_normhist(args_list):
"""
Parameters
----------
args_list : argparse.Namespace
Contains command line arguments passed in by user.
h5 : str
path to west.h5 file
aux_x : str #TODO: default to pcoord1
target data for x axis
aux_y : str #TODO: default to pcoord1
target data for y axis
data_type : str
        'evolution' (1 dataset); 'average' or 'instant' (1 or 2 datasets)
last_iter : int
Last iteration data to include, default is the last recorded iteration in the west.h5 file.
first_iter : int
Default start plot at iteration 1 data.
bins : int
        Number of histogram bins in pdist data to be generated, default 100.
bin_ext : float
Increase the limits of the bins by a percentage value (0.05 = 5% = default).
p_max : int
The maximum probability limit value.
p_units : str
Can be 'kT' (default) or 'kcal'. kT = -lnP, kcal/mol = -RT(lnP), where RT = 0.5922 at 298K.
Returns
-------
x, y, norm_hist
x and y axis values, and if using aux_y or evolution (with only aux_x), also returns norm_hist.
norm_hist is a 2-D matrix of the normalized histogram values.
"""
#p_max += 10 # makes for smoother edges of contour plots # TODO, don't really need pmax here anymore
p_max = None
if args_list.last_iter:
max_iter = args_list.last_iter
elif args_list.last_iter is None:
max_iter = h5py.File(args_list.h5, mode="r").attrs["west_current_iteration"] - 1
else:
raise TypeError("last_iter must be int.")
# get range for max iter hist values: use this as static bin value for evolution plot
max_iter_hist_range_x = get_iter_range(args_list.h5, args_list.aux_x, max_iter, 0.25)
if args_list.aux_y:
max_iter_hist_range_y = get_iter_range(args_list.h5, args_list.aux_y, max_iter, 0.25)
if args_list.data_type == "instant":
if args_list.aux_y:
center_x, center_y, counts_total = aux_to_pdist(args_list.h5, max_iter, args_list.aux_x, aux_y=args_list.aux_y,
bins=args_list.bins, hist_range=(max_iter_hist_range_x,
max_iter_hist_range_y
)
)
counts_total = norm_hist(counts_total, args_list.p_units, p_max=p_max)
return center_x, center_y, counts_total
else:
center, counts_total = aux_to_pdist(args_list.h5, max_iter, args_list.aux_x, bins=args_list.bins, hist_range=max_iter_hist_range_x)
counts_total = norm_hist(counts_total, args_list.p_units, p_max=p_max)
return center, counts_total
elif args_list.data_type == "evolution" or args_list.data_type == "average":
# make array to store hist (-lnP) values for n iterations of aux_x
evolution_x = np.zeros(shape=(max_iter, args_list.bins))
positions_x = np.zeros(shape=(max_iter, args_list.bins))
if args_list.aux_y:
average_xy = np.zeros(shape=(args_list.bins, args_list.bins))
for iter in range(args_list.first_iter, max_iter + 1):
center_x, counts_total_x = aux_to_pdist(args_list.h5, iter, args_list.aux_x, bins=args_list.bins, hist_range=max_iter_hist_range_x)
evolution_x[iter - 1] = counts_total_x
positions_x[iter - 1] = center_x
# 2D avg pdist data generation
if args_list.aux_y:
center_x, center_y, counts_total_xy = aux_to_pdist(args_list.h5, iter, args_list.aux_x, aux_y=args_list.aux_y,
bins=args_list.bins, hist_range=(max_iter_hist_range_x,
max_iter_hist_range_y
)
)
average_xy = np.add(average_xy, counts_total_xy)
# 2D evolution plot of aux_x (aux_y not used if provided) per iteration
if args_list.data_type == "evolution":
evolution_x = norm_hist(evolution_x, args_list.p_units, p_max=p_max)
return positions_x, np.arange(1, max_iter + 1,1), evolution_x
if args_list.data_type == "average":
# summation of counts for all iterations : then normalize
            col_avg_x = [np.sum(col[col != np.inf]) for col in evolution_x.T]
col_avg_x = norm_hist(col_avg_x, args_list.p_units, p_max=p_max)
# 2D average plot data for aux_x and aux_y
if args_list.aux_y:
average_xy = norm_hist(average_xy, args_list.p_units, p_max=p_max)
return center_x, center_y, average_xy
# 1D average plot data for aux_x
else:
return center_x, col_avg_x
else:
raise ValueError("data_type str must be 'instant', 'average', or 'evolution'")
# TODO
# def _smooth(self):
# if self.data_smoothing_level is not None:
# self.Z_data[numpy.isnan(self.Z_data)] = numpy.nanmax(self.Z_data)
# self.Z_data = scipy.ndimage.filters.gaussian_filter(self.Z_data,
# self.data_smoothing_level)
# if self.curve_smoothing_level is not None:
# self.Z_curves[numpy.isnan(self.Z_curves)] = numpy.nanmax(self.Z_curves)
# self.Z_curves = scipy.ndimage.filters.gaussian_filter(self.Z_curves,
# self.curve_smoothing_level)
# self.Z_data[numpy.isnan(self.Z)] = numpy.nan
# self.Z_curves[numpy.isnan(self.Z)] = numpy.nan
def plot_normhist(x, y, args_list, norm_hist=None, ax=None, **plot_options):
"""
Parameters
----------
x, y : ndarray
x and y axis values, and if using aux_y or evolution (with only aux_x), also must input norm_hist.
args_list : argparse.Namespace
Contains command line arguments passed in by user.
norm_hist : ndarray
norm_hist is a 2-D matrix of the normalized histogram values.
ax : mpl axes object
args_list options
-----------------
plot_type: str
'heat' (default), or 'contour'.
data_type : str
        'evolution' (1 dataset); 'average' or 'instant' (1 or 2 datasets)
p_max : int
The maximum probability limit value.
p_units : str
Can be 'kT' (default) or 'kcal'. kT = -lnP, kcal/mol = -RT(lnP), where RT = 0.5922 at 298K.
cmap : str
Colormap option, default = viridis.
**plot_options : kwargs
"""
if ax is None:
fig, ax = plt.subplots(figsize=(8,6))
else:
fig = plt.gca()
# 2D heatmaps
if norm_hist is not None and args_list.plot_type == "heat":
if args_list.p_max:
norm_hist[norm_hist > args_list.p_max] = inf
plot = ax.pcolormesh(x, y, norm_hist, cmap=args_list.cmap, shading="auto", vmin=0, vmax=args_list.p_max)
    # 2D contour plots TODO: add smoothing functionality
elif norm_hist is not None and args_list.plot_type == "contour":
if args_list.data_type == "evolution":
raise ValueError("For contour plot, data_type must be 'average' or 'instant'")
elif args_list.p_max is None:
warn("With 'contour' plot_type, p_max should be set. Otherwise max norm_hist is used.")
levels = np.arange(0, np.max(norm_hist[norm_hist != np.inf ]), 1)
elif args_list.p_max <= 1:
levels = np.arange(0, args_list.p_max + 0.1, 0.1)
else:
levels = np.arange(0, args_list.p_max + 1, 1)
lines = ax.contour(x, y, norm_hist, levels=levels, colors="black", linewidths=1)
plot = ax.contourf(x, y, norm_hist, levels=levels, cmap=args_list.cmap)
# 1D data
elif norm_hist is None:
if args_list.p_max:
y[y > args_list.p_max] = inf
ax.plot(x, y)
# unpack plot options dictionary # TODO: update this for argparse
for key, item in plot_options.items():
if key == "xlabel":
ax.set_xlabel(item)
if key == "ylabel":
ax.set_ylabel(item)
if key == "xlim":
ax.set_xlim(item)
if key == "ylim":
ax.set_ylim(item)
if key == "title":
ax.set_title(item)
if key == "grid":
ax.grid(item, alpha=0.5)
if key == "minima": # TODO: this is essentially bstate, also put maxima?
# reorient transposed hist matrix
norm_hist = np.rot90(np.flip(norm_hist, axis=0), k=3)
# get minima coordinates index (inverse maxima since min = 0)
maxima = np.where(1 / norm_hist == np.amax(1 / norm_hist, axis=(0, 1)))
# plot point at x and y bin midpoints that correspond to mimima
ax.plot(x[maxima[0]], y[maxima[1]], 'ko')
print(f"Minima: ({x[maxima[0]][0]}, {y[maxima[1]][0]})")
if norm_hist is not None:
cbar = fig.colorbar(plot)
# TODO: lines on colorbar?
#if lines:
# cbar.add_lines(lines)
if args_list.p_units == "kT":
cbar.set_label(r"$\Delta F(\vec{x})\,/\,kT$" + "\n" + r"$\left[-\ln\,P(x)\right]$")
elif args_list.p_units == "kcal":
cbar.set_label(r"$\it{-RT}$ ln $\it{P}$ (kcal mol$^{-1}$)")
#fig.tight_layout()
# TODO: could convert to class, with args = h5, data_type, etc as attrs
# then each class method calls self.h5 and etc
# TODO: include postprocess.py trace and plot walker functionality
# TODO: eventually add argparse, maybe add some unit tests
# argparse would be in a separate file
# all plotting options with test.h5, compare output
# 1D Evo, 1D and 2D instant and average
# optional: diff max_iter and bins args
# TODO: maybe have a yaml config file for plot options
```
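Since argparse is still a TODO in this module, a hedged usage sketch is shown below; the expected attributes are supplied through a plain Namespace, and the west.h5 path, the aux dataset names ("aux_x_name", "aux_y_name"), and the p_max cutoff of 20 are placeholders.

```python
from argparse import Namespace
import matplotlib.pyplot as plt

args = Namespace(h5="west.h5", aux_x="aux_x_name", aux_y="aux_y_name",
                 data_type="average", first_iter=1, last_iter=None,
                 bins=100, p_max=20, p_units="kT",
                 plot_type="heat", cmap="viridis")

# 2D average free-energy surface over the two aux coordinates.
center_x, center_y, avg_hist = pdist_to_normhist(args)
plot_normhist(center_x, center_y, args, norm_hist=avg_hist,
              xlabel="aux_x_name", ylabel="aux_y_name", grid=True)
plt.show()
```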
|
{
"source": "jeremylibretti/scarletwitch",
"score": 2
}
|
#### File: jeremylibretti/scarletwitch/collect_dynamic.py
```python
from scarletwitch import ScarletWitch
import sys
import math
import time, os
import mediapipe as mp
import cv2 as cv
import numpy as np
import webcam
from utils import CvFpsCalc
from handtracker import HandTracker
from gestureclassifier import GestureClassifier
from collections import deque
class DynamicDataCollection(ScarletWitch):
def __init__(self, arguments, stdout=sys.stdout):
super().__init__(arguments, stdout)
self.show_info = True
self.training_mode = 2
self.label_id = -1
self.record = False
self.last_key = ""
self.actions = ["none", "click", "double click"]
self.datasets = [[] for i in range(len(self.actions))]
self.secs_for_action = 2
self.min_freq = self.secs_for_action*30
self.last_label = -1
self.time_created = int(time.time())
self.time_0 = time.time()
self.start_time = time.time()
self.draw_error = False
os.makedirs('dataset', exist_ok=True)
def key_action(self, key):
self.draw_error = False
if self.last_label < 0:
self.record = False
if key > -1:
self.last_key = chr(key)
else:
self.last_key = ""
if key == 27: # ESC
self.last_key = "ESC"
self.time_created = int(time.time())
self.terminate()
if 48 <= key <= 57: # 0 ~ 9
self.label_id = key - 48
if (self.label_id+1) > len(self.actions):
self.draw_error = True
self.label_id = -1
if not self.record:
self.last_label = self.label_id
if key == 114 and not self.record: # r (record)
self.record = True
self.start_time = time.time()
if self.last_label >= 0:
self.datasets[self.label_id].append([])
else:
self.draw_error = True
if key == 110: # n (none)
self.label_id = -1
if not self.record:
self.last_label = self.label_id
if self.record:
times_up = (time.time() - self.start_time) > self.secs_for_action
if times_up:
self.record = False
def process_landmarks(self, debug_img, landmarks, history, classifier):
# Bounding box calculation
self.brect = self.calc_bounding_rect(debug_img, landmarks)
# Joint angle calculations
landmark_data = self.landmarks_to_angles(landmarks)
history.append(landmark_data)
if self.last_label >= 0 and self.record:
# print("added to dataset")
self.datasets[self.label_id][-1].append(landmark_data)
static_gesture_id, static_gesture_label = classifier.classify_static(landmark_data)
dynamic_gesture_id, dynamic_gesture_label = classifier.classify_dynamic(history)
# Image Drawing
debug_image = self.draw_bounding_rect(debug_img, self.brect)
self.mp_drawing.draw_landmarks(debug_image, landmarks, self.mp_hands.HAND_CONNECTIONS)
debug_image = self.draw_hand_info(debug_image, self.brect, static_gesture_label)
return history, [static_gesture_id, static_gesture_label], [dynamic_gesture_id, dynamic_gesture_label]
def draw_hand_info(self, image, brect, static_gesture_text):
cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[1] - 22),
(0, 0, 0), -1)
info_text = static_gesture_text
cv.putText(image, info_text, (brect[0] + 5, brect[1] - 4),
cv.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1, cv.LINE_AA)
return image
def draw_window_info(self, image, dimensions, fps, dynamic_gesture_text):
start_time = self.start_time
# FPS
cv.putText(image, "FPS:" + str(fps), (10, 35), cv.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1, cv.LINE_AA)
# Training
training_text = ["Off", "Static", "Dynamic"]
cv.putText(image, "Training: " + training_text[self.training_mode], (10, 70),
cv.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1, cv.LINE_AA)
# Label id
id = str(self.last_label)
text_color = (255, 255, 255)
sum = 0
if self.last_label == -1: # use last_label instead of label_id
id = "None"
else:
sum = len(self.datasets[self.last_label])
if self.draw_error:
text_color = (0, 0, 255)
cv.putText(image, "Label ID: " + id, (10, 110),
cv.FONT_HERSHEY_DUPLEX, 1.0, text_color, 1, cv.LINE_AA)
# Number of gestures recorded
cv.putText(image, "Sum: " + str(sum) + "", (10, 145),
cv.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1, cv.LINE_AA)
# Key Press
cv.putText(image, self.last_key, (dimensions[0] - 35, 35),
cv.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1, cv.LINE_AA)
# Recording dynamic gesture
if self.record and self.last_label >= 0:
cv.putText(image, "RECORDING: ", (10, 180),
cv.FONT_HERSHEY_DUPLEX, 1.0, (0, 0, 255), 1, cv.LINE_AA)
time_left = self.secs_for_action - (time.time() - start_time)
cv.putText(image, str(time_left), (15, 205), cv.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255), 1, cv.LINE_AA)
return image
def terminate(self):
time = self.time_created
# Write to the dataset files
# Find length of shortest string of data
for d in range(len(self.datasets)):
data = self.datasets[d]
if len(data) > 0:
for seq in range(len(data)):
seq_data = data[seq]
if len(seq_data) < self.min_freq:
self.min_freq = len(seq_data)
self.min_freq = math.floor(self.min_freq*0.95)
# Write files
for d in range(len(self.datasets)):
data = self.datasets[d]
if len(data) > 0:
data = np.array(data)
# print(action, data.shape)
np.save(os.path.join('dataset', 'test_data_', f'raw_{self.actions[d]}_{time}'), data)
# Create sequence data
full_seq_data = []
for seq in range(len(data)):
seq_data = data[seq]
seq_data = seq_data[0: self.min_freq]
full_seq_data.append(seq_data)
full_seq_data = np.array(full_seq_data)
# print(action, full_seq_data.shape)
np.save(os.path.join('dataset', 'test_data_', f'seq_{self.actions[d]}_{time}'), full_seq_data)
print("wrote dynamic data")
print("min_freq = " + str(self.min_freq))
self.running = False
```
|
{
"source": "jeremylimconsulting/spiceai",
"score": 2
}
|
#### File: src/algorithms/factory.py
```python
from algorithms.agent_interface import SpiceAIAgent
from algorithms.dql.agent import DeepQLearning_Agent
from algorithms.vpg.agent import VanillaPolicyGradient_Agent
def get_agent(name: str, state_shape, action_size: int) -> SpiceAIAgent:
if name == "vpg":
return VanillaPolicyGradient_Agent(state_shape, action_size)
elif name == "dql":
return DeepQLearning_Agent(state_shape, action_size)
raise NotImplementedError(f"Unable to find agent with name '{name}'")
```
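A small usage sketch of the factory; the state shape and action count are placeholders, and instantiating the real agents requires their framework dependencies to be installed.

```python
agent = get_agent("vpg", state_shape=(1, 10), action_size=3)

try:
    get_agent("not-an-algo", state_shape=(1, 10), action_size=3)
except NotImplementedError as err:
    print(err)  # Unable to find agent with name 'not-an-algo'
```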
#### File: ai/src/exec.py
```python
def somewhat_safe_exec(code: str, locals_dict: dict) -> dict:
exec(code, {"__builtins__": {}}, locals_dict)
return locals_dict
def somewhat_safe_eval(code: str, locals_dict: dict) -> any:
return eval(code, {"__builtins__": {}}, locals_dict)
```
#### File: ai/src/validation.py
```python
def validate_rewards(action_rewards: "dict[str]") -> bool:
for action_name in action_rewards:
if (
"reward =" not in action_rewards[action_name]
and "reward=" not in action_rewards[action_name]
):
return False
return True
```
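A quick sketch of how the two helper modules above fit together; the reward expressions and the observation value are invented for illustration, and the import paths assume the files are importable as shown.

```python
from validation import validate_rewards
from exec import somewhat_safe_exec, somewhat_safe_eval

rewards = {
    "buy": "reward = 1 if price < 10 else -1",
    "sell": "reward = -1",
}
assert validate_rewards(rewards)

local_vars = somewhat_safe_exec(rewards["buy"], {"price": 5})
print(local_vars["reward"])                            # 1
print(somewhat_safe_eval("price * 2", {"price": 5}))   # 10
```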
|
{
"source": "jeremylingg/einundzwanzigbot",
"score": 3
}
|
#### File: einundzwanzigbot/src/database.py
```python
import sqlite3
import os
from sqlite3.dbapi2 import Connection
import config
SQLITE_DB_PATH = "db/einundzwanzig.db"
def setup_database():
"""
Sets up the database and initialises all tables
Must be called before any requests can be sent
"""
# database path
if not os.path.exists('db'):
os.mkdir('db')
# database setup
con = sqlite3.connect(SQLITE_DB_PATH)
cur = con.cursor()
# setup tables
# ATH PRICE TABLE
cur.execute('CREATE TABLE IF NOT EXISTS price (price_usd REAL, last_message_id INTEGER)')
first_entry = cur.execute('SELECT * FROM price').fetchone()
if first_entry is None and config.FEATURE_ATH_MANUAL_LAST_ATH != 0:
cur.execute('INSERT INTO price (price_usd, last_message_id) VALUES (?, 0)',
(config.FEATURE_ATH_MANUAL_LAST_ATH,))
con.commit()
con.close()
def get_connection() -> Connection:
"""
Returns a connection to the database
"""
con = sqlite3.connect(SQLITE_DB_PATH)
return con
```
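Example usage of the database helpers above; it assumes the same config module is available and that the process can create the db/ directory.

```python
setup_database()

con = get_connection()
cur = con.cursor()
row = cur.execute('SELECT price_usd, last_message_id FROM price').fetchone()
print(row)  # e.g. (50000.0, 0) once an ATH price has been seeded
con.close()
```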
|
{
"source": "JeremyLinux/Basic-GAN",
"score": 3
}
|
#### File: JeremyLinux/Basic-GAN/basic_gan.py
```python
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.fc1 = nn.Linear(256, 384)
self.bn1 = nn.BatchNorm1d(384)
self.fc2 = nn.Linear(384, 512)
self.bn2 = nn.BatchNorm1d(512)
self.fc3 = nn.Linear(512, 784)
def forward(self, z):
out = nn.functional.leaky_relu(self.bn1(self.fc1(z)), 0.05)
out = nn.functional.leaky_relu(self.bn2(self.fc2(out)), 0.05)
out = torch.sigmoid(self.fc3(out))
return out
class Critic(nn.Module):
def __init__(self):
super(Critic, self).__init__()
self.fc1 = nn.Linear(784, 256)
self.bn1 = nn.BatchNorm1d(256)
self.fc2 = nn.Linear(256, 128)
self.bn2 = nn.BatchNorm1d(128)
self.fc3 = nn.Linear(128, 1)
def forward(self, x):
out = nn.functional.leaky_relu(self.bn1(self.fc1(x)), 0.05)
out = nn.functional.leaky_relu(self.bn2(self.fc2(out)), 0.05)
out = self.fc3(out)
return out
class GAN(object):
def __init__(self, lr, batch_size, device):
self.batch_size = batch_size
self.device = device
self.generator = Generator().to(self.device)
self.critic = Critic().to(self.device)
self.g_optimiser = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=(0.5, 0.99))
self.c_optimiser = torch.optim.Adam(self.critic.parameters(), lr=lr, betas=(0.5, 0.99))
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x.view(-1))
])
dataset = MNIST(".", train=True, download=True, transform=transform)
self.dataloader = DataLoader(dataset, batch_size=self.batch_size, shuffle=True)
self.loss_func = nn.BCEWithLogitsLoss()
def sample_z(self, n):
z = (torch.rand(n, 256)).to(self.device)
return z
def generate_images(self, z):
self.generator.eval()
imgs = self.generator(z)
imgs = imgs.view(imgs.size(0), 28, 28).detach().cpu().data.numpy()
return imgs
def fit_networks(self, iterations, plot_every):
for i in range(iterations):
self.critic.train()
self.generator.eval()
self.c_optimiser.zero_grad()
z = self.sample_z(self.batch_size)
for data, labels in self.dataloader:
x = data.to(self.device)
break
x_hat = self.generator.forward(z)
y_hat = self.critic.forward(torch.cat((x, x_hat), dim=0))
labels = torch.cat((torch.ones(self.batch_size, 1),
torch.zeros(self.batch_size, 1)),
dim=0).to(self.device)
critic_loss = self.loss_func(y_hat, labels)
critic_loss.backward()
self.c_optimiser.step()
self.critic.eval()
self.generator.train()
self.g_optimiser.zero_grad()
z = self.sample_z(self.batch_size)
x_hat = self.generator.forward(z)
y_hat = self.critic.forward(x_hat)
labels = torch.ones(self.batch_size, 1).to(self.device)
generator_loss = self.loss_func(y_hat, labels)
generator_loss.backward()
self.g_optimiser.step()
if (i+1) % plot_every == 0:
z = self.sample_z(4)
imgs = self.generate_images(z)
fig, ax = plt.subplots(figsize=(12, 3), nrows=1, ncols=4)
for j in range(4):
ax[j].imshow(imgs[j], cmap='gray')
ax[j].set_title(str(i+1) + ' iterations')
plt.show()
lr = 0.001
batch_size = 256
device = 'cuda' if torch.cuda.is_available() else 'cpu'
iterations = 20000
plot_every = 200
gan = GAN(lr, batch_size, device)
gan.fit_networks(iterations, plot_every)
```
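After training, new digits can be sampled straight from the trained generator; the batch size of 16 below is arbitrary.

```python
# Continues from the script above, where `gan` has already been trained.
z = gan.sample_z(16)
images = gan.generate_images(z)   # numpy array of shape (16, 28, 28)

fig, axes = plt.subplots(nrows=2, ncols=8, figsize=(12, 3))
for img, axis in zip(images, axes.flatten()):
    axis.imshow(img, cmap='gray')
    axis.axis('off')
plt.show()
```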
|
{
"source": "JeremyLinux/PTGPBO",
"score": 2
}
|
#### File: JeremyLinux/PTGPBO/ackley_gif.py
```python
import torch
import numpy as np
import matplotlib.pyplot as plt
import kernel as krn
import gaussian_process as gaussp
import glob
import imageio
class BayesOptimiser(object):
def __init__(self, gp, acq_func, epsilon=1e-8):
self.gp = gp
self.acq_func = acq_func
self.epsilon = epsilon
def minimise(self, objective, bounds, iters, iter_loops=20,
sub_opt_iters=100, sub_opt_lr=0.01, n_inits=3):
x_hist = None
y_hist = None
y_min = float('inf')
x_min = None
beta1 = 0.9
beta2 = 0.99
x_lin = torch.linspace(-8, 8, 501)
exp1 = -0.2*(0.5*x_lin.pow(2)).pow(0.5)
exp2 = 0.5*torch.cos(2*np.pi*x_lin)
y_lin = np.e + 20 - 20*torch.exp(exp1) - torch.exp(exp2)
x_lin = x_lin.view(501, 1)
y_lin = y_lin.view(501, 1)
for x_sample in bounds[:,0].view(1,-1) + torch.rand((n_inits,1)) * \
(bounds[:,1] - bounds[:,0]).view(1,-1):
y_sample = torch.from_numpy(objective(x_sample.data.numpy())).float()
if y_sample < y_min:
y_min = y_sample
x_min = x_sample
if x_hist is not None:
x_hist = torch.cat((x_hist, x_sample.view(1,-1)), dim=0)
y_hist = torch.cat((y_hist, y_sample.view(1,-1)), dim=0)
else:
x_hist = x_sample.view(1,-1)
y_hist = y_sample.view(1,-1)
for i in range(iters):
self.gp.kernel.train()
self.gp.kernel.reset_params()
self.gp.fit(x_hist, y_hist)
self.gp.kernel.eval()
with torch.no_grad():
mu, sigma = self.gp.predict(x_lin, return_var=True)
mu = mu.flatten().data.numpy()
std = sigma.pow(0.5).flatten().data.numpy()
plt.figure(figsize=(6,4.5))
plt.plot(x_lin.flatten().data.numpy(), y_lin.flatten().data.numpy(),
color='k', alpha=0.5, linestyle='--')
plt.plot(x_lin.flatten().data.numpy(), mu, c='r', alpha=0.75)
plt.fill_between(x_lin.flatten().data.numpy(),
mu + std,
mu - std,
color='r', alpha=0.25)
plt.scatter(x_hist.flatten().data.numpy(),
y_hist.flatten().data.numpy(), c='k')
plt.title('Iteration: %d Minimum: %4.2f At: %4.2f' % \
(i+1, y_min, x_min))
plt.xlim([bounds[0,0],bounds[0,1]])
plt.ylim([-0.5, y_lin.max().item() + 0.5])
plt.savefig('ackley_frame_{:04d}.png'.format(i+1))
plt.show()
y_acq = float('inf')
for j in range(iter_loops):
x0 = bounds[:,0] + torch.rand(bounds.size(0)) * (bounds[:,1] - bounds[:,0])
V_dx = torch.zeros(x0.size())
S_dx = torch.zeros(x0.size())
for k in range(sub_opt_iters):
dx = self.acq_func.grad(x0, y_min, self.gp).flatten()
V_dx = beta1 * V_dx + (1 - beta1) * dx
V_dx_corr = V_dx / (1 - beta1**(k+1))
S_dx = beta2 * S_dx + (1 - beta2) * dx.pow(2)
S_dx_corr = S_dx / (1 - beta2**(k+1))
grad = V_dx_corr / (torch.sqrt(S_dx_corr + 1e-8))
x0 = x0 - sub_opt_lr * grad
x_bounded = torch.min(torch.max(x0.data, bounds[:,0]), bounds[:,1])
x0.data = x_bounded
acq = self.acq_func(x0, y_min, self.gp)
if acq < y_acq:
y_acq = acq
x_sample = x0
distances = (x_sample.view(1,-1) - x_hist).pow(2).sum(1).pow(0.5)
if torch.any(distances <= self.epsilon):
x_sample = bounds[:,0] + torch.rand(bounds.size(0)) * (bounds[:,1] - bounds[:,0])
y_sample = torch.from_numpy(objective(x_sample.data.numpy())).float()
x_hist = torch.cat((x_hist, x_sample.view(1,-1)), dim=0)
y_hist = torch.cat((y_hist, y_sample.view(1,-1)), dim=0)
if y_sample < y_min:
y_min = y_sample
x_min = x_sample
return x_min, y_min, x_hist, y_hist
class Acquisition(object):
def __init__(self, beta):
self.beta = beta
def __call__(self, x, y_min, gp):
if len(x.size()) < 2:
x = x.view(1,-1)
mu, sigma = gp.predict(x, return_var=True)
out = mu - self.beta*sigma
return out
def grad(self, x, y_min, gp):
if len(x.size()) < 2:
x = x.view(1,-1)
mu_grad = gp.mu_grad(x)
sigma_grad = gp.sigma_grad(x)
grad = mu_grad - self.beta*sigma_grad
return grad
def ackley(x):
x = torch.from_numpy(x)
x = x.view(-1,1)
exp1 = -0.2*(0.5*x.pow(2).sum(1)).pow(0.5)
exp2 = 0.5*torch.cos(2*np.pi*x).sum(1)
y = np.e + 20 - 20*torch.exp(exp1) - torch.exp(exp2)
return y.data.numpy()
bounds = torch.tensor([[-8.0, 8.0]]).expand(1,2)
iters = 20
kernel = krn.Matern32Kernel(1.0, 1.0, 'cpu') * \
krn.Matern52Kernel(1.0, 1.0, 'cpu')
gp = gaussp.GaussianProcess(kernel, alpha=0.0001)
acq_func = Acquisition(1.0)
optimiser = BayesOptimiser(gp, acq_func)
x_min, y_min, xp, yp = optimiser.minimise(ackley, bounds, iters)
filenames = glob.glob('ackley_frame*.png')
filenames = sorted(filenames)
gif_images = []
for filename in filenames:
gif_images.append(imageio.imread(filename))
imageio.mimsave('ackley.gif', gif_images, duration=1/2)
```
#### File: JeremyLinux/PTGPBO/bayes_opt.py
```python
import torch
import sys
class BayesOptimiser(object):
def __init__(self, gp, acq_func, epsilon=1e-8):
self.gp = gp
self.acq_func = acq_func
self.epsilon = epsilon
def minimise(self, objective, bounds, iters, iter_loops=20,
sub_opt_iters=100, sub_opt_lr=0.05, n_inits=5, verbose=True):
x_hist = None
y_hist = None
y_min = float('inf')
x_min = None
beta1 = 0.9
beta2 = 0.99
for x_sample in bounds[:,0].view(1,-1) + torch.rand((n_inits,1)) * \
(bounds[:,1] - bounds[:,0]).view(1,-1):
y_sample = objective(x_sample)
if y_sample < y_min:
y_min = y_sample
x_min = x_sample
if x_hist is not None:
x_hist = torch.cat((x_hist, x_sample.view(1,-1)), dim=0)
y_hist = torch.cat((y_hist, y_sample.view(1,-1)), dim=0)
else:
x_hist = x_sample.view(1,-1)
y_hist = y_sample.view(1,-1)
for i in range(iters):
self.gp.kernel.train()
self.gp.kernel.reset_params()
self.gp.fit(x_hist, y_hist)
self.gp.kernel.eval()
y_acq = float('inf')
with torch.no_grad():
for j in range(iter_loops):
x0 = bounds[:,0] + torch.rand(bounds.size(0)) * (bounds[:,1] - bounds[:,0])
V_dx = torch.zeros(x0.size())
S_dx = torch.zeros(x0.size())
for k in range(sub_opt_iters):
dx = self.acq_func.grad(x0, y_min, self.gp).flatten()
V_dx = beta1 * V_dx + (1 - beta1) * dx
V_dx_corr = V_dx / (1 - beta1**(k+1))
S_dx = beta2 * S_dx + (1 - beta2) * dx.pow(2)
S_dx_corr = S_dx / (1 - beta2**(k+1))
grad = V_dx_corr / (torch.sqrt(S_dx_corr + 1e-8))
x0 = x0 - sub_opt_lr * grad
x_bounded = torch.min(torch.max(x0.data, bounds[:,0]), bounds[:,1])
x0.data = x_bounded
acq = self.acq_func(x0, y_min, self.gp)
if acq < y_acq:
y_acq = acq
x_sample = x0
distances = (x_sample.view(1,-1) - x_hist).pow(2).sum(1).pow(0.5)
if torch.any(distances <= self.epsilon):
x_sample = bounds[:,0] + torch.rand(bounds.size(0)) * (bounds[:,1] - bounds[:,0])
y_sample = objective(x_sample)
x_hist = torch.cat((x_hist, x_sample.view(1,-1)), dim=0)
y_hist = torch.cat((y_hist, y_sample.view(1,-1)), dim=0)
if y_sample < y_min:
y_min = y_sample
x_min = x_sample
if verbose:
sys.stdout.write('\rIteration: %d Minimum: %4.2f ' % \
(i+1, y_min))
sys.stdout.flush()
return x_min, y_min, x_hist, y_hist
class DynamicBayesOptimiser(object):
def __init__(self, gp, acq_func, epsilon=1e-8):
self.gp = gp
self.acq_func = acq_func
self.epsilon = epsilon
def minimise(self, objective, bounds, iters, iter_loops=20,
sub_opt_iters=100, sub_opt_lr=0.01, n_inits=5, verbose=True):
x_hist = None
t_hist = None
y_hist = None
y_min = float('inf')
x_min = None
beta1 = 0.9
beta2 = 0.99
for x_sample in bounds[:,0].view(1,-1) + torch.rand((n_inits,1)) * \
(bounds[:,1] - bounds[:,0]).view(1,-1):
y_sample = objective(x_sample)
if y_sample < y_min:
y_min = y_sample
x_min = x_sample
if x_hist is not None:
x_hist = torch.cat((x_hist, x_sample.view(1,-1)), dim=0)
t_hist = torch.cat((t_hist, t_hist[-1] + torch.ones((1,1))), dim=0)
y_hist = torch.cat((y_hist, y_sample.view(1,-1)), dim=0)
else:
x_hist = x_sample.view(1,-1)
t_hist = torch.ones((1,1))
y_hist = y_sample.view(1,-1)
for i in range(iters):
self.gp.space_kernel.train()
self.gp.time_kernel.train()
self.gp.space_kernel.reset_params()
self.gp.time_kernel.reset_params()
self.gp.fit(x_hist, t_hist, y_hist)
self.gp.space_kernel.eval()
self.gp.time_kernel.eval()
y_acq = float('inf')
with torch.no_grad():
for j in range(iter_loops):
x0 = bounds[:,0] + torch.rand(bounds.size(0)) * (bounds[:,1] - bounds[:,0])
V_dx = torch.zeros(x0.size())
S_dx = torch.zeros(x0.size())
for k in range(sub_opt_iters):
dx = self.acq_func.grad(x0, t_hist[-1] + torch.ones((1,1)), y_min, self.gp).flatten()
V_dx = beta1 * V_dx + (1 - beta1) * dx
V_dx_corr = V_dx / (1 - beta1**(k+1))
S_dx = beta2 * S_dx + (1 - beta2) * dx.pow(2)
S_dx_corr = S_dx / (1 - beta2**(k+1))
grad = V_dx_corr / (torch.sqrt(S_dx_corr + 1e-8))
x0 = x0 - sub_opt_lr * grad
x_bounded = torch.min(torch.max(x0.data, bounds[:,0]), bounds[:,1])
x0.data = x_bounded
acq = self.acq_func(x0, t_hist[-1] + torch.ones((1,1)), y_min, self.gp)
if acq < y_acq:
y_acq = acq
x_sample = x0
distances = (x_sample.view(1,-1) - x_hist).pow(2).sum(1).pow(0.5)
if torch.any(distances <= self.epsilon):
x_sample = bounds[:,0] + torch.rand(bounds.size(0)) * (bounds[:,1] - bounds[:,0])
y_sample = objective(x_sample)
x_hist = torch.cat((x_hist, x_sample.view(1,-1)), dim=0)
t_hist = torch.cat((t_hist, t_hist[-1] + torch.ones((1,1))), dim=0)
y_hist = torch.cat((y_hist, y_sample.view(1,-1)), dim=0)
if y_sample < y_min:
y_min = y_sample
x_min = x_sample
if verbose:
sys.stdout.write('\rIteration: %d Minimum: %4.2f ' % \
(i+1, y_min))
sys.stdout.flush()
return x_min, y_min, x_hist, y_hist
class Acquisition(object):
def __init__(self, beta):
self.beta = beta
def __call__(self, x, y_min, gp):
if len(x.size()) < 2:
x = x.view(1,-1)
mu, sigma = gp.predict(x, return_var=True)
out = mu - self.beta*sigma
return out
def grad(self, x, y_min, gp):
if len(x.size()) < 2:
x = x.view(1,-1)
mu_grad = gp.mu_grad(x)
sigma_grad = gp.sigma_grad(x)
grad = mu_grad - self.beta*sigma_grad
return grad
class DynamicAcquisition(object):
def __init__(self, beta):
self.beta = beta
def __call__(self, x, t, y_min, gp):
if len(x.size()) < 2:
x = x.view(1,-1)
t = t.view(1,-1)
mu, sigma = gp.predict(x, t, return_var=True)
out = mu - self.beta*sigma
return out
def grad(self, x, t, y_min, gp):
if len(x.size()) < 2:
x = x.view(1,-1)
t = t.view(1,-1)
mu_grad = gp.mu_grad(x, t)
sigma_grad = gp.sigma_grad(x, t)
grad = mu_grad - self.beta*sigma_grad
return grad
```
|
{
"source": "jeremyll/python-gearman",
"score": 2
}
|
#### File: python-gearman/tests/client_tests.py
```python
import collections
import random
import unittest
from gearman.client import GearmanClient
from gearman.client_handler import GearmanClientCommandHandler
from gearman.constants import PRIORITY_NONE, PRIORITY_HIGH, PRIORITY_LOW, JOB_UNKNOWN, JOB_PENDING, JOB_CREATED, JOB_FAILED, JOB_COMPLETE
from gearman.errors import ExceededConnectionAttempts, ServerUnavailable, InvalidClientState
from gearman.protocol import submit_cmd_for_background_priority, GEARMAN_COMMAND_STATUS_RES, GEARMAN_COMMAND_GET_STATUS, GEARMAN_COMMAND_JOB_CREATED, \
GEARMAN_COMMAND_WORK_STATUS, GEARMAN_COMMAND_WORK_FAIL, GEARMAN_COMMAND_WORK_COMPLETE, GEARMAN_COMMAND_WORK_DATA, GEARMAN_COMMAND_WORK_WARNING
from tests._core_testing import _GearmanAbstractTest, MockGearmanConnectionManager, MockGearmanConnection
class MockGearmanClient(GearmanClient, MockGearmanConnectionManager):
pass
class ClientTest(_GearmanAbstractTest):
"""Test the public client interface"""
connection_manager_class = MockGearmanClient
command_handler_class = GearmanClientCommandHandler
def setUp(self):
super(ClientTest, self).setUp()
self.original_handle_connection_activity = self.connection_manager.handle_connection_activity
def tearDown(self):
super(ClientTest, self).tearDown()
self.connection_manager.handle_connection_activity = self.original_handle_connection_activity
def generate_job_request(self, submitted=True, accepted=True):
current_request = super(ClientTest, self).generate_job_request()
if submitted or accepted:
self.connection_manager.establish_request_connection(current_request)
self.command_handler.send_job_request(current_request)
if submitted and accepted:
self.command_handler.recv_command(GEARMAN_COMMAND_JOB_CREATED, job_handle=current_request.job.handle)
self.assert_(current_request.job.handle in self.command_handler.handle_to_request_map)
return current_request
def test_establish_request_connection_complex(self):
# Spin up a bunch of imaginary gearman connections
failed_connection = MockGearmanConnection()
failed_connection._fail_on_bind = True
failed_then_retried_connection = MockGearmanConnection()
failed_then_retried_connection._fail_on_bind = True
good_connection = MockGearmanConnection()
good_connection.connect()
# Register all our connections
self.connection_manager.connection_list = [failed_connection, failed_then_retried_connection, good_connection]
# When we first create our request, our client shouldn't know anything about it
current_request = self.generate_job_request(submitted=False, accepted=False)
self.failIf(current_request in self.connection_manager.request_to_rotating_connection_queue)
# Make sure that when we start up, we get our good connection
chosen_connection = self.connection_manager.establish_request_connection(current_request)
self.assertEqual(chosen_connection, good_connection)
self.assertFalse(failed_connection.connected)
self.assertFalse(failed_then_retried_connection.connected)
self.assertTrue(good_connection.connected)
# No state changed so we should still go to the correct connection
chosen_connection = self.connection_manager.establish_request_connection(current_request)
self.assertEqual(chosen_connection, good_connection)
        # Pretend like our good connection died so we'll need to choose something else
good_connection._reset_connection()
good_connection._fail_on_bind = True
failed_then_retried_connection._fail_on_bind = False
failed_then_retried_connection.connect()
# Make sure we rotate good_connection and failed_connection out
chosen_connection = self.connection_manager.establish_request_connection(current_request)
self.assertEqual(chosen_connection, failed_then_retried_connection)
self.assertFalse(failed_connection.connected)
self.assertTrue(failed_then_retried_connection.connected)
self.assertFalse(good_connection.connected)
def test_establish_request_connection_dead(self):
self.connection_manager.connection_list = []
self.connection_manager.command_handlers = {}
current_request = self.generate_job_request(submitted=False, accepted=False)
# No connections == death
self.assertRaises(ServerUnavailable, self.connection_manager.establish_request_connection, current_request)
# Spin up a bunch of imaginary gearman connections
failed_connection = MockGearmanConnection()
failed_connection._fail_on_bind = True
self.connection_manager.connection_list.append(failed_connection)
# All failed connections == death
self.assertRaises(ServerUnavailable, self.connection_manager.establish_request_connection, current_request)
def test_auto_retry_behavior(self):
current_request = self.generate_job_request(submitted=False, accepted=False)
def fail_then_create_jobs(rx_conns, wr_conns, ex_conns):
if self.connection_manager.current_failures < self.connection_manager.expected_failures:
self.connection_manager.current_failures += 1
# We're going to down this connection and reset state
self.assertTrue(self.connection.connected)
self.connection_manager.handle_error(self.connection)
self.assertFalse(self.connection.connected)
# We're then going to IMMEDIATELY pull this connection back up
# So we don't bail out of the "self.connection_manager.poll_connections_until_stopped" loop
self.connection_manager.establish_connection(self.connection)
else:
self.assertEquals(current_request.state, JOB_PENDING)
self.command_handler.recv_command(GEARMAN_COMMAND_JOB_CREATED, job_handle=current_request.job.handle)
return rx_conns, wr_conns, ex_conns
self.connection_manager.handle_connection_activity = fail_then_create_jobs
self.connection_manager.expected_failures = 5
        # Now that we've set up our retry behavior, we need to reset the entire state of our experiment
# First pass should succeed as we JUST touch our max attempts
self.connection_manager.current_failures = current_request.connection_attempts = 0
current_request.max_connection_attempts = self.connection_manager.expected_failures + 1
current_request.state = JOB_UNKNOWN
accepted_jobs = self.connection_manager.wait_until_jobs_accepted([current_request])
self.assertEquals(current_request.state, JOB_CREATED)
self.assertEquals(current_request.connection_attempts, current_request.max_connection_attempts)
# Second pass should fail as we JUST exceed our max attempts
self.connection_manager.current_failures = current_request.connection_attempts = 0
current_request.max_connection_attempts = self.connection_manager.expected_failures
current_request.state = JOB_UNKNOWN
self.assertRaises(ExceededConnectionAttempts, self.connection_manager.wait_until_jobs_accepted, [current_request])
self.assertEquals(current_request.state, JOB_UNKNOWN)
self.assertEquals(current_request.connection_attempts, current_request.max_connection_attempts)
def test_multiple_fg_job_submission(self):
submitted_job_count = 5
expected_job_list = [self.generate_job() for _ in xrange(submitted_job_count)]
def mark_jobs_created(rx_conns, wr_conns, ex_conns):
for current_job in expected_job_list:
self.command_handler.recv_command(GEARMAN_COMMAND_JOB_CREATED, job_handle=current_job.handle)
return rx_conns, wr_conns, ex_conns
self.connection_manager.handle_connection_activity = mark_jobs_created
job_dictionaries = [current_job.to_dict() for current_job in expected_job_list]
# Test multiple job submission
job_requests = self.connection_manager.submit_multiple_jobs(job_dictionaries, wait_until_complete=False)
for current_request, expected_job in zip(job_requests, expected_job_list):
current_job = current_request.job
self.assert_jobs_equal(current_job, expected_job)
self.assertEqual(current_request.priority, PRIORITY_NONE)
self.assertEqual(current_request.background, False)
self.assertEqual(current_request.state, JOB_CREATED)
self.assertFalse(current_request.complete)
def test_single_bg_job_submission(self):
expected_job = self.generate_job()
def mark_job_created(rx_conns, wr_conns, ex_conns):
self.command_handler.recv_command(GEARMAN_COMMAND_JOB_CREATED, job_handle=expected_job.handle)
return rx_conns, wr_conns, ex_conns
self.connection_manager.handle_connection_activity = mark_job_created
job_request = self.connection_manager.submit_job(expected_job.task, expected_job.data, unique=expected_job.unique, background=True, priority=PRIORITY_LOW, wait_until_complete=False)
current_job = job_request.job
self.assert_jobs_equal(current_job, expected_job)
self.assertEqual(job_request.priority, PRIORITY_LOW)
self.assertEqual(job_request.background, True)
self.assertEqual(job_request.state, JOB_CREATED)
self.assertTrue(job_request.complete)
def test_single_fg_job_submission_timeout(self):
expected_job = self.generate_job()
def job_failed_submission(rx_conns, wr_conns, ex_conns):
return rx_conns, wr_conns, ex_conns
self.connection_manager.handle_connection_activity = job_failed_submission
job_request = self.connection_manager.submit_job(expected_job.task, expected_job.data, unique=expected_job.unique, priority=PRIORITY_HIGH, poll_timeout=0.01)
self.assertEqual(job_request.priority, PRIORITY_HIGH)
self.assertEqual(job_request.background, False)
self.assertEqual(job_request.state, JOB_PENDING)
self.assertFalse(job_request.complete)
self.assertTrue(job_request.timed_out)
def test_wait_for_multiple_jobs_to_complete_or_timeout(self):
completed_request = self.generate_job_request()
failed_request = self.generate_job_request()
timeout_request = self.generate_job_request()
self.update_requests = True
def multiple_job_updates(rx_conns, wr_conns, ex_conns):
# Only give a single status update and have the 3rd job handle timeout
if self.update_requests:
self.command_handler.recv_command(GEARMAN_COMMAND_WORK_COMPLETE, job_handle=completed_request.job.handle, data='12345')
self.command_handler.recv_command(GEARMAN_COMMAND_WORK_FAIL, job_handle=failed_request.job.handle)
self.update_requests = False
return rx_conns, wr_conns, ex_conns
self.connection_manager.handle_connection_activity = multiple_job_updates
finished_requests = self.connection_manager.wait_until_jobs_completed([completed_request, failed_request, timeout_request], poll_timeout=0.01)
del self.update_requests
finished_completed_request, finished_failed_request, finished_timeout_request = finished_requests
self.assert_jobs_equal(finished_completed_request.job, completed_request.job)
self.assertEqual(finished_completed_request.state, JOB_COMPLETE)
self.assertEqual(finished_completed_request.result, '12345')
self.assertFalse(finished_completed_request.timed_out)
#self.assert_(finished_completed_request.job.handle not in self.command_handler.handle_to_request_map)
self.assert_jobs_equal(finished_failed_request.job, failed_request.job)
self.assertEqual(finished_failed_request.state, JOB_FAILED)
self.assertEqual(finished_failed_request.result, None)
self.assertFalse(finished_failed_request.timed_out)
#self.assert_(finished_failed_request.job.handle not in self.command_handler.handle_to_request_map)
self.assertEqual(finished_timeout_request.state, JOB_CREATED)
self.assertEqual(finished_timeout_request.result, None)
self.assertTrue(finished_timeout_request.timed_out)
self.assert_(finished_timeout_request.job.handle in self.command_handler.handle_to_request_map)
def test_get_job_status(self):
single_request = self.generate_job_request()
def retrieve_status(rx_conns, wr_conns, ex_conns):
self.command_handler.recv_command(GEARMAN_COMMAND_STATUS_RES, job_handle=single_request.job.handle, known='1', running='0', numerator='0', denominator='1')
return rx_conns, wr_conns, ex_conns
self.connection_manager.handle_connection_activity = retrieve_status
job_request = self.connection_manager.get_job_status(single_request)
request_status = job_request.status
self.failUnless(request_status)
self.assertTrue(request_status['known'])
self.assertFalse(request_status['running'])
self.assertEqual(request_status['numerator'], 0)
self.assertEqual(request_status['denominator'], 1)
self.assertFalse(job_request.timed_out)
def test_get_job_status_unknown(self):
single_request = self.generate_job_request()
current_handle = single_request.job.handle
self.command_handler.recv_command(GEARMAN_COMMAND_WORK_FAIL, job_handle=current_handle)
def retrieve_status(rx_conns, wr_conns, ex_conns):
self.command_handler.recv_command(GEARMAN_COMMAND_STATUS_RES, job_handle=current_handle, known='0', running='0', numerator='0', denominator='1')
return rx_conns, wr_conns, ex_conns
self.connection_manager.handle_connection_activity = retrieve_status
job_request = self.connection_manager.get_job_status(single_request)
request_status = job_request.status
self.failUnless(request_status)
self.assertFalse(request_status['known'])
self.assertFalse(request_status['running'])
self.assertEqual(request_status['numerator'], 0)
self.assertEqual(request_status['denominator'], 1)
self.assertFalse(job_request.timed_out)
#self.assert_(current_handle not in self.command_handler.handle_to_request_map)
def test_get_job_status_timeout(self):
single_request = self.generate_job_request()
def retrieve_status_timeout(rx_conns, wr_conns, ex_conns):
return rx_conns, wr_conns, ex_conns
self.connection_manager.handle_connection_activity = retrieve_status_timeout
job_request = self.connection_manager.get_job_status(single_request, poll_timeout=0.01)
self.assertTrue(job_request.timed_out)
class ClientCommandHandlerInterfaceTest(_GearmanAbstractTest):
"""Test the public interface a GearmanClient may need to call in order to update state on a GearmanClientCommandHandler"""
connection_manager_class = MockGearmanClient
command_handler_class = GearmanClientCommandHandler
def test_send_job_request(self):
current_request = self.generate_job_request()
gearman_job = current_request.job
for priority in (PRIORITY_NONE, PRIORITY_HIGH, PRIORITY_LOW):
for background in (False, True):
current_request.reset()
current_request.priority = priority
current_request.background = background
self.command_handler.send_job_request(current_request)
queued_request = self.command_handler.requests_awaiting_handles.popleft()
self.assertEqual(queued_request, current_request)
expected_cmd_type = submit_cmd_for_background_priority(background, priority)
self.assert_sent_command(expected_cmd_type, task=gearman_job.task, data=gearman_job.data, unique=gearman_job.unique)
def test_get_status_of_job(self):
current_request = self.generate_job_request()
self.command_handler.send_get_status_of_job(current_request)
self.assert_sent_command(GEARMAN_COMMAND_GET_STATUS, job_handle=current_request.job.handle)
class ClientCommandHandlerStateMachineTest(_GearmanAbstractTest):
"""Test single state transitions within a GearmanWorkerCommandHandler"""
connection_manager_class = MockGearmanClient
command_handler_class = GearmanClientCommandHandler
def generate_job_request(self, submitted=True, accepted=True):
current_request = super(ClientCommandHandlerStateMachineTest, self).generate_job_request()
if submitted or accepted:
self.command_handler.requests_awaiting_handles.append(current_request)
current_request.state = JOB_PENDING
if submitted and accepted:
self.command_handler.recv_command(GEARMAN_COMMAND_JOB_CREATED, job_handle=current_request.job.handle)
return current_request
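    # Illustrative state flow exercised by the tests below (inferred from the
    # assertions, not stated in the source): the request moves from
    # JOB_UNKNOWN -> JOB_PENDING once queued awaiting a handle,
    # -> JOB_CREATED when GEARMAN_COMMAND_JOB_CREATED arrives, and finally
    # -> JOB_COMPLETE / JOB_FAILED on WORK_COMPLETE / WORK_FAIL.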
def test_received_job_created(self):
current_request = self.generate_job_request(accepted=False)
new_handle = str(random.random())
self.command_handler.recv_command(GEARMAN_COMMAND_JOB_CREATED, job_handle=new_handle)
self.assertEqual(current_request.job.handle, new_handle)
self.assertEqual(current_request.state, JOB_CREATED)
self.assertEqual(self.command_handler.handle_to_request_map[new_handle], current_request)
def test_received_job_created_out_of_order(self):
self.assertEqual(self.command_handler.requests_awaiting_handles, collections.deque())
        # Make sure we bail out because the queue of pending requests is empty
self.assertRaises(InvalidClientState, self.command_handler.recv_command, GEARMAN_COMMAND_JOB_CREATED, job_handle=None)
def test_required_state_pending(self):
current_request = self.generate_job_request(submitted=False, accepted=False)
new_handle = str(random.random())
invalid_states = [JOB_UNKNOWN, JOB_CREATED, JOB_COMPLETE, JOB_FAILED]
for bad_state in invalid_states:
current_request.state = bad_state
            # We only want to check the request's state... not fail because there are no pending requests
self.command_handler.requests_awaiting_handles.append(current_request)
self.assertRaises(InvalidClientState, self.command_handler.recv_command, GEARMAN_COMMAND_JOB_CREATED, job_handle=new_handle)
def test_required_state_queued(self):
current_request = self.generate_job_request()
job_handle = current_request.job.handle
new_data = str(random.random())
invalid_states = [JOB_UNKNOWN, JOB_PENDING, JOB_COMPLETE, JOB_FAILED]
for bad_state in invalid_states:
current_request.state = bad_state
# All these commands expect to be in JOB_CREATED
self.assertRaises(InvalidClientState, self.command_handler.recv_command, GEARMAN_COMMAND_WORK_DATA, job_handle=job_handle, data=new_data)
self.assertRaises(InvalidClientState, self.command_handler.recv_command, GEARMAN_COMMAND_WORK_WARNING, job_handle=job_handle, data=new_data)
self.assertRaises(InvalidClientState, self.command_handler.recv_command, GEARMAN_COMMAND_WORK_STATUS, job_handle=job_handle, numerator=0, denominator=1)
self.assertRaises(InvalidClientState, self.command_handler.recv_command, GEARMAN_COMMAND_WORK_COMPLETE, job_handle=job_handle, data=new_data)
self.assertRaises(InvalidClientState, self.command_handler.recv_command, GEARMAN_COMMAND_WORK_FAIL, job_handle=job_handle)
def test_in_flight_work_updates(self):
current_request = self.generate_job_request()
job_handle = current_request.job.handle
new_data = str(random.random())
# Test WORK_DATA
self.command_handler.recv_command(GEARMAN_COMMAND_WORK_DATA, job_handle=job_handle, data=new_data)
self.assertEqual(current_request.data_updates.popleft(), new_data)
self.assertEqual(current_request.state, JOB_CREATED)
# Test WORK_WARNING
self.command_handler.recv_command(GEARMAN_COMMAND_WORK_WARNING, job_handle=job_handle, data=new_data)
self.assertEqual(current_request.warning_updates.popleft(), new_data)
self.assertEqual(current_request.state, JOB_CREATED)
# Test WORK_STATUS
self.command_handler.recv_command(GEARMAN_COMMAND_WORK_STATUS, job_handle=job_handle, numerator=0, denominator=1)
self.assertEqual(current_request.status_updates.popleft(), (0, 1))
self.assertEqual(current_request.state, JOB_CREATED)
def test_work_complete(self):
current_request = self.generate_job_request()
job_handle = current_request.job.handle
new_data = str(random.random())
self.command_handler.recv_command(GEARMAN_COMMAND_WORK_COMPLETE, job_handle=job_handle, data=new_data)
self.assertEqual(current_request.result, new_data)
self.assertEqual(current_request.state, JOB_COMPLETE)
def test_work_fail(self):
current_request = self.generate_job_request()
job_handle = current_request.job.handle
new_data = str(random.random())
self.command_handler.recv_command(GEARMAN_COMMAND_WORK_FAIL, job_handle=job_handle)
self.assertEqual(current_request.state, JOB_FAILED)
def test_status_request(self):
current_request = self.generate_job_request()
job_handle = current_request.job.handle
self.assertEqual(current_request.status, {})
self.command_handler.recv_command(GEARMAN_COMMAND_STATUS_RES, job_handle=job_handle, known='1', running='1', numerator='0', denominator='1')
self.assertEqual(current_request.status['handle'], job_handle)
self.assertTrue(current_request.status['known'])
self.assertTrue(current_request.status['running'])
self.assertEqual(current_request.status['numerator'], 0)
self.assertEqual(current_request.status['denominator'], 1)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jeremylow/dri",
"score": 2
}
|
#### File: example/simple_uploader/models.py
```python
from django.db import models
from django.core.urlresolvers import reverse
from django_dri.fields import ResponsiveImageField
class ImageModel(models.Model):
""" Class to provide storage of an image """
image = ResponsiveImageField(upload_to='images')
def get_absolute_url(self):
return reverse('viewer', kwargs={'pk': self.id})
```
|
{
"source": "jeremylowery/stypes",
"score": 3
}
|
#### File: stypes/stypes/numeric.py
```python
import decimal
import re
from cStringIO import StringIO
from .spec import Spec
from .util import UnconvertedValue, InvalidSpecError
class Integer(Spec):
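    # Fixed-width integer field. Illustrative round trip (a sketch based on
    # the methods below): Integer(3).from_bytes('031') -> 31 and
    # Integer(3).to_bytes(31) -> '031' (right-justified, padded with `pad`).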
def __init__(self, width, pad='0'):
self.width = width
self.pad = pad
def from_bytes(self, text):
clean = text.strip()
if not clean:
return None
try:
            return int(clean)
except ValueError:
return UnconvertedValue(text, 'expecting all digits for integer')
def to_bytes(self, value):
if value is None:
text = ''
else:
text = str(value)
if len(text) > self.width:
msg = 'Cannot fit into text of width %d' % self.width
return UnconvertedValue(text, msg)
elif len(text) < self.width:
return text.rjust(self.width, self.pad)
else:
return text
class NumericFormatError(InvalidSpecError):
pass
class Numeric(Spec):
""" 9(6)V99
999V99
999.99
999,999.99
"""
width = property(lambda s: s._width)
def __init__(self, fspec):
self._fspec = fspec
self._converters = []
self._build_convert_procs()
self._compute_precision()
self._width = sum(c.width for c in self._converters)
def from_bytes(self, text):
if not text.strip():
return None
if len(text) != self._width:
text = text.rjust(self._width)
text_input = StringIO(text)
decimal_input = ConvertState()
for converter in self._converters:
err = converter.write_decimal_input(text_input, decimal_input)
if err:
return UnconvertedValue(text, err)
try:
v = decimal.Decimal(decimal_input.getvalue())
return v if decimal_input.positive else -v
except decimal.InvalidOperation, e:
            return UnconvertedValue(text, str(e))
def to_bytes(self, value):
if value is None:
return ' '*self._width
text = self._precision_fmt % abs(value)
buf = ConvertState(text[::-1])
if value < 0:
buf.positive = False
out = StringIO()
#print self._fspec, value, buf.getvalue(), self._precision_fmt, list(reversed(self._converters))
for converter in reversed(self._converters):
err = converter.write_output_text(buf, out)
if err:
return UnconvertedValue(text, err)
return out.getvalue()[::-1]
def _compute_precision(self):
""" The precision of the numeric value. We have to have this so when
we write the data out to text it will pad out correctly """
prec = 0
adding = False
for c in self._converters:
# find a decimal point
if isinstance(c, (VConverter, DECIMALConverter)):
adding = True
            elif isinstance(c, SIGNConverter):
pass
# add all the numbers past it
elif adding:
prec += c.width
self._precision_fmt = "%." + str(prec) + "f"
def _build_convert_procs(self):
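        # Illustrative parse (a sketch of the state machine below): the
        # picture clause "9(3)V99" yields
        # [NINEConverter(3), VConverter(), NINEConverter(2)];
        # "(n)" repeats the preceding 9 so that it counts n times in total.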
nine_count = 0
paren_digits = ''
symbols = list(self._fspec)
state = 'START'
has_sign = False
DIGIT = re.compile("\d")
for position, symbol in enumerate(self._fspec):
position += 1
if state == 'START':
if symbol == '9':
state = 'NINE'
nine_count = 1
elif symbol == '.':
self._converters.append(DECIMALConverter())
elif symbol == ',':
self._converters.append(COMMAConverter())
elif symbol == 'V':
self._converters.append(VConverter())
elif symbol == 'S':
if has_sign:
raise NumericFormatError("Unexpected sign at "
"position %s. Only one S allowed." % position)
has_sign = True
self._converters.append(SIGNConverter())
elif symbol == ' ':
self._converters.append(SPACEConverter())
else:
raise NumericFormatError("Unexpected character %r at "
"position %s" % (symbol, position))
elif state == 'NINE':
if symbol == '9':
nine_count += 1
elif symbol == '.':
self._converters.append(NINEConverter(nine_count))
self._converters.append(DECIMALConverter())
nine_count = 0
state = 'START'
elif symbol == ',':
self._converters.append(NINEConverter(nine_count))
self._converters.append(COMMAConverter())
nine_count = 0
state = 'START'
elif symbol == 'V':
self._converters.append(NINEConverter(nine_count))
self._converters.append(VConverter())
nine_count = 0
state = 'START'
elif symbol == 'S':
if has_sign:
raise NumericFormatError("Unexpected sign at "
"position %s. Only one S allowed." % position)
has_sign = True
self._converters.append(NINEConverter(nine_count))
self._converters.append(SIGNConverter())
nine_count = 0
state = 'START'
elif symbol == '(':
state = 'LPAREN'
else:
raise NumericFormatError("Unexpected character %r at "
"position %s" % (symbol, position))
elif state == 'LPAREN':
if DIGIT.match(symbol):
paren_digits += symbol
elif symbol == ')':
                    # Subtract 1 because the 9 that preceded the "(" has
                    # already been counted once.
if paren_digits: # Weird case of 9()
pd = int(paren_digits)
if pd != 0: # Weird case of 9(0)
nine_count += int(paren_digits) - 1
paren_digits = ''
state = 'NINE'
else:
raise NumericFormatError("Unexpected character %r at "
"position %s" % (symbol, position))
if state == 'NINE':
self._converters.append(NINEConverter(nine_count))
elif state == 'LPAREN':
raise NumericFormatError("Unexpected end of input. expected )")
class ConvertState(object):
""" We need a stateful object to keep track of whether the number is
    positive or negative. If we could, we would have just added an attribute
    to the StringIO buffer.
"""
def __init__(self, iv=None):
if iv is None:
self.buf = StringIO()
else:
self.buf = StringIO(iv)
self.positive = True
def read(self, n):
return self.buf.read(n)
def write(self, v):
self.buf.write(v)
def getvalue(self):
return self.buf.getvalue()
class VConverter(object):
width = property(lambda s: 0)
def __repr__(self):
return "V"
def write_decimal_input(self, inp, outp):
outp.write(".")
def write_output_text(self, inp, outp):
v = inp.read(1)
if v != ".":
return "Excepted '.' found %r" % v
class SIGNConverter(object):
width = property(lambda s: 1)
def __repr__(self):
return "S"
def write_decimal_input(self, inp, outp):
v = inp.read(1)
if v == "-":
outp.positive = False
else:
outp.positive = True
def write_output_text(self, inp, outp):
if inp.positive:
outp.write(" ")
else:
outp.write("-")
class DECIMALConverter(object):
width = property(lambda s: 1)
def __repr__(self):
return "."
def write_decimal_input(self, inp, outp):
v = inp.read(1)
if v != ".":
return "Excepted '.' found %r" % v
outp.write(".")
def write_output_text(self, inp, outp):
v = inp.read(1)
if v != ".":
return "Excepted '.' found %r" % v
outp.write(".")
class COMMAConverter(object):
width = property(lambda s: 1)
def __repr__(self):
return ","
def write_decimal_input(self, inp, outp):
v = inp.read(1)
if v != ",":
return "Excepted ',' found %r" % v
def write_output_text(self, inp, outp):
outp.write(",")
class SPACEConverter(object):
def __repr__(self):
return "_"
width = property(lambda s: 1)
def write_decimal_input(self, inp, outp):
""" We ignore whatever is in the input on a space """
inp.read(1)
def write_output_text(self, inp, outp):
outp.write(" ")
class NINEConverter(object):
width = property(lambda s: s._count)
def __init__(self, count):
self._count = count
self._matcher = re.compile("[ \d]{%s}" % count)
def __repr__(self):
return "9(%d)" % self.width
def write_decimal_input(self, inp, outp):
inv = inp.read(self._count)
if not self._matcher.match(inv):
return "Expected %s digits. Found %r" % (self._count, inv)
outp.write(inv.replace(" ", "0"))
def write_output_text(self, inp, outp):
# change "00.25" into "00025" with
# "00.20" into "00020"
# 9(3),V,9(2)
# need to process this backwards
bytes = inp.read(self.width)
if not re.match("^\d*$", bytes):
return "Found non numeric data %r in value" % bytes
if len(bytes) != self.width:
bytes = bytes.ljust(self.width, "0")
outp.write(bytes)
```
#### File: stypes/stypes/test_mapping.py
```python
import unittest
from .mapping import Dict
from .odict import OrderedDict
from .numeric import Integer, Numeric, NumericFormatError
from .sequence import Array
from .spec import String
class OrderedDictTestCase(unittest.TestCase):
def setUp(self):
self._spec = OrderedDict([
('first_name', String(12)),
('last_name', String(15)),
('middle_initial', String(1)),
('age', Integer(3)),
('colors', Array(3, Integer(4)))])
def test_mainline(self):
inp = "jeremy lowery s031000100020003"
rec = self._spec.unpack(inp)
self.assertEquals(rec['first_name'], 'jeremy')
self.assertEquals(rec['last_name'], 'lowery')
self.assertEquals(rec['middle_initial'], 's')
self.assertEquals(rec['age'], 31)
self.assertEquals(rec['colors'], [1, 2, 3])
self.assertEquals(rec.items(), [
('first_name', 'jeremy'),
('last_name', 'lowery'),
('middle_initial', 's'),
('age', 31),
('colors', [1, 2, 3])
])
self.assertEquals(rec.pack(), inp)
def test_update(self):
inp = "jeremy lowery s031000100020003"
rec = self._spec.unpack(inp)
rec['last_name'] = 'smith'
outp = "jeremy smith s031000100020003"
self.assertEquals(rec.pack(), outp)
try:
del rec['last_name']
self.assertTrue(False, 'deleting did not raise TypeError')
except TypeError:
pass
try:
rec.clear()
self.assertTrue(False, 'clearing did not raise TypeError')
except TypeError:
pass
rec.update({'age': '35', 'colors': ['2', '4', '3']})
out = 'jeremy smith s035000200040003'
self.assertEquals(rec.pack(), out)
class NDictTestCase(unittest.TestCase):
def setUp(self):
self._spec = Dict([
('first_name', 12),
('last_name', 15),
('middle_initial', 1),
('age', Integer(3))])
def test_foobar(self):
inp = "jeremy lowery s031"
rec = self._spec.unpack(inp)
self.assertEquals(rec['age'], 31)
def test_update(self):
inp = "jeremy lowery s031000100020003"
outp ="jeremy smith s031"
rec = self._spec.unpack(inp)
rec['last_name'] = 'smith'
self.assertEquals(rec.pack(), outp)
try:
del rec['last_name']
self.assertTrue(False, 'deleting did not raise TypeError')
except TypeError:
pass
try:
rec.clear()
self.assertTrue(False, 'clearing did not raise TypeError')
except TypeError:
pass
class DictTestCase(unittest.TestCase):
def setUp(self):
self._spec = Dict([
('first_name', String(12)),
('last_name', String(15)),
('middle_initial', String(1)),
('age', Integer(3)),
('colors', Array(3, Integer(4)))])
def test_mainline(self):
inp = "jeremy lowery s031000100020003"
rec = self._spec.unpack(inp)
self.assertEquals(rec['first_name'], 'jeremy')
self.assertEquals(rec['last_name'], 'lowery')
self.assertEquals(rec['middle_initial'], 's')
self.assertEquals(rec['age'], 31)
self.assertEquals(rec['colors'], [1, 2, 3])
def test_assignment(self):
inp = "jeremy lowery s031000100020003"
rec = self._spec.unpack(inp)
rec['last_name'] = 'smith'
rec['age'] = 30
outp = 'jeremy smith s030000100020003'
self.assertEquals(rec.pack(), outp)
def test_sublist_assignment(self):
rec = self._spec.unpack('')
rec['colors'][:] = '123'
def test_unpack_with_explicit_type_spec(self):
spec = Dict([
('first_name', String(12)),
('last_name', String(15)),
('middle_initial', String(1)),
('age', String(3)),
('colors', Array(3, String(4)))])
inp = "jeremy lowery s031000100020003"
rec = spec.unpack(inp)
self.assertEquals(rec['first_name'], 'jeremy')
self.assertEquals(rec['last_name'], 'lowery')
self.assertEquals(rec['middle_initial'], 's')
self.assertEquals(rec['age'], '031')
self.assertEquals(rec['colors'], ['0001', '0002', '0003'])
def test_unpack_with_structured_data_spec(self):
spec = Dict([
('first_name', 12),
('last_name', 15),
('middle_initial', 1),
('age', 3)
])
inp = "jeremy lowery s"
rec = spec.unpack(inp)
self.assertEquals(rec['first_name'], 'jeremy')
self.assertEquals(rec['last_name'], 'lowery')
self.assertEquals(rec['middle_initial'], 's')
def test_unpack_with_string_spec(self):
inp = "YYNGOT CUT OF"
rec = Dict("a;b;c;msg:100").unpack(inp)
self.assertEquals(rec['msg'], 'GOT CUT OF')
def test_multiunpack_with_data_structure_spec(self):
spec = Dict([
('first_name', 12),
('last_name', 15),
('middle_initial', 1),
('age', 3)
])
buf = ("jeremy lowery s 23\n"
"tom jones V X3\n")
for line in buf.split("\n"):
rec = spec.unpack(line)
def test_array_unpack(self):
expected = {'color': ['a', 'b', 'c']}
# explicit as a pair
spec = Dict([(('color', 3), 1)])
self.assertEquals(spec.unpack("abc"), expected)
# Given as a string
spec = Dict("color[3]")
self.assertEquals(spec.unpack("abc"), expected)
def test_sub_with_structured_data_spec(self):
spec = Dict([
('demo', Dict([
('first_name', 6),
('last_name', 6),
('sex', 1)
])),
('address', Dict([
('line_1', 10),
('line_2', 10)
]))
])
text = "jeremyloweryM100 elm stbelton, tx"
r = spec.unpack(text)
self.assertEquals(r['demo']['first_name'], 'jeremy')
self.assertEquals(r['demo']['last_name'], 'lowery')
self.assertEquals(r['demo']['sex'], 'M')
self.assertEquals(r['address']['line_1'], '100 elm st')
self.assertEquals(r['address']['line_2'], 'belton, tx')
if __name__ == '__main__': unittest.main()
```
#### File: stypes/stypes/test_sequence.py
```python
import unittest
from .sequence import Array, List, Tuple, NamedTuple
from .spec import String
from .numeric import Integer
from .util import UnconvertedValue
class ListTestCase(unittest.TestCase):
def test_mainline(self):
spec = List([12, 15, 1, Integer(3)])
string = "jeremy lowery s031"
value = spec.unpack(string)
self.assertIsInstance(value, list)
text_rec = ['jeremy', 'lowery', 's', 31]
self.assertEquals(value, text_rec)
self.assertEquals(value.pack(), string)
def test_atom(self):
spec = List([String(3), 1, 1])
v = spec.unpack("")
self.assertEquals(v.pack(), ' '*5)
def test_slice(self):
spec = List([1]*5)
v = spec.unpack("")
v[1] = "X"
self.assertEquals(v[1], "X")
v[2:4] = ["Y", "Z"]
self.assertEquals(v, ["", "X", "Y", "Z", ""])
def test_assign(self):
spec = List([12, 15, 1, Integer(3)])
value = spec.unpack("")
value[3] = "4"
def test_list(self):
spec = List("1;1;2")
value = ["A", "B", "EE"]
self.assertEquals(spec.pack(value), "ABEE")
def test_pack(self):
spec = List([1, 1, 10])
data = ["Y", "N", "Bacon"]
self.assertEquals(spec.pack(data), "YNBacon ")
def test_convert(self):
spec = List([Integer(5)])
value = spec.unpack("00040")
self.assertEquals(value, [40])
def test_nested_array(self):
spec = List([String(12),
String(15),
String(1),
String(3),
Array(3, String(4))])
string = "jeremy lowery s031000100020003"
value = spec.unpack(string)
expected = ['jeremy', 'lowery', 's', '031', ['0001', '0002', '0003']]
self.assertEquals(value, expected)
def test_sub_assignment(self):
mint = Integer(1)
spec = List([mint, mint, List([mint, mint, mint])])
s = spec.unpack('')
self.assertEquals(s, [None, None, [None, None, None]])
s[:2] = ['3', '4']
self.assertEquals(s[:2], [3, 4])
s[2][0] = '1'
self.assertEquals(s[2], [1, None, None])
s[2][:] = ['9', '8', '70']
self.assertEquals(s, [3, 4, [9, 8, 70]])
try:
s[2][:] = [1, 2, 3, 4, 5, 6]
self.assertTrue(False, 'Attempted to change size of list with : slice')
except TypeError:
pass
try:
s[2][:] = [1, 2]
self.assertTrue(False, 'Attempted to change size of list with : slice')
except TypeError:
pass
class TupleTestCase(unittest.TestCase):
def test_mainline(self):
spec = Tuple([12, 15, 1, Integer(3)])
string = "jeremy lowery s031"
value = spec.unpack(string)
text_rec = ('jeremy', 'lowery', 's', 31)
self.assertEquals(value, text_rec)
self.assertEquals(value.pack(), string)
def test_tuple_convert(self):
spec = Tuple([Integer(5)])
value = spec.unpack("00040")
self.assertEquals(value, (40,))
class ArrayTestCase(unittest.TestCase):
def test_mainline(self):
spec = Array(3, Integer(1))
string = "123"
value = spec.unpack(string)
self.assertEquals(value, [1, 2, 3])
self.assertEquals(value.pack(), string)
class NamedTupleTestCase(unittest.TestCase):
def setUp(self):
self._spec = NamedTuple([
('first_name', 12),
('last_name', 15),
('middle_initial', 1),
('age', Integer(3))
])
def test_mainline(self):
buf = "jeremy lowery s 23"
tup = self._spec.unpack(buf)
self.assertEquals(tup.first_name, "jeremy")
self.assertEquals(tup.last_name, "lowery")
self.assertEquals(tup.middle_initial, "s")
self.assertEquals(tup.age, 23)
buf = "jeremy lowery s023"
self.assertEquals(tup.pack(), buf)
def test_bad_data(self):
buf = "jeremy lowery sX23"
tup = self._spec.unpack(buf)
self.assertEquals(type(tup.age), UnconvertedValue)
def test_has_unconverted(self):
buf = "jeremy lowery sX23"
tup = self._spec.unpack(buf)
self.assertEquals(tup.has_unconverted(), True)
def test_unconverted_report(self):
buf = "jeremy lowery sX23"
tup = self._spec.unpack(buf)
self.assertEquals(tup.unconverted_report(), "Index 3: expecting all digits for integer, given='X23'")
if __name__ == '__main__':
unittest.main()
```
#### File: stypes/stypes/test_string.py
```python
import unittest
from .spec import BoxedString
class DateTestCase(unittest.TestCase):
def setUp(self):
self._s = BoxedString(5, 2)
def test_to_bytes(self):
self.assertEquals(self._s.to_bytes("aaaaa\r\nbbbbb"), "aaaaabbbbb")
def test_to_trunc(self):
self.assertEquals(self._s.to_bytes("aaaaavvvvv\r\nbbbbb"), "aaaaabbbbb")
def test_width(self):
self.assertEquals(self._s.width, 10)
def test_from_bytes(self):
self.assertEquals(self._s.from_bytes("aaaaabbbbb"), "aaaaa\r\nbbbbb")
```
#### File: stypes/stypes/util.py
```python
from functools import partial
class InvalidSpecError(Exception):
pass
class UnconvertedValue(object):
def __init__(self, string, reason):
self.string = string
self.reason = reason
def __nonzero__(self):
return False
def __str__(self):
return "%s, given=%r" % (self.reason, self.string)
def __repr__(self):
return '<UnconvertedValue string=%r reason=%r>' % (self.string,
self.reason)
class imemoize(object):
"""cache the return value of a method
This class is meant to be used as a decorator of methods. The return value
from a given method invocation will be cached on the instance whose method
was invoked. All arguments passed to a method decorated with memoize must
be hashable.
If a memoized method is invoked directly on its class the result will not
be cached. Instead the method will be invoked like a static method:
class Obj(object):
@memoize
def add_to(self, arg):
return self + arg
Obj.add_to(1) # not enough arguments
Obj.add_to(1, 2) # returns 3, result is not cached
http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
"""
def __init__(self, func):
self.func = func
def __get__(self, obj, objtype=None):
if obj is None:
return self.func
return partial(self, obj)
def __call__(self, *args, **kw):
obj = args[0]
try:
cache = obj.__cache
except AttributeError:
cache = obj.__cache = {}
key = (self.func, args[1:], frozenset(kw.items()))
try:
res = cache[key]
except KeyError:
res = cache[key] = self.func(*args, **kw)
return res
```
|
{
"source": "jeremylow/pepp",
"score": 3
}
|
#### File: pepp/pepp/cli.py
```python
from __future__ import absolute_import
import os
from os.path import exists, join
import shlex
import subprocess
import pipfile
import toml
import click
from pepp import core
from pepp import __version__ as pepp_version
@click.group()
@click.version_option(prog_name="pepp", version=pepp_version)
def main():
pass
@main.command(context_settings=dict(ignore_unknown_options=True))
@click.argument("package")
@click.option("--save", is_flag=True, default=False)
@click.option("--save-dev", is_flag=True, default=False)
@click.argument("pip_args", nargs=-1, type=click.UNPROCESSED)
def install(package, save, save_dev, pip_args):
packages = core.download_package(package)
core.install_packages(packages)
if save:
core.save_package(packages, dev=False)
if save_dev:
core.save_package(packages, dev=True)
return True
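# Illustrative invocations (hypothetical package names, not from the source):
#   pepp install requests --save
#   pepp install pytest --save-dev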
@main.command()
@click.option("--force", is_flag=True, default=False)
def init(force):
core.init_pipfile(force=force)
@main.command()
def config():
project_root = click.prompt(
"Enter the root directory for your project", default=os.path.curdir
)
create_pipfile = click.prompt(
"Would you like to create a default Pipfile now?", default=True
)
if create_pipfile:
if exists(join(project_root, "Pipfile")):
overwrite = click.prompt(
"Pipfile found. Would you like to overwrite?", default=False
)
if overwrite:
click.echo("Overwriting and creating Pipfile")
core.init_pipfile(force=True)
else:
click.echo("Aborting creation of Pipfile")
if exists(join(project_root, "pepp.ini")):
overwrite = click.prompt("Configuration file exists. Overwrite?", default=False)
if not overwrite:
click.echo("Aborting creation of configuration file.")
return -1
click.echo("Overwriting configuration file.")
with open(join(project_root, "pepp.ini"), "w") as f:
dikt = {"project-root": project_root, "pipfile": join(project_root, "Pipfile")}
toml.dump(dikt, f)
if __name__ == "__main__":
main()
```
|
{
"source": "jeremylow/pre-commit-hooks",
"score": 3
}
|
#### File: jeremylow/pre-commit-hooks/check_inline_doc_spacing.py
```python
from __future__ import print_function
from __future__ import unicode_literals
import ast
import os
import re
import sys
FIND_PATTERN = re.compile(r"^([ ]{1,})(?:.*)", re.UNICODE)
AST_TO_HUMAN = {
ast.Module: "module",
ast.FunctionDef: "function",
ast.ClassDef: "class",
}
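# Illustrative failure (assumed behaviour of check_doc_string below; the file
# name, line number, and function name are made up): a docstring line indented
# by, say, 3 or 5 spaces produces a message like
#   "foo.py:12 in function bar: spacing not a multiple of four."
# while indents of 0, 4, or 8 spaces pass.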
def check_module_indents(module):
try:
if not isinstance(module.body[0], ast.Expr):
return (0, "")
expr = module.body[0]
init_lineno, doc_string = expr.lineno, expr.value.s
# for modules, lineno is the *end* of the doc string
lineno = init_lineno - len(re.findall("\n", doc_string))
return (lineno, doc_string)
except Exception as e:
return (0, "")
def check_doc_string(f, lineno, node, doc_string):
fails = []
retval = 0
for line in doc_string.split("\n"):
lineno += 1
white_space_strings = re.findall(pattern=FIND_PATTERN, string=line)
if not white_space_strings:
continue
if isinstance(node, ast.Module):
node.name = os.path.basename(f)
section = AST_TO_HUMAN.get(type(node), "")
        # Any leading whitespace that is not a multiple of four spaces fails.
        if len(white_space_strings[0]) % 4 != 0:
            fails.append(
                "{0}:{1} in {2} {3}: spacing not a multiple of four.".format(
                    f, lineno, section, node.name
                )
            )
            retval = 1
return (retval, fails)
def main(files):
retval = 0
failures = []
for f in files:
with open(f) as _file:
source = _file.read()
tree = ast.parse(source)
for node in ast.walk(tree):
if isinstance(node, ast.Module):
lineno, doc_string = check_module_indents(node)
retval, fails = check_doc_string(f, lineno, node, doc_string)
if fails:
failures += fails
else:
try:
doc_string = ast.get_docstring(node)
except Exception as e:
continue
if doc_string:
retval, fails = check_doc_string(f, node.lineno, node, doc_string)
if fails:
failures += fails
else:
continue
if failures:
[print(failure) for failure in failures]
return 1
else:
return 0
if __name__ == "__main__":
files = sys.argv[1:]
exit(main(files))
```
|
{
"source": "jeremylow/pyshk",
"score": 3
}
|
#### File: pyshk/tests/test_api.py
```python
import unittest
import json
import responses
from pyshk.api import Api
from pyshk import models
from pyshk import errors
class ApiTests(unittest.TestCase):
""" Test various API functions """
def test_api_creation(self):
a = Api(consumer_key='test',
consumer_secret='test',
access_token_key='test',
access_token_secret='supersecret')
self.assertEqual(a.base_url, 'http://mlkshk.com')
a = Api(base_url='http://example.com')
self.assertEqual(a.base_url, 'http://example.com')
def test_api_unauthorized(self):
a = Api(consumer_key='test', consumer_secret='test')
self.assertRaises(errors.ApiInstanceUnauthorized, a.get_user)
# @responses.activate
# def test_api_auth(self):
# auth_url = (
# 'https://mlkshk.com/api/authorize?response_type='
# 'code&client_id=test&redirect_uri=http://localhost:8000')
# with open('tests/test_data/api/authorize') as f:
# resp_data = f.read()
# responses.add(
# responses.POST,
# auth_url,
# body=resp_data,
# status_code=200)
# a = Api(
# consumer_key='test',
# consumer_key='test',
# redirect_uri='http://localhost:8000')
# req = a.get_auth()
class TestAPIResponses(unittest.TestCase):
def setUp(self):
self.api = Api(
consumer_key='test',
consumer_secret='test',
access_token_key='test',
access_token_secret='test')
@responses.activate
def test_get_user(self):
with open('tests/test_data/api/user') as f:
resp_data = f.read()
responses.add(
responses.GET,
'http://mlkshk.com/api/user',
body=resp_data,
status=200)
user = self.api.get_user()
self.assertEqual(user.name, 'jcbl')
# By user_id
with open('tests/test_data/api/user_id/67136') as f:
resp_data = f.read()
responses.add(
responses.GET,
'http://mlkshk.com/api/user_id/67136',
body=resp_data,
status=200)
user = self.api.get_user(user_id=67136)
self.assertEqual(user.name, 'jcbl')
# By user_name
with open('tests/test_data/api/user_name/jcbl') as f:
resp_data = f.read()
responses.add(
responses.GET,
'http://mlkshk.com/api/user_name/jcbl',
body=resp_data,
status=200)
user = self.api.get_user(user_name='jcbl')
self.assertEqual(user.name, 'jcbl')
@responses.activate
def test_get_authd_user_shakes(self):
with open('tests/test_data/api/shakes') as f:
resp_data = f.read()
responses.add(
responses.GET,
'http://mlkshk.com/api/shakes',
body=resp_data,
status=200)
shakes = self.api.get_user_shakes()
self.assertEqual(len(shakes), 1)
@responses.activate
def test_get_sharedfiles_from_shake(self):
with open('tests/test_data/api/shakes.59884') as f:
resp_data = f.read()
responses.add(
responses.GET,
'http://mlkshk.com/api/shakes/59884',
body=resp_data,
status=200)
files = self.api.get_shared_files_from_shake(shake_id=59884)
self.assertEqual(len(files), 10)
with open('tests/test_data/api/sharedfile/16509') as f:
sharedfile = models.SharedFile.NewFromJSON(json.load(f))
self.assertTrue(sharedfile == files[0])
self.assertRaises(
Exception,
lambda: self.api.get_shared_files_from_shake(
before='0001', after='0002'))
@responses.activate
def test_post_shared_file(self):
with open('tests/test_data/api/upload') as f:
resp_data = f.read().encode('utf8')
responses.add(
responses.POST,
'http://mlkshk.com/api/upload',
body=resp_data,
status=200
)
resp = self.api.post_shared_file(
image_file='tests/test_data/like-tiny.gif')
expected_resp = {"share_key": "164L4", "name": "like-tiny.gif"}
self.assertEqual(resp['share_key'], expected_resp['share_key'])
self.assertRaises(
Exception,
lambda: self.api.post_shared_file(
image_file='1.jpg',
source_link='http://example.com'))
def test_headers(self):
test_headers = self.api._make_headers(
nonce='bcbd15da3e38847ebad5655231912ac8',
timestamp=1444770686)
std_headers = (
'MAC token="test", timestamp="1444770686", '
'nonce="bcbd15da3e38847ebad5655231912ac8", '
'signature="vdjjGj07papwf0H28Nkc3SbWuUQ="')
self.assertEqual(test_headers, std_headers)
@responses.activate
def test_get_shared_file(self):
with open('tests/test_data/api/sharedfile/164DW') as f:
resp_data = f.read()
responses.add(
responses.GET,
'http://mlkshk.com/api/sharedfile/164DW',
body=resp_data,
status=200
)
sf = self.api.get_shared_file(sharekey='164DW')
sf.user = None
sf2 = models.SharedFile(
comments=0,
pivot_id="164DW",
title="Shakeys",
name="tumblr_ntg61qKrOe1rtr67io1_500.jpg",
posted_at="2015-10-10T15:14:30Z",
like=False,
saved=False,
permalink_page="http://mlkshk.com/p/164DW",
nsfw=False,
height=645,
likes=1,
description=None,
sharekey="164DW",
original_image_url="http://s.mlkshk.com/r/164DW",
saves=0,
width=500,
views=0)
self.assertTrue(sf == sf2)
@responses.activate
def test_like_shared_file(self):
with open('tests/test_data/api/sharedfile/164DW') as f:
resp_data = f.read()
responses.add(
responses.POST,
'http://mlkshk.com/api/sharedfile/164DW/like',
body=resp_data,
status=200)
sf = self.api.like_shared_file(sharekey='164DW')
self.assertTrue(sf.liked)
self.assertRaises(Exception, lambda: self.api.like_shared_file())
@responses.activate
def test_save_shared_file(self):
with open('tests/test_data/api/sharedfile/165PZ/save') as f:
resp_data = f.read()
responses.add(
responses.POST,
'http://mlkshk.com/api/sharedfile/165PZ/save',
body=resp_data,
status=200)
sf = self.api.save_shared_file(sharekey='165PZ')
self.assertTrue(sf.saved)
self.assertEqual(sf.user.id, 14430)
@responses.activate
def test_get_favorites(self):
with open('tests/test_data/api/favorites') as f:
resp_data = f.read()
responses.add(
responses.GET,
'http://mlkshk.com/api/favorites',
body=resp_data,
status=200)
favs = self.api.get_favorites()
self.assertEqual(len(favs), 10)
self.assertIsInstance(favs[0], models.SharedFile)
@responses.activate
def test_get_friends_shake(self):
with open('tests/test_data/api/friends') as f:
resp_data = f.read()
responses.add(
responses.GET,
'http://mlkshk.com/api/friends',
body=resp_data,
status=200
)
friends_shake = self.api.get_friends_shake()
self.assertIsInstance(friends_shake[0], models.SharedFile)
self.assertIsInstance(friends_shake, list)
self.assertRaises(
Exception,
lambda: self.api.get_friends_shake(
before='0001', after='0002'))
@responses.activate
def test_get_incoming_shake(self):
with open('tests/test_data/api/incoming') as f:
resp_data = f.read()
responses.add(
responses.GET,
'http://mlkshk.com/api/incoming',
body=resp_data,
status=200
)
incoming_shake = self.api.get_incoming_shake()
self.assertIsInstance(incoming_shake[0], models.SharedFile)
self.assertIsInstance(incoming_shake, list)
@responses.activate
def test_get_magic_files(self):
with open('tests/test_data/api/magicfiles') as f:
resp_data = f.read()
responses.add(
responses.GET,
'http://mlkshk.com/api/magicfiles',
body=resp_data,
status=200
)
magic_files = self.api.get_magic_shake()
self.assertIsInstance(magic_files[0], models.SharedFile)
self.assertIsInstance(magic_files, list)
@responses.activate
def test_get_magic_files_after(self):
with open('tests/test_data/api/magicfiles.after') as f:
resp_data = f.read()
responses.add(
responses.GET,
'http://mlkshk.com/api/magicfiles/after/1643X',
body=resp_data,
status=200
)
magic_files = self.api.get_magic_shake(after='1643X')
self.assertIsInstance(magic_files[0], models.SharedFile)
self.assertEqual(magic_files[0].sharekey, 'CO04')
self.assertIsInstance(magic_files, list)
self.assertEqual(len(magic_files), 10)
@responses.activate
def test_get_magic_files_before(self):
with open('tests/test_data/api/magicfiles.before') as f:
resp_data = f.read()
responses.add(
responses.GET,
'http://mlkshk.com/api/magicfiles/before/1643X',
body=resp_data,
status=200
)
magic_files = self.api.get_magic_shake(before='1643X')
self.assertIsInstance(magic_files[0], models.SharedFile)
self.assertEqual(magic_files[0].sharekey, 'CO0E')
self.assertIsInstance(magic_files, list)
self.assertEqual(len(magic_files), 10)
@responses.activate
def test_get_user_shakes(self):
with open('tests/test_data/api/shakes') as f:
resp_data = f.read()
responses.add(
responses.GET,
'http://mlkshk.com/api/shakes',
body=resp_data,
status=200
)
shakes = self.api.get_user_shakes()
self.assertIsInstance(shakes[0], models.Shake)
self.assertIsInstance(shakes[0].owner, models.User)
self.assertEqual(len(shakes), 1)
@responses.activate
def test_update_shared_file(self):
with open('tests/test_data/api/sharedfile/1669X.POST') as f:
resp_data = f.read()
responses.add(
responses.POST,
'http://mlkshk.com/api/sharedfile/1669X',
body=resp_data,
status=200
)
sf = self.api.update_shared_file(sharekey='1669X', description='test')
self.assertIsInstance(sf, models.SharedFile)
self.assertEqual(sf.description, 'test')
with open('tests/test_data/api/sharedfile/1669X.POST.title') as f:
resp_data = f.read()
responses.add(
responses.POST,
'http://mlkshk.com/api/sharedfile/1669X',
body=resp_data,
status=200
)
sf = self.api.update_shared_file(sharekey='1669X',
title='journalism')
self.assertIsInstance(sf, models.SharedFile)
self.assertEqual(sf.title, 'journalism')
self.assertRaises(
Exception,
lambda: self.api.update_shared_file(sharekey='1669X'))
self.assertRaises(
Exception,
lambda: self.api.update_shared_file())
@responses.activate
def test_get_comments(self):
with open('tests/test_data/api/sharedfile/162LG/comments') as f:
resp_data = f.read()
responses.add(
responses.GET,
'http://mlkshk.com/api/sharedfile/162LG/comments',
body=resp_data,
status=200
)
comments = self.api.get_comments(sharekey='162LG')
self.assertEqual(len(comments), 4)
self.assertIsInstance(comments[0], models.Comment)
self.assertEqual(comments[0].body, '!!!!!!!')
self.assertRaises(Exception,
lambda: self.api.get_comments())
```
|
{
"source": "jeremylow/rotoscope",
"score": 2
}
|
#### File: jeremylow/rotoscope/rotoscope.py
```python
from __future__ import print_function
import os
from os.path import dirname, abspath
import uuid
import shlex
import subprocess
import sys
import threading
from PIL import Image
import time
try:
from queue import Queue
except ImportError:
from Queue import Queue
files_q = Queue()
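# Usage (inferred from the __main__ block below):
#   python rotoscope.py <film.mp4> <autotrace|potrace>
# Frames are extracted with ffmpeg, traced to SVG, converted back to bitmaps,
# and reassembled into an mp4.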
def extract_frames(film=None, work_dir=None):
    # Frames are written to the current working directory; the caller has
    # already chdir'd into the work directory before calling this.
    command = (
        "ffmpeg -i {film} "
        "-f image2 -pix_fmt bgr24 %03d.bmp").format(film=film)
print(command)
subprocess.call(command, shell=True)
def trace_file(work_file=None, tracer=None):
fn, ext = os.path.splitext(work_file)
autotrace_cmd = (
"autotrace {input} "
"-output-format svg "
"-color-count 16 "
"-despeckle-level 19 "
"-despeckle-tightness 0.45 "
"-filter-iterations 25 "
"-output-file {output}.svg").format(
input=work_file,
output=fn)
# Setup the tracers
if tracer == 'autotrace':
trace_command = autotrace_cmd
elif tracer == 'potrace':
trace_command = "potrace -t 25 -s -k .5 -a 2 {0}".format(work_file)
# if tracer == 'both':
# a_svg =
# im1 =
# Call the tracer
subprocess.call(shlex.split(trace_command))
# Convert traced file back to img format for ffmpeg
convert_back = "convert {fn}.svg {fn}.bmp".format(fn=fn)
subprocess.call(shlex.split(convert_back))
# Remove working files
# subprocess.call(shlex.split('rm {0}'.format(work_file)))
# subprocess.call(shlex.split('rm {0}.svg'.format(fn)))
def trace_thread(q, tracer):
while True:
work_file = q.get()
print("FRAMES REMAINING: ", q.qsize())
trace_file(work_file=work_file, tracer=tracer)
q.task_done()
def autotrace(tracer=None):
files = os.listdir('.')
[files_q.put(f) for f in files]
print("QUEUE SIZE IS ", files_q.qsize())
for x in range(8):
t = threading.Thread(target=trace_thread, args=(files_q, tracer))
t.daemon = True
t.start()
def create_new_video(tmp_dir):
cmd = "../imgs_to_mp4.sh {tmp_dir} ../{out_file}".format(
tmp_dir=tmp_dir,
out_file='out{0}.mp4'.format(time.time()))
subprocess.call(cmd, shell=True)
subprocess.call('rm *.bmp', shell=True)
if __name__ == '__main__':
a = time.time()
BASE_DIR = dirname(abspath(__file__))
wrkdir = "work_{0}".format(uuid.uuid4().hex)
film_name = os.path.abspath(sys.argv[1])
tracer = sys.argv[2]
print('making directory: ', wrkdir)
os.mkdir(wrkdir)
os.chdir(wrkdir)
extract_frames(film=film_name, work_dir=wrkdir)
autotrace(tracer=tracer)
files_q.join()
create_new_video(tmp_dir=wrkdir)
print("total time taken", time.time() - a)
```
|
{
"source": "JeremyLozanoKeays/Pylogix-Tools",
"score": 3
}
|
#### File: Pylogix-Tools/Pylogix-Tools/Main.py
```python
import tkinter as tk
from tkinter import ttk, simpledialog
import pylogix
import ast
import datetime
import xml.etree.cElementTree as et
from xml.dom import minidom
''' This is a tool that creates a simple GUI for pylogix. This will allow connections only to
    Rockwell ControlLogix, CompactLogix, and Micro800 PLCs.
'''
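# Minimal pylogix sketch of what the GUI drives (the tag name is an
# assumption; the IP address is only this app's default):
#   comm = pylogix.PLC()
#   comm.IPAddress = '10.10.100.1'
#   ret = comm.Read('SomeTag')        # ret.TagName, ret.Value, ret.Status
#   comm.Write('SomeTag', 42)
#   comm.Close()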
class App(ttk.Frame):
def __init__(self, parent):
ttk.Frame.__init__(self)
# Make the app responsive
for index in [0, 1, 2]:
self.columnconfigure(index=index, weight=1)
self.rowconfigure(index=index, weight=1)
# Create value lists
self.option_menu_list = ["", "OptionMenu", "Option 1", "Option 2"]
self.processor_combo_list = ["0", "1", "2", "3", "4"]
# Create control variables with defaults
self.var_0 = tk.StringVar(value="10.10.100.1")
self.var_1 = tk.StringVar(value="Tag_values.xml")
self.var_2 = tk.StringVar(value="5000") # Connection Refresh Rate
self.var_3 = tk.IntVar(value=3) # Connection Enabled status
self.var_4 = tk.IntVar(value=0) # Processor Slot
self.var_5 = tk.StringVar(value="Disabled") #Is an Emulator
self.var_6 = tk.StringVar(value="Discover") #discovery
self.var_7 = tk.StringVar(value="Disabled") #Is a Micro 800
self.treedata = TreeData()
self.img_1 = tk.PhotoImage(file='Logo_Shadow_Cropped.png').subsample(2, 2)
self.console = Console(5)
        self.console_last = tk.StringVar(value="") # Console value store to prevent reprints
self.console_line_1 = tk.StringVar(value="Line 1 : Loading Saved Configuration")
self.console_line_2 = tk.StringVar(value="Line 2 : ")
self.console_line_3 = tk.StringVar(value="Line 3 : ")
self.console_line_4 = tk.StringVar(value="Line 4 : ")
self.console_line_5 = tk.StringVar(value="Line 5 : ")
self.flag = 0
# Create widgets :)
self.setup_widgets()
def setup_widgets(self):
#Left Column
# Logo
self.logo = ttk.Label(self, image = self.img_1 )
self.logo.grid(row=0, column=0, padx=(20, 10), pady=(10, 5), sticky="nsew")
# Seperator
self.separator_c1 = ttk.Separator(self)
self.separator_c1.grid(row=1, column=0, padx=(20, 10), pady=(0, 0), sticky="ew" )
# Create a Frame for the Connection Radiobuttons
self.radio_frame = ttk.LabelFrame(self, text="Connection", padding=(20, 10, 0, 10))
self.radio_frame.grid(row=2, column=0, padx=(20, 10), pady=5, sticky="ew")
# Connection Radiobuttons
self.radio_1 = ttk.Radiobutton(
self.radio_frame, text="Write Mode", variable=self.var_3, value=1
)
self.radio_1.grid(row=0, column=0, padx=5, pady=2, sticky="nsew")
self.radio_2 = ttk.Radiobutton(
self.radio_frame, text="Read Mode", variable=self.var_3, value=2
)
self.radio_2.grid(row=1, column=0, padx=5, pady=2, sticky="nsew")
self.radio_3 = ttk.Radiobutton(
self.radio_frame, text="Disabled", variable=self.var_3, value=3
)
self.radio_3.grid(row=2, column=0, padx=5, pady=2, sticky="nsew")
# Discovery
# Save Changes button
self.discoverbutton = ttk.Button(
self, text="Discover all Tags", style="Accent.TButton"
)
self.discoverbutton.grid(row=4, column=0, padx=(20, 10), pady=(5, 5), sticky="nsew" )
self.discoverbutton['command'] = self.discover # Calls Save_Changes function on command
# Middle Column
# Create a Frame for input widgets
self.widgets_frame = ttk.LabelFrame(self, text="Configuration", padding=(0, 0, 0, 10))
self.widgets_frame.grid(
row=0, column=1, padx=10, pady=(30, 10), sticky="nsew", rowspan=10
)
self.widgets_frame.columnconfigure(index=0, weight=1)
# IP address Entry Label
self.ip_entry_label = ttk.Label(self.widgets_frame, text="IP Address")
self.ip_entry_label.grid(row=0, column=0, padx=10, pady=(2, 2), sticky="ew")
# IP addresss Entry
self.ip_entry = ttk.Entry(self.widgets_frame, textvariable=self.var_0)
self.ip_entry.grid(row=1, column=0, padx=20, pady=(0, 10), sticky="ew")
# XML Source Entry Label
self.xml_entry_label = ttk.Label(self.widgets_frame, text="XML Tag Source")
self.xml_entry_label.grid(row=2, column=0, padx=10, pady=(0,2), sticky="ew")
# XML Source Entry
self.xml_entry = ttk.Entry(self.widgets_frame, textvariable=self.var_1)
self.xml_entry.grid(row=3, column=0, padx=20, pady=(0, 10), sticky="ew")
# Refresh Rate Entry Label
self.refresh_entry_label = ttk.Label(self.widgets_frame, text="Refresh Rate (mS)")
self.refresh_entry_label.grid(row=4, column=0, padx=10, pady=(0,2), sticky="ew")
# Refresh Rate Entry
self.refresh_entry = ttk.Entry(self.widgets_frame, textvariable=self.var_2)
self.refresh_entry.grid(row=5, column=0, padx=20, pady=(0, 10), sticky="ew")
# Processor combobox Label
self.ip_entry_label = ttk.Label(self.widgets_frame, text="Processor Slot")
self.ip_entry_label.grid(row=6, column=0, padx=10, pady=(0, 2), sticky="ew")
# Processor combobox
self.processor_entry = ttk.Combobox(
self.widgets_frame, state="readonly", values=self.processor_combo_list
)
self.processor_entry.current(0)
self.processor_entry.grid(row=7, column=0, padx=20, pady=(0,10), sticky="ew")
# Is Emulator button Label
self.emulator_entry_label = ttk.Label(self.widgets_frame, text="Emulate 5000")
self.emulator_entry_label.grid(row=8, column=0, padx=10, pady=(0, 2), sticky="ew")
# Is Emulator button
self.emulator_switch = ttk.Checkbutton(
self.widgets_frame, textvariable=self.var_5, style="Toggle.TButton"
, variable=self.var_5, onvalue="Enabled", offvalue="Disabled" # Var_5 updates display text
)
self.emulator_switch.grid(row=9, column=0, padx=20, pady=(0, 10), sticky="nsew")
# micro 800 button Label
self.emulator_entry_label = ttk.Label(self.widgets_frame, text="Micro 800")
self.emulator_entry_label.grid(row=10, column=0, padx=10, pady=(0, 2), sticky="ew")
# micro 800 button
self.emulator_switch = ttk.Checkbutton(
self.widgets_frame, textvariable=self.var_7, style="Toggle.TButton"
, variable=self.var_7, onvalue="Enabled", offvalue="Disabled" # Var_5 updates display text
)
self.emulator_switch.grid(row=11, column=0, padx=20, pady=(0, 10), sticky="nsew")
# Separator
self.separator = ttk.Separator(self.widgets_frame)
self.separator.grid(row=12 , column=0, padx=10, pady=5, sticky="ew" , columnspan=4)
# Save Changes button
self.savebutton = ttk.Button(
self.widgets_frame, text="Save Changes", style="Accent.TButton"
)
self.savebutton.grid(row=15, column=0, padx=10, pady=(10, 0), sticky="nsew" )
self.savebutton['command'] = self.save_changes # Calls Save_Changes function on command
#Right Column
# Panedwindow
self.paned = ttk.PanedWindow(self)
self.paned.grid(row=0, column=2, pady=(35, 10), sticky="nsew", rowspan=10)
# Pane #1
self.pane_1 = ttk.Frame(self.paned, padding=5)
self.paned.add(self.pane_1, weight=1)
# Scrollbar
self.scrollbar = ttk.Scrollbar(self.pane_1)
self.scrollbar.pack(side="right", fill="y")
# Treeview
self.treeview = ttk.Treeview(
self.pane_1,
selectmode="browse",
yscrollcommand=self.scrollbar.set,
columns=(1, 2),
height=12,
)
self.treeview.pack(expand=True, fill="both")
self.scrollbar.config(command=self.treeview.yview)
# Treeview columns
self.treeview.column("#0", anchor="w", width=280)
self.treeview.column(1, anchor="w", width=40)
self.treeview.column(2, anchor="w", width=60)
# Treeview headings
self.treeview.heading("#0", text="Tag Name", anchor="center")
self.treeview.heading(1, text="Value", anchor="center")
self.treeview.heading(2, text="Data Type", anchor="center")
# Define treeview data
self.xml_to_treeview()
# Select and scroll
self.treeview.selection_set(9)
self.treeview.bind('<Double-1>', self.onDoubleClick)
self.treeview.bind("<Button-3>", self.onRightClick)
self.treeview.see(7)
# Right click popup Menu
        # Pane #2
self.pane_2 = ttk.Frame(self.paned, padding=5)
self.paned.add(self.pane_2, weight=3)
# Notebook, pane #2
self.notebook = ttk.Notebook(self.pane_2)
self.notebook.pack(fill="both", expand=True)
# Tab #1
self.tab_1 = ttk.Frame(self.notebook)
for index in [0, 1]:
self.tab_1.columnconfigure(index=index, weight=1)
self.tab_1.rowconfigure(index=index, weight=1)
self.notebook.add(self.tab_1, text="Console")
        # Display the print-console lines
self.t1_label_1 = ttk.Label(self.tab_1, textvariable=self.console_line_1, justify="left",
font=("-size", 8),
)
self.t1_label_1.grid(row=1, column=0, pady=(1, 0), columnspan=2, sticky="ew")
self.t1_label_2 = ttk.Label(self.tab_1, textvariable=self.console_line_2, justify="left",
font=("-size", 8),
)
self.t1_label_2.grid(row=2, column=0, pady=(1, 0), columnspan=2, sticky="ew")
self.t1_label_3 = ttk.Label(self.tab_1, textvariable=self.console_line_3, justify="left",
font=("-size", 8),
)
self.t1_label_3.grid(row=3, column=0, pady=(1, 0), columnspan=2, sticky="ew")
self.t1_label_4 = ttk.Label(self.tab_1, textvariable=self.console_line_4, justify="left",
font=("-size", 8),
)
self.t1_label_4.grid(row=4, column=0, pady=(1, 0), columnspan=2, sticky="ew")
self.t1_label_5 = ttk.Label(self.tab_1, textvariable=self.console_line_5, justify="left",
font=("-size", 8),
)
self.t1_label_5.grid(row=5, column=0, pady=(1, 1), columnspan=2, sticky="ew")
# Tab #2
self.tab_2 = ttk.Frame(self.notebook)
self.notebook.add(self.tab_2, text="About")
# Label
self.t2_label_1 = ttk.Label(
self.tab_2,
text="Contact",
justify="center",
font=("-size", 12, "-weight", "bold"),
)
self.t2_label_1.grid(row=1, column=0, padx=0, pady=(10, 5), columnspan=2)
# Label
self.t2_label_2 = ttk.Label(
self.tab_2,
text="+1 (818) 538 6577\n+52 55 6083 5305",
justify="center",
font=("-size", 8, "-weight", "bold"),
)
self.t2_label_2.grid(row=2, column=0, padx=20, pady=2, columnspan=2)
#Footer
# Separator
self.separator = ttk.Separator(self)
self.separator.grid(row=10, column=0, padx=(20, 10), pady=10, sticky="ew" , columnspan=4)
self.footleft = ttk.Label(
self,
text="© 2021 Avalae Automation. ",
justify="center",
font=("-size", 8, "-weight", "normal"),
)
self.footleft.grid(row=11, column=0, padx=(20, 10), pady=10, sticky="ew" , columnspan=2)
self.footright = ttk.Label(
self,
text="By <NAME> ",
justify="right",
font=("-size", 8, "-weight", "normal"),
)
self.footright.grid(row=11, column=2, padx=(20, 10), pady=10, sticky="e" )
# Sizegrip
self.sizegrip = ttk.Sizegrip(self)
self.sizegrip.grid(row=100, column=100, padx=(0, 3), pady=(0, 3))
def onDoubleClick(self, event):
        ''' Executed when a row is double-clicked. Opens a string entry dialog
        over the treeview so the clicked column's tag name, value, or data
        type can be modified. '''
# what row and column was clicked on
rowid = self.treeview.identify_row(event.y)
column = self.treeview.identify_column(event.x)
columnint = int(column.strip('#'))
# Store current row, text and values
text = self.treeview.item(rowid, 'text')
values = self.treeview.item(rowid, 'values')
# Open changebox dialog and set new data into treeview
if column == '#0':
self.entryPopup = simpledialog.askstring('Change Value', "Current Tag Name: " + text, parent=self.treeview)
if self.entryPopup != None:
self.treeview.delete(rowid)
self.treeview.insert('', index=(int(rowid)), iid=int(rowid), text=self.entryPopup , values=values)
elif column == '#1':
self.entryPopup = simpledialog.askstring('Change Value', "Current Value: " + values[0], parent=self.treeview)
if self.entryPopup != None:
values_new = (self.entryPopup, values[1])
self.treeview.delete(rowid)
self.treeview.insert('', index=(int(rowid)), iid=int(rowid), text=text, values=values_new)
elif column == '#2':
self.entryPopup = simpledialog.askstring('Change Value', "Current Data Type: " + values[1], parent=self.treeview)
if self.entryPopup != None:
values_new = (values[0], self.entryPopup)
self.treeview.delete(rowid)
self.treeview.insert('', index=(int(rowid)), iid=int(rowid), text=text, values=values_new)
def onRightClick(self, event):
self.event = event
self.popup = tk.Menu(self,tearoff=0)
self.popup.add_separator()
self.popup.add_command(label="Delete", command=self.onDelete)
self.popup.add_separator()
self.popup.add_command(label="Trend")
self.popup.add_separator()
try:
self.popup.tk_popup(event.x_root, event.y_root, 0)
except:
self.popup.grab_release()
finally:
self.popup.grab_release()
def onDelete(self):
# what row and column was clicked on
rowid = self.treeview.identify_row(self.event.y)
self.treeview.delete(rowid)
def plc_routine(self, initialize=False):
if initialize:
self.plc_initialize()
# Connection is Enabled
if self.var_3.get() == 1:
if self.flag != 1:
self.console.insert('Connection set: Enabled')
self.flag = 1
self.bottle_treeview(self.treedata)
try:
fail = False
self.plc_status = self.PLC.Write(self.treedata.write_data)
for i in self.plc_status:
if i.Status != 'Success':
fail = True
if fail:
self.console.insert('Data failed to send, Check configuration')
else:
self.console.insert('Data Write Successful')
except:
self.console.insert('Data failed to write, Check Data Types')
# Connection is Read-Only
elif self.var_3.get() == 2:
if self.flag != 2:
self.console.insert('Connection set: Read-Only')
self.flag = 2
self.bottle_treeview(self.treedata, readonly=True)
try:
fail = False
self.plc_status = self.PLC.Read(self.treedata.read_data)
self.update_treeview(self.plc_status)
for i in self.plc_status:
if i.Status != 'Success':
fail = True
if fail:
self.console.insert('Data failed to read, Check configuration')
else:
self.console.insert('Data Read Successful')
except:
                self.console.insert('Data failed to read, Check Data Types')
# Connection is Disabled
elif self.var_3.get() == 3:
if self.flag != 3:
self.console.insert('Connection set: Disabled')
self.flag = 3
# LOOP CALL - After Delay call plc_routine again
root.after(self.plc_config["Refresh_Rate"], self.plc_routine)
def plc_initialize(self):
# Open Log file settings
try:
            self.log_File = open("Log.txt", "a")
            self.log_File.close()
        except:
            self.console.insert('No existing file. Creating log.txt')
            self.log_File = open("Log.txt", "a+")  # Create a new file
            self.log_File.close()
# Open Configuration Settings and retrieve stored settings
try:
config_File = open("Config.txt", "r")
config_Settings = config_File.read()
            config_File.close()
config_Settings = ast.literal_eval(config_Settings)
# Update settings
self.var_0.set(config_Settings['IP_Address']) # Set IP Address
self.var_1.set(config_Settings['XML_Source']) # Set XML Filepath
self.var_2.set(config_Settings['Refresh_Rate']) # Set Refresh Rate
self.processor_entry.set(config_Settings['Processor_Slot']) # Processor Slot
self.var_5.set(config_Settings['Is_Emulator']) # Set Is Emulator
self.var_7.set(config_Settings['Is_Micro_800']) # Set Is Micro 800
except:
            self.console.insert('No existing file. Creating Config.txt')
            config_File = open("Config.txt", "a+")  # Create a new file
            config_File.close()
finally:
self.PLC = pylogix.PLC()
self.save_changes(True)
self.console.insert('Configuring PLC Settings')
def save_changes(self, init=False):
# Get settings into PLC_Config dictionary
self.plc_config = {
"IP_Address" : self.var_0.get(),
"XML_Source" : self.var_1.get(),
"Refresh_Rate" : self.var_2.get(),
"Connection_Status" : self.var_3.get(),
"Processor_Slot" : int(self.processor_entry.get()) ,
"Is_Emulator" : self.var_5.get(),
"Is_Micro_800" : self.var_7.get(),
}
# Update PLC Setting from dictionary
self.PLC.IPAddress = self.plc_config['IP_Address']
self.PLC.ProcessorSlot = self.plc_config['Processor_Slot']
if self.plc_config['Is_Micro_800'] == 'Enabled':
self.PLC.Micro800 = True
else:
self.PLC.Micro800 = False
if self.plc_config['Is_Emulator'] == 'Enabled':
self.PLC.ConnectionSize = 504
else:
self.PLC.ConnectionSize = 4002
# Store settings as Config.txt
config_File = open("Config.txt", "w")
config_File.write(str(self.plc_config))
config_File.close()
self.console.log(str(self.plc_config))
# SaveXMl
if not init:
self.write_to_xml(self.treedata.all_data)
self.console.insert("Changes Saved")
def discover(self):
try:
tags = self.PLC.GetTagList()
self.update_treeview(tags.Value, value_data=False)
except:
self.console.insert('Failed to discover tags - Check Settings')
try:
device = self.PLC.GetDeviceProperties().Value
self.console.insert('Discovered: '+ str(device.ProductName) + " v." + str(device.Revision))
except:
self.console.insert('Failed to discover device - Check Settings')
def xml_to_treeview(self):
# Open XML containing File
try:
treeview_data = []
data = []
tree = et.parse(self.var_1.get())
base = tree.getroot()
for j in range(len(base.findall('Tag'))):
for child in base[j]:
data.append(child.text)
treeview_data.append(('', j+1, data[-3], (data[-2], data[-1])))
except:
self.console.insert('Error opening - ' + self.var_1.get())
treeview_data = [("", 1, "Tag_1", ("Sample", "String"))] # Use this as a template dat unpacking
# Insert treeview data
for item in treeview_data:
self.treeview.insert(
parent=item[0], index="end", iid=item[1], text=item[2], values=item[3]
)
if item[0] == "" or item[1] in {8, 21}:
self.treeview.item(item[1], open=True) # Open parents
def update_treeview(self, read_data, value_data=True):
# Update Treeview with changes
for rowid in self.treeview.get_children():
self.treeview.delete(rowid) # Clear old tree
# Create new rows
scope = []
for data, newid in zip(read_data, range(len(read_data))):
if value_data:
if isinstance(data.Value, float):
data.Value = round(data.Value, 4)
data_type = "REAL"
elif isinstance(data.Value, str):
data_type = "STRING"
else:
data_type = "DINT"
self.treeview.insert('', index="end", iid=int(newid), text=data.TagName, values=(data.Value, data_type))
else:
if not data.DataType:
scope.append(data.TagName)
elif data.DataType in ["DINT", "STRING", "REAL", "INT", "BOOL"]:
self.treeview.insert('', index="end", iid=int(newid), text=data.TagName, values=(None, data.DataType))
if scope:
self.console.log('Programs Found: '+str(scope))
def bottle_treeview(self, tree_data, readonly=False):
# Clear tree_data
tree_data.clear()
values = []
# Iterate through treeview children iids
for x in self.treeview.get_children():
skip = False
# Retrieves Value and Datatype from Treeview
for y in self.treeview.item(x,'values'):
tree_data.values.append(y)
# Build write data when the connection is not read-only
if not readonly:
if tree_data.values[-1] == "STRING": # Builds the STRING, length is fixed at 25
tree_data.write_data.append(tuple([self.treeview.item(x,'text') + '.LEN', len(tree_data.values[-2])]))
values = [ord(c) for c in tree_data.values[-2]] + [0] * (25 - len(tree_data.values[-2]))
tree_data.write_data.append(tuple([self.treeview.item(x,'text') + '.DATA[0]', values]))
skip = True
elif tree_data.values[-1] == "REAL": # Builds the REAL
values = [float(tree_data.values[-2])]
else:
try:
values = [int(tree_data.values[-2])]
except ValueError:
return
if not skip: # Skips append when STRING is selected.
tree_data.write_data.append(tuple([self.treeview.item(x,'text'), values[0]]))
else:
tree_data.read_data.append(self.treeview.item(x,'text'))
# Build list of all treeview data
tree_data.all_data.append(tuple([self.treeview.item(x,'text'), tree_data.values[-2], tree_data.values[-1]]))
print(tree_data.write_data)
return tree_data
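# Illustrative sketch (hypothetical tag name and value, shown only to clarify the STRING
# handling above): a row with text 'MyTag' and values ('Hi', 'STRING') is bottled into
# write_data as two entries,
#   ('MyTag.LEN', 2) and ('MyTag.DATA[0]', [72, 105, 0, 0, ..., 0])
# i.e. the string length plus an ord()-encoded list zero-padded to the fixed length of 25.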
def write_to_xml(self, treeview_data):
# Create new base element
scope = et.Element('Scope')
tag = et.SubElement(scope, 'Tag')
Name = et.SubElement(tag, 'Name')
Value = et.SubElement(tag, 'Value')
DataType = et.SubElement(tag, 'DataType')
# Create blank XML the size of treeview data
for i in range(len(treeview_data)):
if i > 0:
scope.append(tag)
# Convert XML to pretty formating and overwrite old file
reparsed = minidom.parseString(et.tostring(scope, "utf-8"))
tree = et.ElementTree(et.fromstring(reparsed.toprettyxml(indent=" ")))
root = tree.getroot()
# Amend tree with Treeview data
for data, iter in zip(treeview_data, range(len(root.findall('Tag')))):
root[iter][0].text = str(data[0])
root[iter][1].text = str(data[1])
root[iter][2].text = str(data[2])
# Write to XML
tree.write('Tag_values.xml', encoding="utf-8", xml_declaration=True)
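# Illustrative sketch (tag names/values are assumptions): each treeview row becomes one
# <Tag> element in Tag_values.xml, e.g.
#   <Scope>
#     <Tag><Name>Tag_1</Name><Value>Sample</Value><DataType>STRING</DataType></Tag>
#   </Scope>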
class Console:
# Console FiFo
def __init__(self, size = 10):
self.items =[]
self.size = size
def is_empty(self):
return self.items == []
def insert(self, item):
now = datetime.datetime.now()
if app.console_last.get() != item:
self.items.insert(0, now.strftime('%d-%m-%Y %H:%M:%S - ') + item)
app.console_last.set(item)
self.log(self.items[0])
if len(self.items) > self.size:
self.out()
self.refresh()
def out(self):
return self.items.pop()
def get(self, item = None):
try:
if item >= 0:
return str(self.items[item])
else:
return str(self.items)
except:
return '.'
def refresh(self):
app.console_line_1.set("Line 1:" + self.get(0))
app.console_line_2.set("Line 2:" + self.get(1))
app.console_line_3.set("Line 3:" + self.get(2))
app.console_line_4.set("Line 4:" + self.get(3))
app.console_line_5.set("Line 5:" + self.get(4))
def log(self, text):
log_File = open("Log.txt", "a")
log_File.write(text + '\n')
log_File.close()
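# Illustrative usage sketch (assumes the module-level `app` already exists, as it does at runtime):
#   console = Console(size=10)
#   console.insert('Connection set: Enabled')
# insert() timestamps the message, skips consecutive duplicates via app.console_last,
# appends it to Log.txt, drops the oldest entry once `size` is exceeded, and refreshes
# the five console StringVars shown in the UI.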
class TreeData:
# TreeData Class for reading writing and saving
def __init__(self):
self.values = []
self.read_data = []
self.write_data = []
self.all_data =[]
def clear(self):
self.values.clear()
self.read_data.clear()
self.write_data.clear()
self.all_data.clear()
if __name__ == "__main__":
root = tk.Tk()
root.title("Pylogix - Tools")
# Simply set the theme
root.tk.call("source", "azure.tcl")
root.tk.call("set_theme", "dark")
app = App(root)
app.pack(fill="both", expand=True)
# Set a minsize for the window, and place it in the middle
root.update()
root.minsize(root.winfo_width(), root.winfo_height())
x_coordinate = int((root.winfo_screenwidth() / 2) - (root.winfo_width() / 2))
y_coordinate = int((root.winfo_screenheight() / 2) - (root.winfo_height() / 2))
root.geometry("+{}+{}".format(x_coordinate, y_coordinate - 20))
# Delay application startup, then begin the PLC polling loop
root.after(2000, app.plc_routine, True)
root.mainloop()
```
|
{
"source": "jeremylt/AlexaAutoDM",
"score": 3
}
|
#### File: jeremylt/AlexaAutoDM/autoDM.py
```python
from lambdaHandlers import alexa_lambda_handler, web_lambda_handler
# ------------------------------------------------------------------------------
# Initial handler
# ------------------------------------------------------------------------------
def lambda_handler(event, context):
""" Determine if Alexa or Web request and pass to correct handler """
# Check for web reqest
is_web_event = 'web' in event.keys()
if is_web_event:
return web_lambda_handler(event, context)
else:
return alexa_lambda_handler(event, context)
# ------------------------------------------------------------------------------
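# Illustrative sketch (event shapes are assumptions, not taken from the repo): any event
# containing a 'web' key is routed to the web handler, everything else to the Alexa handler.
#   lambda_handler({'web': True, 'body': '...'}, None)             # -> web_lambda_handler
#   lambda_handler({'request': {'type': 'IntentRequest'}}, None)   # -> alexa_lambda_handler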
```
#### File: AlexaAutoDM/lambdaHandlers/utilities.py
```python
IS_DEBUG = False
def debug_print(*args):
""" Print if in debugging mode """
if (IS_DEBUG):
print(*args)
# ------------------------------------------------------------------------------
def is_error_string(return_value):
""" Check if return value is error message
>>> is_error_string(42)
False
>>> is_error_string("Some message")
False
>>> is_error_string("ERROR: some error message")
True
"""
return isinstance(return_value, str) and return_value.find("ERROR") != -1
# ------------------------------------------------------------------------------
def to_int(string):
""" Convert string to int
>>> to_int("42")
42
"""
try:
number = int(float(string))
except (TypeError, ValueError):
number = 0
return number
# ------------------------------------------------------------------------------
def to_float(string):
""" Convert string to float
>>> to_float("42.0")
42.0
"""
try:
number = float(string)
except (TypeError, ValueError):
number = 0.0
return number
# ------------------------------------------------------------------------------
def to_string(number):
""" Convert float/int to string
>>> to_string(13)
'13'
"""
return str(int(number))
# ------------------------------------------------------------------------------
```
|
{
"source": "jeremylt/freeCodeCampProjects",
"score": 3
}
|
#### File: freeCodeCampProjects/Scientific-Computing-with-Python/02-budget-app.py
```python
class Category:
def __init__(self, name):
self._name = name
self._balance = 0
self.ledger = []
def __str__(self):
output_string = self._name.center(30, "*")
for item in self.ledger:
output_string += "\n" + item["description"].ljust(23, " ")[0:23]
output_string += "{0:.2f}".format(item["amount"]).rjust(7, " ")
output_string += "\nTotal: " + "{:.2f}".format(self._balance)
return output_string
def get_balance(self):
return self._balance
def check_funds(self, amount):
return amount <= self.get_balance()
def deposit(self, amount, description=""):
self._balance += amount
self.ledger.append({"amount": 1.0 * amount, "description": description})
def withdraw(self, amount, description=""):
# only withdraw if adequate funds are present
if self.check_funds(amount):
self._balance -= amount
self.ledger.append({"amount": -1.0 * amount, "description": description})
return True
return False
def transfer(self, amount, category):
description = "Transfer to " + category._name
withdrawn = self.withdraw(amount, description)
if withdrawn:
description = "Transfer from " + self._name
category.deposit(amount, description)
return withdrawn
def create_spend_chart(categories):
category_info = []
total_spent = 0
# find amount spent in each category
for category in categories:
spent = 0
for item in category.ledger:
if item["amount"] < 0:
spent -= item["amount"]
total_spent += spent
category_info.append({"name": category._name, "spent": spent})
# calculate percentages
category_info = [
{"name": x["name"], "percent": x["spent"] * 100.0 / total_spent}
for x in category_info
]
# build output
output_string = "Percentage spent by category\n"
for percent in range(100, -1, -10):
output_string += str(percent).rjust(3, " ") + "| "
for category in category_info:
output_string += "o " if category["percent"] >= percent else " "
output_string += "\n"
output_string += " -" + ("---" * len(category_info))
longest_name_len = max([len(x["name"]) for x in category_info])
for i in range(0, longest_name_len):
output_string += "\n "
for category in category_info:
if i < len(category["name"]):
output_string += category["name"][i] + " "
else:
output_string += " "
return output_string
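# Illustrative usage sketch (category names and amounts are arbitrary, not part of the solution):
#   food = Category("Food")
#   food.deposit(1000, "initial deposit")
#   food.withdraw(10.15, "groceries")
#   clothing = Category("Clothing")
#   food.transfer(50, clothing)
#   print(food)                                  # ledger plus running total
#   print(create_spend_chart([food, clothing]))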
```
|
{
"source": "jeremy-ma/gmmmc",
"score": 3
}
|
#### File: gmmmc/proposals/gaussian_proposals.py
```python
import numpy as np
from gmmmc.gmm import GMM
from gmmmc.proposals.proposals import Proposal
import pdb
import logging
class GaussianStepMeansProposal(Proposal):
"""Gaussian Proposal distribution for means of a GMM"""
def __init__(self, step_sizes=(0.001,)):
"""
Gaussian proposal distribution for the means. The multivariate Gaussian is centered at the means of the current
state in the Markov Chain and has covariance given by step_sizes. Multiple step sizes can be specified.
The proposal algorithm will take these steps in the sequence specified in step_sizes.
Parameters
----------
step_sizes : 1-D array_like
Iterable containing the sequence of step sizes (covariances of the Gaussian proposal distribution).
"""
super(GaussianStepMeansProposal, self).__init__()
self.step_sizes = step_sizes
self.count_accepted = np.zeros((len(step_sizes),))
self.count_illegal = np.zeros((len(step_sizes),))
self.count_proposed = np.zeros((len(step_sizes),))
def propose(self, X, gmm, target, n_jobs=1):
"""
Propose a new set of GMM means.
Parameters
----------
X : 2-D array like of shape (n_samples, n_features)
The observed data or evidence.
gmm : GMM object
The current state (set of gmm parameters) in the Markov Chain
target : GMMPosteriorTarget object
The target distribution to be found.
n_jobs : int
Number of cpu cores to use in the calculation of log probabilities.
Returns
-------
: GMM
A new GMM object initialised with new mean parameters.
"""
new_means = np.array(gmm.means)
beta = target.beta
prior = target.prior
steps = [np.random.multivariate_normal(np.zeros(gmm.n_features),
step_size * np.eye(gmm.n_features),
size=gmm.n_mixtures)
for step_size in self.step_sizes]
# calculation of prior probabilities of only the means, since only means will change
log_priors = np.array([prior.means_prior.log_prob_single(gmm.means[mixture], mixture) for mixture in xrange(gmm.n_mixtures)])
log_prob_priors = np.sum(log_priors)
previous_prob = beta * gmm.log_likelihood(X, n_jobs) + np.sum(log_priors)
for i, step in enumerate(steps):
for mixture in xrange(gmm.n_mixtures):
self.count_proposed[i] += 1
# propose new means
new_mixture_means = gmm.means[mixture] + step[mixture]
# try out the new means
proposed_means = np.array(new_means)
proposed_means[mixture] = new_mixture_means
proposed_gmm = GMM(proposed_means, np.array(gmm.covars), np.array(gmm.weights))
# calculate new prior
new_log_prob_mixture = prior.means_prior.log_prob_single(new_mixture_means, mixture)
new_log_prob_priors = log_prob_priors - log_priors[mixture] + new_log_prob_mixture
# priors
proposed_prob = beta * proposed_gmm.log_likelihood(X, n_jobs) + new_log_prob_priors
# ratio
ratio = proposed_prob - previous_prob
if ratio > 0 or ratio > np.log(np.random.uniform()):
# accept proposal
new_means = proposed_means
previous_prob = proposed_prob
# update prior probability calculation
log_prob_priors = new_log_prob_priors
log_priors[mixture] = new_log_prob_mixture
self.count_accepted[i] += 1
return GMM(new_means, np.array(gmm.covars), np.array(gmm.weights))
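# Illustrative usage sketch (construction of X, the current GMM, and the target object is
# assumed to follow the rest of the library and is not shown here):
#   proposal = GaussianStepMeansProposal(step_sizes=(0.0001, 0.001, 0.01))
#   new_gmm = proposal.propose(X, current_gmm, target, n_jobs=1)
#   acceptance_rates = proposal.count_accepted / proposal.count_proposed  # one rate per step size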
class GaussianStepCovarProposal(Proposal):
def __init__(self, step_sizes=(0.001,)):
"""
Gaussian proposal function for the covariances of the GMM.
Parameters
----------
step_sizes : array_like
Array of covariance values for the Gaussian proposal.
"""
super(GaussianStepCovarProposal, self).__init__()
self.step_sizes = step_sizes
self.count_accepted = np.zeros((len(step_sizes),))
self.count_illegal = np.zeros((len(step_sizes),))
self.count_proposed = np.zeros((len(step_sizes),))
def propose(self, X, gmm, target, n_jobs=1):
"""
Propose a new set of GMM covariances (diagonal only).
Parameters
----------
X : 2-D array like of shape (n_samples, n_features)
The observed data or evidence.
gmm : GMM object
The current state (set of gmm parameters) in the Markov Chain
target : GMMPosteriorTarget object
The target distribution to be found.
n_jobs : int
Number of cpu cores to use in the calculation of log probabilities.
Returns
-------
: GMM
A new GMM object initialised with new covariance parameters.
"""
new_covars = np.array(gmm.covars)
beta = target.beta
prior = target.prior
previous_prob = beta * gmm.log_likelihood(X, n_jobs) + prior.log_prob(gmm)
steps = [np.random.multivariate_normal(np.zeros(gmm.n_features),
step_size * np.eye(gmm.n_features),
size=gmm.n_mixtures) for step_size in self.step_sizes]
log_priors = np.array([prior.means_prior.log_prob_single(gmm.means[mixture], mixture) for mixture in xrange(gmm.n_mixtures)])
log_prob_priors = np.sum(log_priors)
for i, step in enumerate(steps):
for mixture in xrange(gmm.n_mixtures):
self.count_proposed[i] += 1
# propose new covars
new_mixture_covars = gmm.covars[mixture] + step[mixture]
if (new_mixture_covars > 0).all(): # check covariances are valid
# try out the new covars
proposed_covars = np.array(new_covars)
proposed_covars[mixture] = new_mixture_covars
proposed_gmm = GMM(np.array(gmm.means), proposed_covars, np.array(gmm.weights))
# calculate desired distribution
new_log_prob_mixture = prior.covars_prior.log_prob_single(new_mixture_covars, mixture)
new_log_prob_priors = log_prob_priors - log_priors[mixture] + new_log_prob_mixture
proposed_prob = beta * proposed_gmm.log_likelihood(X, n_jobs) + new_log_prob_priors
# ratio
ratio = proposed_prob - previous_prob
if ratio > 0 or ratio > np.log(np.random.uniform()):
# accept proposal
new_covars = proposed_covars
previous_prob = proposed_prob
log_prob_priors = new_log_prob_priors
log_priors[mixture] = new_log_prob_mixture
self.count_accepted[i] += 1
else:
self.count_illegal[i] += 1
return GMM(np.array(gmm.means), np.array(new_covars), np.array(gmm.weights))
class GaussianStepWeightsProposal(Proposal):
def __init__(self, n_mixtures, step_sizes=(0.001,), threshold=0.001):
"""
Gaussian proposal function for the weights of a GMM.
Parameters
----------
n_mixtures
step_sizes
Notes
----------
The proposal function works by projecting the weight vector w onto the simplex defined by
w_1 + w_2 + ... + w_n = 1, 0 <= w_i <= 1. The change of basis matrix is found by finding n-1 vectors lying on the plane
and using Gram-Schmidt to get an orthonormal basis. A Gaussian proposal function in (n-1)-dimensional space is
used to find the next point on the simplex.
"""
super(GaussianStepWeightsProposal, self).__init__()
self.step_sizes = step_sizes
self.n_mixtures = n_mixtures
self.count_accepted = np.zeros((len(step_sizes),))
self.count_illegal = np.zeros((len(step_sizes),))
self.count_proposed = np.zeros((len(step_sizes),))
self.threshold = threshold
if n_mixtures > 1:
# get change of basis matrix mapping n dim coordinates to n-1 dim coordinates on simplex
# x1 + x2 + x3 ..... =1
points = np.random.dirichlet([1 for i in xrange(n_mixtures)], size=n_mixtures - 1)
points = points.T
self.plane_origin = np.ones((n_mixtures)) / float(n_mixtures)
# get vectors parallel to plane from its center (1/n,1/n,....)
parallel = points - np.ones(points.shape) / float(n_mixtures)
# do Gram-Schmidt to get mutually orthonormal vectors (basis)
self.e, _ = np.linalg.qr(parallel)
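# Illustrative sketch (a hypothetical 3-mixture case) of the simplex projection used below:
#   proposal = GaussianStepWeightsProposal(3, step_sizes=(0.001,))
#   w = np.array([0.5, 0.3, 0.2])
#   coords = proposal.transformSimplex(w)          # 2-D coordinates on the simplex plane
#   w_back = proposal.invTransformSimplex(coords)  # recovers w (up to numerical precision)
# because self.e is an orthonormal basis for the plane containing all valid weight vectors.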
def transformSimplex(self, weights):
"""
Project weight vector onto the normal simplex.
Parameters
----------
weights : array_like of shape (n_mixtures,)
vector of weights for each gaussian component
Returns
-------
: array_like of shape (n_mixtures-1,)
vector of weights projected onto the simplex plane
"""
# project onto the simplex
return np.dot(self.e.T, weights - self.plane_origin)
def invTransformSimplex(self, simplex_coords):
"""
Transforms a point on the simplex to the original vector space.
Parameters
----------
simplex_coords : array_like of shape (n_mixtures - 1,)
Coordinates of a weight vector on the simplex.
Returns
-------
: array_like of shape(n_mixtures,)
vector of weights.
"""
return self.plane_origin + np.dot(self.e, simplex_coords)
def propose(self, X, gmm, target, n_jobs=1):
"""
Propose a new set of weight vectors.
Parameters
----------
X : 2-D array like of shape (n_samples, n_features)
The observed data or evidence.
gmm : GMM object
The current state (set of gmm parameters) in the Markov Chain
target : GMMPosteriorTarget object
The target distribution to be found.
n_jobs : int
Number of cpu cores to use in the calculation of log probabilities.
Returns
-------
: GMM
A new GMM object initialised with new covariance parameters.
"""
accepted = False
cur_gmm = gmm
if gmm.n_mixtures > 1:
for i, step_size in enumerate(self.step_sizes):
self.count_proposed[i] += 1
current_weights_transformed = self.transformSimplex(cur_gmm.weights)
proposed_weights_transformed = np.random.multivariate_normal(current_weights_transformed,
np.eye(self.n_mixtures - 1) * step_size)
proposed_weights = self.invTransformSimplex(proposed_weights_transformed)
if np.logical_and(0 <= proposed_weights, proposed_weights <= 1).all()\
and np.isclose(np.sum(proposed_weights), 1.0) and (proposed_weights>self.threshold).all():
previous_prob = target.log_prob(X, cur_gmm, n_jobs)
proposed_gmm = GMM(np.array(cur_gmm.means), np.array(cur_gmm.covars), proposed_weights)
proposed_prob = target.log_prob(X, proposed_gmm, n_jobs)
ratio = proposed_prob - previous_prob
if ratio > 0 or ratio > np.log(np.random.uniform()):
# accept proposal
self.count_accepted[i] += 1
accepted = True
cur_gmm = proposed_gmm
else:
self.count_illegal[i] += 1
if accepted is True:
return cur_gmm
else:
return GMM(np.array(gmm.means), np.array(gmm.covars), np.array(gmm.weights))
class GaussianTuningStepMeansProposal(Proposal):
"""Gaussian Proposal distribution for means of a GMM"""
def __init__(self, step_sizes=(0.001,), limit=200):
"""
Gaussian proposal distribution for the means. The multivariate Gaussian is centered at the means of the current
state in the Markov Chain and has covariance given by step_sizes. Multiple step sizes can be specified.
The proposal algorithm will take these steps in the sequence specified in step_sizes.
Parameters
----------
step_sizes : 1-D array_like
Iterable containing the sequence of step sizes (covariances of the Gaussian proposal distribution).
"""
super(GaussianTuningStepMeansProposal, self).__init__()
self.limit = limit
self.count_steps = 0
self.count_acceptance_bucket = np.zeros((len(step_sizes),))
self.record = []
self.step_sizes = step_sizes
self.count_accepted = np.zeros((len(step_sizes),))
self.count_illegal = np.zeros((len(step_sizes),))
self.count_proposed = np.zeros((len(step_sizes),))
def propose(self, X, gmm, target, n_jobs=1):
"""
Propose a new set of GMM means.
Parameters
----------
X : 2-D array like of shape (n_samples, n_features)
The observed data or evidence.
gmm : GMM object
The current state (set of gmm parameters) in the Markov Chain
target : GMMPosteriorTarget object
The target distribution to be found.
n_jobs : int
Number of cpu cores to use in the calculation of log probabilities.
Returns
-------
: GMM
A new GMM object initialised with new mean parameters.
"""
new_means = np.array(gmm.means)
beta = target.beta
prior = target.prior
steps = [np.random.multivariate_normal(np.zeros(gmm.n_features),
step_size * np.eye(gmm.n_features),
size=gmm.n_mixtures)
for step_size in self.step_sizes]
# calculation of prior probabilities of only the means, since only means will change
log_priors = np.array([prior.means_prior.log_prob_single(gmm.means[mixture], mixture) for mixture in xrange(gmm.n_mixtures)])
log_prob_priors = np.sum(log_priors)
previous_prob = beta * gmm.log_likelihood(X, n_jobs) + np.sum(log_priors)
for i, step in enumerate(steps):
for mixture in xrange(gmm.n_mixtures):
self.count_proposed[i] += 1
self.count_steps += 1
# propose new means
new_mixture_means = gmm.means[mixture] + step[mixture]
# try out the new means
proposed_means = np.array(new_means)
proposed_means[mixture] = new_mixture_means
proposed_gmm = GMM(proposed_means, np.array(gmm.covars), np.array(gmm.weights))
# calculate new prior
new_log_prob_mixture = prior.means_prior.log_prob_single(new_mixture_means, mixture)
new_log_prob_priors = log_prob_priors - log_priors[mixture] + new_log_prob_mixture
# priors
proposed_prob = beta * proposed_gmm.log_likelihood(X, n_jobs) + new_log_prob_priors
# ratio
ratio = proposed_prob - previous_prob
if ratio > 0 or ratio > np.log(np.random.uniform()):
# accept proposal
new_means = proposed_means
previous_prob = proposed_prob
# update prior probability calculation
log_prob_priors = new_log_prob_priors
log_priors[mixture] = new_log_prob_mixture
self.count_accepted[i] += 1
self.count_acceptance_bucket[i] += 1
if self.count_steps > self.limit:
for i, acceptances in enumerate(self.count_acceptance_bucket):
acceptance_ratio = acceptances / self.count_steps
if acceptance_ratio > 0.46:
self.step_sizes[i] = self.step_sizes[i] * 2
elif acceptance_ratio < 0.11:
self.step_sizes[i] = self.step_sizes[i] / 2
logging.info("Acceptance Rates: {0}".format(self.count_acceptance_bucket / self.count_steps))
self.record.append(self.count_acceptance_bucket / self.count_steps)
self.count_steps = 0
self.count_acceptance_bucket = np.zeros((len(self.step_sizes),))
return GMM(new_means, np.array(gmm.covars), np.array(gmm.weights))
```
|
{
"source": "jeremymanning/davos",
"score": 3
}
|
#### File: davos/core/exceptions.py
```python
__all__ = [
'DavosError',
'DavosConfigError',
'DavosParserError',
'OnionParserError',
'OnionArgumentError',
'ParserNotImplementedError',
'SmugglerError',
'InstallerError'
]
from argparse import ArgumentError
from shutil import get_terminal_size
from subprocess import CalledProcessError
from textwrap import fill, indent
import IPython
class DavosError(Exception):
"""Base class for all `davos` library exceptions."""
class DavosConfigError(DavosError):
"""Class for errors related to the `davos.config` object."""
def __init__(self, field, msg):
"""
Parameters
----------
field : str
The config field about which the exception should be raised
msg : str
The specific error message
"""
self.field = field
self.msg = msg
super().__init__(f"'davos.config.{field}': {msg}")
class DavosParserError(SyntaxError, DavosError):
"""
Base class for errors raised during the pre-execution parsing phase.
Any `davos` exception classes related to user input-parsing step
must inherit from this class in order to work in IPython
environments.
Notes
-----
Since the raw cell contents haven't been passed to the Python
compiler yet, IPython doesn't allow input transformers to raise any
exceptions other than `SyntaxError` during the input transformation
phase. All others will cause the cell to hang indefinitely, meaning
all `davos` library exception classes related to the parsing phase
must inherit from `SyntaxError` in order to work.
"""
def __init__(
self,
msg=None,
target_text=None,
target_offset=1
):
"""
Parameters
----------
msg : str, optional
The error message to be displayed.
target_text : str, optional
The text of the code responsible for the error, to be
displayed in Python's `SyntaxError`-specific traceback
format. Typically, this is the full text of the line on
which the error occurred, with whitespace stripped. If
`None` (default), no text is shown and `target_offset` has
no effect.
target_offset : int, optional
The (*1-indexed*) column offset from the beginning of
`target_text` where the error occurred. Defaults to `1` (the
first character in `target_text`).
"""
# note: flot is a 4-tuple of (filename, lineno, offset, text)
# passed to the SyntaxError constructor.
if target_text is None or IPython.version_info[0] >= 7:
flot = (None, None, None, None)
else:
from davos import config
xform_manager = config.ipython_shell.input_transformer_manager
# number of "real" lines in the current cell before the
# start of the current "chunk" (potentially multi-line
# python statement) being parsed
n_prev_lines = len(xform_manager.source.splitlines())
all_cell_lines = xform_manager.source_raw.splitlines()
# remaining cell lines starting with the beginning of the
# current "chunk" (no way to isolate just the current chunk)
rest_cell_lines = all_cell_lines[n_prev_lines:]
for ix, line in enumerate(rest_cell_lines, start=1):
if target_text in line:
lineno = n_prev_lines + ix
offset = line.index(target_text) + target_offset
break
else:
offset = None
lineno = None
# leave lineno as None so IPython will fill it in as
# "<ipython-input-...>"
flot = (None, lineno, offset, target_text)
super().__init__(msg, flot)
class OnionParserError(DavosParserError):
"""Class for errors related to parsing the onion comment syntax."""
class OnionArgumentError(ArgumentError, OnionParserError):
"""
Class for errors related to arguments provided via an onion comment.
This exception class inherits from both `OnionParserError` and
`argparse.ArgumentError`. It functions as an onion comment-specific
analog of `argparse.ArgumentError`, whose key distinction is that
it can be raised during IPython's pre-execution phase (due to
inheriting from `DavosParserError`). Instances of this exception can
be expected to support attributes defined on both parents.
"""
def __init__(self, msg, argument=None, onion_txt=None):
"""
Parameters
----------
msg : str
The error message to be displayed.
argument : str, optional
The argument responsible for the error. If `None` (default),
determines the argument name from `msg`, which will be the
error message from an `argparse.ArgumentError` instance.
onion_txt : str, optional
The text of the installer arguments from onion comment in
which the error occurred (i.e., the full onion comment with
`# <installer>:` removed). If `None` (default), the error
will not be displayed in Python's `SyntaxError`-specific
traceback format.
"""
if (
msg is not None and
argument is None and
msg.startswith('argument ')
):
split_msg = msg.split()
argument = split_msg[1].rstrip(':')
msg = ' '.join(split_msg[2:])
if (onion_txt is not None) and (argument is not None):
# default sorting is alphabetical where '--a' comes before
# '-a', so long option name will always be checked first,
# which is what we want
for aname in sorted(argument.split('/')):
if aname in onion_txt:
target_offset = onion_txt.index(aname)
break
else:
target_offset = 0
else:
target_offset = 0
# both `argparse.ArgumentError` and `SyntaxError` are
# non-cooperative, so need to initialize them separately rather
# than just running through the MRO via a call to super()
ArgumentError.__init__(self, argument=None, message=msg)
OnionParserError.__init__(self, msg=msg, target_text=onion_txt,
target_offset=target_offset)
self.argument_name = argument
class ParserNotImplementedError(OnionParserError, NotImplementedError):
"""
Class for errors related to yet-to-be-implemented onion parsers.
This exception is an onion comment-specific subclass of the built-in
`NotImplementedError` that also inherits from `OnionParserError`,
allowing it to be raised during IPython's pre-execution phase (due
to inheriting from `DavosParserError`). This error is specifically
raised when a user specifies an installer program (via an onion
comment) whose command line parser has not yet been added to `davos`
"""
class SmugglerError(DavosError):
"""Base class for errors raised during the smuggle phase"""
class TheNightIsDarkAndFullOfTerrors(SmugglerError):
"""A little Easter egg for if someone tries to `smuggle davos`"""
class InstallerError(SmugglerError, CalledProcessError):
"""
Class for errors related to the installer program
This exception is raised when the installer program itself (rather
than `davos`) encounters an error (e.g., failure to connect to
upstream package repository, find package with a given name, resolve
local environment, etc.).
"""
@classmethod
def from_error(cls, cpe, show_output=None):
"""
Create a class instance from a `subprocess.CalledProcessError`
Parameters
----------
cpe : subprocess.CalledProcessError
The exception from which to create the `InstallerError`
show_output : bool, optional
Whether or not to include the failed command's stdout and/or
stderr in the error message. If `None` (default),
stdout/stderr will be displayed if the `suppress_stdout`
field of `davos.config` is currently set to `True` (i.e.,
stdout would have been suppressed during execution).
Returns
-------
InstallerError
The exception instance.
"""
if not isinstance(cpe, CalledProcessError):
raise TypeError(
"InstallerError.from_error() requires a "
f"'subprocess.CalledProcessError' instance, not a {type(cpe)}"
)
return cls(returncode=cpe.returncode,
cmd=cpe.cmd,
output=cpe.output,
stderr=cpe.stderr,
show_output=show_output)
def __init__(
self,
returncode,
cmd,
output=None,
stderr=None,
show_output=None
):
"""
Parameters
----------
returncode : int
Return code for the failed command.
cmd : str
Text of the failed command.
output : str, optional
stdout generated from executing `cmd`.
stderr : str, optional
stderr generated from executing `cmd`.
show_output : bool, optional
Whether or not to include the failed command's stdout and/or
stderr in the error message. If `None` (default),
stdout/stderr will be displayed if the `suppress_stdout`
field of `davos.config` is currently set to `True` (i.e.,
stdout would have been suppressed during execution).
"""
super().__init__(returncode=returncode, cmd=cmd, output=output,
stderr=stderr)
if show_output is None:
from davos import config
# if stdout from installer command that raised error was
# suppressed, include it in the error message
self.show_output = config.suppress_stdout
else:
self.show_output = show_output
def __str__(self):
msg = super().__str__()
if self.show_output and (self.output or self.stderr):
msg = f"{msg} See below for details."
textwidth = min(get_terminal_size().columns, 85)
if self.output:
text = fill(self.output, textwidth, replace_whitespace=False)
msg = f"{msg}\n\nstdout:\n{indent(text, ' ')}"
if self.stderr:
text = fill(self.stderr, textwidth, replace_whitespace=False)
msg = f"{msg}\n\nstderr:\n{indent(text, ' ')}"
return msg
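# Illustrative usage sketch (the command and package name are placeholders, not davos API):
#   import subprocess
#   try:
#       subprocess.run('pip install some-nonexistent-package', shell=True, check=True,
#                      capture_output=True, text=True)
#   except subprocess.CalledProcessError as cpe:
#       raise InstallerError.from_error(cpe, show_output=True) from None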
```
#### File: davos/core/parsers.py
```python
__all__ = ['EditableAction', 'OnionParser', 'pip_parser', 'SubtractAction']
import sys
from argparse import (
Action,
ArgumentError,
ArgumentTypeError,
ArgumentParser,
SUPPRESS
)
from textwrap import dedent, indent
from davos.core.exceptions import OnionArgumentError
class OnionParser(ArgumentParser):
"""
`argparse.ArgumentParser` subclass for parsing Onion comments.
This class is essentially used to reimplement the installer
programs' existing command line parsers with some slight tweaks
specific to parsing Onion comments during the notebook's pre-cell
execution phase. Since the arguments are eventually passed to the
actual installer's parser, this is, admittedly, slightly redundant.
However, it affords a number of advantages, such as:
- protecting against shell injections (e.g., `# pip: --upgrade
numpy && rm -rf /` fails due to the OnionParser, but would
otherwise execute successfully)
- detecting errors quickly, before spawning a subprocesses
- allowing `davos` to adopt per-smuggle-command behaviors based
on user-provided arguments (e.g., `--force-reinstall`,
`-I/--ignore-installed`, `--no-input`, `-v/--verbose`, etc.)
See Also
--------
argparse.ArgumentParser :
https://docs.python.org/3/library/argparse.html#argumentparser-objects
Notes
-----
Currently supported arguments are based on `pip` v21.1.3, regardless
of the user's local `pip` version. However, the vast majority of
arguments are consistent across versions.
"""
# pylint: disable=signature-differs
def parse_args(self, args, namespace=None):
"""
Parse installer options as command line arguments.
Parameters
----------
args : list of str
Command line arguments for the installer program specified
in an Onion comment, split into a list of strings.
namespace : object, optional
An object whose namespace should be populated with the
parsed arguments. If `None` (default), creates a new, empty
`argparse.Namespace` object.
Returns
-------
object
The populated object passed as `namespace`
"""
# pylint: disable=attribute-defined-outside-init
self._args = ' '.join(args)
try:
ns, extras = super().parse_known_args(args=args,
namespace=namespace)
except (ArgumentError, ArgumentTypeError) as e:
if isinstance(e, OnionArgumentError):
raise
if isinstance(e, ArgumentError):
raise OnionArgumentError(msg=e.message,
argument=e.argument_name,
onion_txt=self._args) from None
raise OnionArgumentError(msg=e.args[0],
onion_txt=self._args) from None
else:
if extras:
msg = f"Unrecognized arguments: {' '.join(extras)}"
raise OnionArgumentError(msg=msg,
argument=extras[0],
onion_txt=self._args)
return ns
finally:
# noinspection PyAttributeOutsideInit
self._args = None
def error(self, message):
"""
Raise an OnionArgumentError with a given message.
This is needed to override `argparse.ArgumentParser.error()`,
which exits the program when called (in response to an exception
being raised) because `argparse` is generally intended for command
line interfaces. The general idea is to affect the
stack trace displayed for the user as little as possible, while
also ensuring all exceptions raised inherit from `SyntaxError`
so they can be successfully raised during the notebook cell
pre-execution step.
Parameters
----------
message : str
The error message to be displayed for the raised exception.
"""
if sys.exc_info()[1] is not None:
raise # pylint: disable=misplaced-bare-raise
if message == 'one of the arguments -e/--editable is required':
message = 'Onion comment must specify a package name'
raise OnionArgumentError(msg=message, onion_txt=self._args)
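# Illustrative sketch (hedged; the surrounding onion-comment handling happens elsewhere in
# davos): parsing the installer arguments from a comment like "# pip: numpy --upgrade -vv"
# amounts to
#   ns = pip_parser.parse_args(['numpy', '--upgrade', '-vv'])
#   # ns.spec == 'numpy', ns.upgrade is True, ns.verbosity == 2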
class EditableAction(Action):
"""
`argparse.Action` subclass for pip's `-e/--editable <path/url>` arg.
The `-e/--editable` argument is implemented in a slightly unusual
way in order to allow the OnionParser to mimic the behavior of the
real `pip install` command's far more complex parser. The argument
is optional (default: `False`), and is mutually exclusive with the
positional `spec` argument. If provided, it accepts/requires a
single value (metavar: `'<path/url>'`). However, rather than storing
the value in the `argparse.Namespace` object's "editable" attribute,
the EditableAction stores `editable=True` and assigns the passed
value to "`spec`". This way:
1. As with the real `pip install` command, the project path/url
for an editable install must immediately follow the
`-e/--editable` argument (i.e., `/some/file/path -e` is
invalid)
2. The parser accepts only a single regular or editable package
spec (since an Onion comment can only ever refer to a single
package).
3. After parsing, argument values and types are consistent and
easier to handle: `spec` will always be the package spec and
`editable` will always be a `bool`.
"""
# noinspection PyShadowingBuiltins
def __init__(
self,
option_strings,
dest,
default=None,
metavar=None,
help=None
):
"""
Parameters
----------
option_strings : sequence of str
Command line option strings which should be associated with
this action.
dest : str
The name of the attribute to hold the created object(s).
default : multiple types, optional
The value to be produced if the option is not specified
(default: `None`).
metavar : str, optional
The name to be used for the option's argument with the help
string. If `None` (default), the `dest` value will be used
as the name.
help : str, optional
The help string describing the argument.
Notes
-----
`argparse.Action` subclass constructors must take `dest` as a
positional argument, but `self.dest` will always be `'editable'`
for EditableAction instances.
"""
super().__init__(option_strings, dest, default=default,
metavar=metavar, help=help)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, 'editable', True)
setattr(namespace, 'spec', values)
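# Illustrative sketch (the path is a placeholder): parsing an editable install spec such as
#   ns = pip_parser.parse_args(['-e', './my_project'])
# leaves ns.editable == True and ns.spec == './my_project', mirroring how pip itself
# treats "-e <path/url>".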
class SubtractAction(Action):
"""
`argparse.Action` subclass for subtracting from an attribute.
This action functions as the inverse of the `argparse` module's
built-in `'count'` action (`argparse._CountAction`). For each
occurrence of a given keyword argument, it *subtracts* 1 from the
specified namespace attribute (`dest`). Generally, it can be used to
implement an argument whose presence negates that of a different
argument. For example, the `pip install` command accepts both
`-v`/`--verbose` and `-q`/`--quiet` options, which have opposite
effects on the stdout generated during installation, and both of
which may be passed multiple times. Assigning the `'count'` action
to `-v`/`--verbose`, the `SubtractAction` to `-q`/`--quiet`, and a
common `dest` to both of the two allows the ultimate verbosity to be
the net value of the two.
"""
# noinspection PyShadowingBuiltins
def __init__(
self,
option_strings,
dest,
default=None,
required=False,
help=None
):
"""
Parameters
----------
option_strings : sequence of str
Command-line option strings which should be associated with
this action.
dest : str
The name of the attribute to subtract from.
default : multiple types, optional
The value to be produced if the option is not specified
(default: `None`).
required : bool, optional
Whether the option must be specified at the command line
(default: `False`).
help : str, optional
The help string describing the argument.
"""
super().__init__(option_strings=option_strings, dest=dest, nargs=0,
default=default, required=required, help=help)
def __call__(self, parser, namespace, values, option_string=None):
curr_count = getattr(namespace, self.dest, 0)
setattr(namespace, self.dest, curr_count - 1)
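# Illustrative sketch (package name is a placeholder): because -v/--verbose ('count') and
# -q/--quiet (SubtractAction) share dest='verbosity', something like
#   ns = pip_parser.parse_args(['-vv', '-q', 'numpy'])
# nets out to ns.verbosity == 1.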
# does not include usage for `pip install [options] -r
# <requirements file> [package-index-options] ...` since it's not
# relevant to `davos` usage
_pip_install_usage = indent( # noqa: E124 pylint: disable=invalid-name
dedent("""\
pip install [options] <requirement specifier> [package-index-options] ...
pip install [options] [-e] <vcs project url> ...
pip install [options] [-e] <local project path> ...
pip install [options] <archive url/path> ..."""
), ' '
)
pip_parser = OnionParser(usage=_pip_install_usage,
add_help=False,
argument_default=SUPPRESS)
# ======== Install Options ========
pip_install_opts = pip_parser.add_argument_group(title="Install Options")
pip_install_opts.add_argument(
'-c',
'--constraint',
metavar='<file>',
help="Constrain versions using the given constraints file. This option "
"can be used multiple times."
)
pip_install_opts.add_argument(
'--no-deps',
action='store_true',
help="Don't install package dependencies."
)
pip_install_opts.add_argument(
'--pre',
action='store_true',
help="Include pre-release and development versions. By default, pip only "
"finds stable versions."
)
spec_or_editable = pip_install_opts.add_mutually_exclusive_group(required=True)
spec_or_editable.add_argument('spec', nargs='?', help=SUPPRESS)
spec_or_editable.add_argument(
'-e',
'--editable',
action=EditableAction,
default=False,
metavar='<path/url>',
help='Install a project in editable mode (i.e. setuptools "develop mode") '
'from a local project path or a VCS url.'
)
pip_install_opts.add_argument(
'-t',
'--target',
metavar='<dir>',
help="Install packages into <dir>. By default this will not replace "
"existing files/folders in <dir>. Use --upgrade to replace existing "
"packages in <dir> with new versions."
)
pip_install_opts.add_argument(
'--platform',
metavar='<platform>',
help="Only use wheels compatible with <platform>. Defaults to the "
"platform of the running system. Use this option multiple times to "
"specify multiple platforms supported by the target interpreter."
)
pip_install_opts.add_argument(
'--python-version',
metavar='<python_version>',
help='The Python interpreter version to use for wheel and '
'"Requires-Python" compatibility checks. Defaults to a version '
'derived from the running interpreter. The version can be specified '
'using up to three dot-separated integers (e.g. "3" for 3.0.0, "3.7" '
'for 3.7.0, or "3.7.3"). A major-minor version can also be given as '
'a string without dots (e.g. "37" for 3.7.0).'
)
pip_install_opts.add_argument(
'--implementation',
metavar='<implementation>',
help="Only use wheels compatible with Python implementation "
"<implementation>, e.g. 'pp', 'jy', 'cp', or 'ip'. If not specified, "
"then the current interpreter implementation is used. Use 'py' to "
"force implementation-agnostic wheels."
)
pip_install_opts.add_argument(
'--abi',
metavar='<abi>',
help="Only use wheels compatible with Python abi <abi>, e.g. 'pypy_41'. "
"If not specified, then the current interpreter abi tag is used. Use "
"this option multiple times to specify multiple abis supported by "
"the target interpreter. Generally you will need to specify "
"--implementation, --platform, and --python-version when using this "
"option."
)
pip_install_opts.add_argument(
'--user',
action='store_true',
help="Install to the Python user install directory for your platform. "
"Typically ~/.local/, or %%APPDATA%%Python on Windows. (See the Python "
"documentation for site.USER_BASE for full details.)"
)
pip_install_opts.add_argument(
'--root',
metavar='<dir>',
help="Install everything relative to this alternate root directory."
)
pip_install_opts.add_argument(
'--prefix',
metavar='<dir>',
help="Installation prefix where lib, bin and other top-level folders are "
"placed"
)
pip_install_opts.add_argument(
'--src',
metavar='<dir>',
help='Directory to check out editable projects into. The default in a '
'virtualenv is "<venv path>/src". The default for global installs is '
'"<current dir>/src".'
)
pip_install_opts.add_argument(
'-U',
'--upgrade',
action='store_true',
help="Upgrade all specified packages to the newest available version. The "
"handling of dependencies depends on the upgrade-strategy used."
)
pip_install_opts.add_argument(
'--upgrade-strategy',
metavar='<upgrade_strategy>',
help='Determines how dependency upgrading should be handled [default: '
'only-if-needed]. "eager" - dependencies are upgraded regardless of '
'whether the currently installed version satisfies the requirements '
'of the upgraded package(s). "only-if-needed" - are upgraded only '
'when they do not satisfy the requirements of the upgraded '
'package(s).'
)
pip_install_opts.add_argument(
'--force-reinstall',
action='store_true',
help="Reinstall all packages even if they are already up-to-date."
)
pip_install_opts.add_argument(
'-I',
'--ignore-installed',
action='store_true',
help="Ignore the installed packages, overwriting them. This can break "
"your system if the existing package is of a different version or "
"was installed with a different package manager!"
)
pip_install_opts.add_argument(
'--ignore-requires-python',
action='store_true',
help="Ignore the Requires-Python information."
)
pip_install_opts.add_argument(
'--no-build-isolation',
action='store_true',
help="Disable isolation when building a modern source distribution. Build "
"dependencies specified by PEP 518 must be already installed if this "
"option is used."
)
pep_517_subgroup = pip_install_opts.add_mutually_exclusive_group()
pep_517_subgroup.add_argument(
'--use-pep517',
action='store_true',
help="Use PEP 517 for building source distributions (use --no-use-pep517 "
"to force legacy behaviour)."
)
pep_517_subgroup.add_argument(
'--no-use-pep517',
action='store_true',
help=SUPPRESS
)
pip_install_opts.add_argument(
'--install-option',
action='append',
metavar='<options>',
help='Extra arguments to be supplied to the setup.py install command (use '
'like --install-option="--install-scripts=/usr/local/bin"). Use '
'multiple --install-option options to pass multiple options to '
'setup.py install. If you are using an option with a directory path, '
'be sure to use absolute path.'
)
pip_install_opts.add_argument(
'--global-option',
action='append',
metavar='<options>',
help="Extra global options to be supplied to the setup.py call before the "
"install command."
)
compile_subgroup = pip_install_opts.add_mutually_exclusive_group()
compile_subgroup.add_argument(
'--compile',
action='store_true',
help="Compile Python source files to bytecode"
)
compile_subgroup.add_argument(
'--no-compile',
action='store_true',
help="Do not compile Python source files to bytecode"
)
pip_install_opts.add_argument(
'--no-warn-script-location',
action='store_true',
help="Do not warn when installing scripts outside PATH"
)
pip_install_opts.add_argument(
'--no-warn-conflicts',
action='store_true',
help="Do not warn about broken dependencies"
)
# NOTE: in the actual pip-install implementation, `--no-binary`,
# `--only-binary`, and `--prefer-binary` triggers a fairly complex
# callback. But fortunately, we can just store all invocations and
# forward them to the real pip-install parser
pip_install_opts.add_argument(
'--no-binary',
action='append',
metavar='<format_control>',
help='Do not use binary packages. Can be supplied multiple times, and '
'each time adds to the existing value. Accepts either ":all:" to '
'disable all binary packages, ":none:" to empty the set (notice the '
'colons), or one or more package names with commas between them (no '
'colons). Note that some packages are tricky to compile and may fail '
'to install when this option is used on them.'
)
pip_install_opts.add_argument(
'--only-binary',
action='append',
metavar='<format_control>',
help='Do not use source packages. Can be supplied multiple times, and '
'each time adds to the existing value. Accepts either ":all:" to '
'disable all source packages, ":none:" to empty the set, or one or '
'more package names with commas between them. Packages without '
'binary distributions will fail to install when this option is used '
'on them.'
)
pip_install_opts.add_argument(
'--prefer-binary',
action='store_true',
help="Prefer older binary packages over newer source packages."
)
pip_install_opts.add_argument(
'--require-hashes',
action='store_true',
help="Require a hash to check each requirement against, for repeatable "
"installs. This option is implied when any package in a requirements "
"file has a --hash option."
)
pip_install_opts.add_argument(
'--progress-bar',
choices=('off', 'on', 'ascii', 'pretty', 'emoji'),
metavar='<progress_bar>',
help="Specify type of progress to be displayed "
"[off|on|ascii|pretty|emoji] (default: on)"
)
pip_install_opts.add_argument(
'--no-clean',
action='store_true',
help="Don’t clean up build directories."
)
# ======== Package Index Options ========
pip_index_opts = pip_parser.add_argument_group(title="Package Index Options")
pip_index_opts.add_argument(
'-i',
'--index-url',
metavar='<url>',
help="Base URL of the Python Package Index (default "
"https://pypi.org/simple). This should point to a repository "
"compliant with PEP 503 (the simple repository API) or a local "
"directory laid out in the same format."
)
pip_index_opts.add_argument(
'--extra-index-url',
action='append',
metavar='<url>',
help="Extra URLs of package indexes to use in addition to --index-url. "
"Should follow the same rules as --index-url."
)
pip_index_opts.add_argument(
'--no-index',
action='store_true',
help="Ignore package index (only looking at --find-links URLs instead)."
)
pip_index_opts.add_argument(
'-f',
'--find-links',
action='append',
metavar='<url>',
help="If a URL or path to an html file, then parse for links to archives "
"such as sdist (.tar.gz) or wheel (.whl) files. If a local path or "
"file:// URL that’s a directory, then look for archives in the "
"directory listing. Links to VCS project URLs are not supported."
)
# ======== General Options ========
pip_general_opts = pip_parser.add_argument_group(title='General Options')
pip_general_opts.add_argument(
'--isolated',
action='store_true',
help="Run pip in an isolated mode, ignoring environment variables and "
"user configuration."
)
# verbose and quiet should theoretically be mutually exclusive, but pip
# itself doesn't seem to implement them as such, so not worth doing so
# here
pip_general_opts.add_argument(
'-v',
'--verbose',
action='count',
dest='verbosity',
help="Give more output. Option is additive, and can be used up to 3 times."
)
pip_general_opts.add_argument(
'-q',
'--quiet',
action=SubtractAction,
dest='verbosity',
help="Give less output. Option is additive, and can be used up to 3 times "
"(corresponding to WARNING, ERROR, and CRITICAL logging levels)."
)
pip_general_opts.add_argument(
'--log',
metavar='<path>',
help="Path to a verbose appending log."
)
pip_general_opts.add_argument(
'--no-input',
action='store_true',
help="Disable prompting for input."
)
pip_general_opts.add_argument(
'--retries',
type=int,
metavar='<retries>',
help="Maximum number of retries each connection should attempt (default 5 "
"times)."
)
pip_general_opts.add_argument(
'--timeout',
type=float,
metavar='<sec>',
help="Set the socket timeout (default 15 seconds)."
)
pip_general_opts.add_argument(
'--exists-action',
choices=(
's',
'switch',
'i',
'ignore',
'w',
'wipe',
'b',
'backup',
'a',
'abort'
),
metavar='<action>',
help="Default action when a path already exists: (s)witch, (i)gnore, "
"(w)ipe, (b)ackup, (a)bort."
)
pip_general_opts.add_argument(
'--trusted-host',
metavar='<hostname>',
help="Mark this host or host:port pair as trusted, even though it does "
"not have valid or any HTTPS."
)
pip_general_opts.add_argument(
'--cert',
metavar='<path>',
help="Path to alternate CA bundle."
)
pip_general_opts.add_argument(
'--client-cert',
metavar='<path>',
help="Path to SSL client certificate, a single file containing the "
"private key and the certificate in PEM format."
)
cache_dir_subgroup = pip_general_opts.add_mutually_exclusive_group()
cache_dir_subgroup.add_argument(
'--cache-dir',
metavar='<dir>',
help="Store the cache data in <dir>."
)
cache_dir_subgroup.add_argument(
'--no-cache-dir',
action='store_true',
help="Disable the cache."
)
pip_general_opts.add_argument(
'--disable-pip-version-check',
action='store_true',
help="Don't periodically check PyPI to determine whether a new version of "
"pip is available for download. Implied with --no-index."
)
pip_general_opts.add_argument(
'--no-color',
action='store_true',
help="Suppress colored output."
)
pip_general_opts.add_argument(
'--no-python-version-warning',
action='store_true',
help="Silence deprecation warnings for upcoming unsupported Pythons."
)
pip_general_opts.add_argument(
'--use-feature',
metavar='<feature>',
help="Enable new functionality, that may be backward incompatible."
)
pip_general_opts.add_argument(
'--use-deprecated',
metavar='<feature>',
help="Enable deprecated functionality, that will be removed in the future."
)
```
#### File: davos/implementations/__init__.py
```python
__all__ = [
'auto_restart_rerun',
'full_parser',
'generate_parser_func',
'prompt_restart_rerun_buttons'
]
from davos import config
from davos.core.config import DavosConfig
from davos.core.exceptions import DavosConfigError
import_environment = config.environment
if import_environment == 'Python':
# noinspection PyUnresolvedReferences
from davos.implementations.python import (
_activate_helper,
_check_conda_avail_helper,
_deactivate_helper,
_run_shell_command_helper,
auto_restart_rerun,
generate_parser_func,
prompt_restart_rerun_buttons
)
else:
# noinspection PyUnresolvedReferences
from davos.implementations.ipython_common import (
_check_conda_avail_helper,
_run_shell_command_helper,
_set_custom_showsyntaxerror
)
_set_custom_showsyntaxerror()
if import_environment == 'IPython>=7.0':
from davos.implementations.ipython_post7 import (
_activate_helper,
_deactivate_helper,
generate_parser_func
)
from davos.implementations.jupyter import (
auto_restart_rerun,
prompt_restart_rerun_buttons
)
else:
from davos.implementations.ipython_pre7 import (
_activate_helper,
_deactivate_helper,
generate_parser_func
)
if import_environment == 'Colaboratory':
from davos.implementations.colab import (
auto_restart_rerun,
prompt_restart_rerun_buttons
)
else:
from davos.implementations.jupyter import (
auto_restart_rerun,
prompt_restart_rerun_buttons
)
from davos.core.core import check_conda, smuggle, parse_line
# Implementation-specific wrapper around davos.core.core.parse_line
full_parser = generate_parser_func(parse_line)
########################################
# ADDITIONAL DAVOS.CONFIG PROPERTIES #
########################################
# some properties are added to the davos.core.config.DavosConfig class
# here rather than when it's initially defined, because they depend on
# either:
# 1. an implementation-specific function that hasn't been determined
# yet, or
# 2. a function defined in the `davos.core.core` module (namely,
# `check_conda`) which needs to be imported after
# implementation-specific functions are set here.
# pylint: disable=unused-argument
# noinspection PyUnusedLocal
def _active_fget(conf):
"""getter for davos.config.active"""
return config._active
def _active_fset(conf, value):
"""setter for davos.config.active"""
if value is True:
_activate_helper(smuggle, full_parser)
elif value is False:
_deactivate_helper(smuggle, full_parser)
else:
raise DavosConfigError('active', "field may be 'True' or 'False'")
conf._active = value
def _conda_avail_fget(conf):
"""getter for davos.config.conda_avail"""
if conf._conda_avail is None:
check_conda()
return conf._conda_avail
# noinspection PyUnusedLocal
def _conda_avail_fset(conf, _):
"""setter for davos.config.conda_avail"""
raise DavosConfigError('conda_avail', 'field is read-only')
def _conda_env_fget(conf):
"""getter for davos.config.conda_env"""
if conf._conda_avail is None:
# _conda_env is None if we haven't checked conda yet *and* if
# conda is not available vs _conda_avail is None only if we
# haven't checked yet
check_conda()
return conf._conda_env
def _conda_env_fset(conf, new_env):
"""setter for davos.config.conda_env"""
if conf._conda_avail is None:
check_conda()
if conf._conda_avail is False:
raise DavosConfigError(
"conda_env",
"cannot set conda environment. No local conda installation found"
)
if new_env != conf._conda_env:
if (
conf._conda_envs_dirs is not None and
new_env not in conf._conda_envs_dirs.keys()
):
            local_envs = "', '".join(conf._conda_envs_dirs.keys())
raise DavosConfigError(
"conda_env",
f"unrecognized environment name: '{new_env}'. Local "
f"environments are:\n\t'{local_envs}'"
)
conf._conda_env = new_env
def _conda_envs_dirs_fget(conf):
"""getter for davos.config.conda_envs_dirs"""
if conf._conda_avail is None:
check_conda()
return conf._conda_envs_dirs
# noinspection PyUnusedLocal
def _conda_envs_dirs_fset(conf, _):
"""setter for davos.config.conda_envs_dirs"""
raise DavosConfigError('conda_envs_dirs', 'field is read-only')
DavosConfig.active = property(fget=_active_fget, fset=_active_fset)
DavosConfig.conda_avail = property(fget=_conda_avail_fget,
fset=_conda_avail_fset)
DavosConfig.conda_env = property(fget=_conda_env_fget, fset=_conda_env_fset)
DavosConfig.conda_envs_dirs = property(fget=_conda_envs_dirs_fget,
fset=_conda_envs_dirs_fset)
```
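A minimal sketch of how the properties attached above behave from user code; it assumes a session where `davos` imports cleanly, and the attribute names are taken directly from the module:
```python
# Sketch only -- requires an environment where davos (and IPython, if applicable) is installed.
import davos

davos.config.active = True           # routed through _active_fset -> _activate_helper(...)
print(davos.config.conda_avail)      # first access triggers check_conda(), then the result is cached

try:
    davos.config.conda_avail = True  # read-only: the setter always raises DavosConfigError
except Exception as err:
    print(type(err).__name__, err)
```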
|
{
"source": "jeremymanning/hypertools",
"score": 2
}
|
#### File: hypertools/align/hyperalign.py
```python
import datawrangler as dw
import numpy as np
from .procrustes import Procrustes
from .common import Aligner
from ..core import get_default_options, eval_dict
def fitter(data, n_iter=10):
assert type(data) == list, "data must be specified as a list"
n = len(data)
if n <= 1:
return data
if n_iter == 0:
return data
x = data.copy()
for i in range(n_iter):
# STEP 1: TEMPLATE
template = np.copy(x[0])
for j in range(1, len(x)):
p = Procrustes(target=template / j)
template += p.fit_transform(x[j])[0]
template /= len(x)
# STEP 2: NEW COMMON TEMPLATE
# - align each subj to template
template2 = np.zeros_like(template)
for j in range(0, len(x)):
p = Procrustes(target=template)
template2 += p.fit_transform(x[j])[0]
template2 /= len(x)
# align each subj to template2
p = [Procrustes(target=template2) for _ in x]
x = [m.fit_transform(i)[0] for m, i in zip(p, x)]
# noinspection PyUnboundLocalVariable
p = [Procrustes(target=template2) for _ in data]
_ = [m.fit(d) for m, d in zip(p, data)]
return {'proj': p}
def transformer(data, **kwargs):
assert 'proj' in kwargs.keys(), "Transformer needs to be trained before use."
proj = kwargs['proj']
assert type(proj) is list, "Projection must be a list"
assert type(data) is list, "Data must be a list"
assert len(proj) == len(data), "Must have one projection per data matrix"
return [p.transform(d)[0] for p, d in zip(proj, data)]
class HyperAlign(Aligner):
"""
Base class for HyperAlign objects. Takes a single keyword argument, n_iter, which specifies how many iterations
to run (default: 10).
"""
def __init__(self, **kwargs):
opts = dw.core.update_dict(eval_dict(get_default_options()['HyperAlign']), kwargs)
assert opts['n_iter'] >= 0, 'Number of iterations must be non-negative'
required = ['proj']
super().__init__(required=required, fitter=fitter, transformer=transformer, data=None, **opts)
for k, v in opts.items():
setattr(self, k, v)
self.required = required
self.fitter = fitter
self.transformer = transformer
self.data = None
```
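A rough usage sketch on synthetic data; the `fit_transform` method name is an assumption based on the parallel `Manipulator` class shown later in this section, and the import path simply mirrors the file header above:
```python
import numpy as np
from hypertools.align.hyperalign import HyperAlign  # assumed import path

rng = np.random.default_rng(0)
base = rng.standard_normal((50, 3))
# three "subjects": rotated copies of the same trajectory plus a little noise
data = [base @ np.linalg.qr(rng.standard_normal((3, 3)))[0]
        + 0.01 * rng.standard_normal((50, 3)) for _ in range(3)]

aligner = HyperAlign(n_iter=10)
aligned = aligner.fit_transform(data)   # fitter() stores one Procrustes projection per subject
print([a.shape for a in aligned])       # [(50, 3), (50, 3), (50, 3)]
```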
#### File: hypertools/align/null.py
```python
import datawrangler as dw
from .common import Aligner
from ..core import get_default_options, eval_dict
# noinspection PyUnusedLocal
def fitter(*args, **kwargs):
return {}
# noinspection PyUnusedLocal
def transformer(data, **kwargs):
return data
class NullAlign(Aligner):
"""
Base class for NullAlign objects. Returns the original (unmodified) dataset after
trimming and padding it.
"""
def __init__(self, **kwargs):
opts = dw.core.update_dict(eval_dict(get_default_options()['NullAlign']), kwargs)
required = []
super().__init__(required=required, fitter=fitter, transformer=transformer, data=None)
for k, v in opts.items():
setattr(self, k, v)
self.required = required
self.fitter = fitter
self.transformer = transformer
self.data = None
```
#### File: hypertools/core/configurator.py
```python
import datawrangler as dw
import os
from pkg_resources import get_distribution
from .shared import RobustDict
__version__ = get_distribution('hypertools').version
def get_default_options(fname=None):
"""
Parse a config.ini file
Parameters
----------
:param fname: absolute-path filename for the config.ini file (default: hypertools/hypertools/core/config.ini)
Returns
-------
:return: A dictionary whose keys are function names and whose values are dictionaries of default arguments and
keyword arguments
"""
if fname is None:
fname = os.path.join(os.path.dirname(__file__), 'config.ini')
return RobustDict(dw.core.update_dict(dw.core.get_default_options(), dw.core.get_default_options(fname)),
__default_value__={})
```
#### File: hypertools/core/shared.py
```python
import datawrangler as dw
@dw.decorate.list_generalizer
def unpack_model(m, valid=None, parent_class=None):
if valid is None:
valid = []
if (type(m) is str) and m in [v.__name__ for v in valid]:
return [v for v in valid if v.__name__ == m][0]
elif parent_class is not None:
try:
if issubclass(m, parent_class):
return m
except TypeError:
pass
if type(m) is dict and all([k in m.keys() for k in ['model', 'args', 'kwargs']]):
return dw.core.update_dict(m, {'model': unpack_model(m['model'], valid=valid, parent_class=parent_class)})
elif type(m) is str:
return m
else:
raise ValueError(f'unknown model: {m}')
class RobustDict(dict):
"""
Dictionary subclass with more forgiving indexing:
indexing a `RobustDict` with a key that doesn't exist returns
None (or another specified default value) instead of throwing an error.
"""
def __init__(self, *args, **kwargs):
self.default_value = kwargs.pop('__default_value__', None)
super().__init__(*args, **kwargs)
def __getitem__(self, key):
try:
return super().__getitem__(key)
        except KeyError:
return self.default_value
def __missing__(self, key):
return self.default_value
```
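The forgiving indexing is easiest to see with a couple of lookups; this sketch only uses `RobustDict` as defined above (the import path follows the file header):
```python
from hypertools.core.shared import RobustDict

d = RobustDict({'alpha': 1}, __default_value__='n/a')
print(d['alpha'])        # 1
print(d['missing'])      # 'n/a' -- __missing__ supplies the default instead of raising KeyError
print(d.get('missing'))  # None  -- .get() bypasses __missing__, so only item lookup uses the default
```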
#### File: hypertools/manip/common.py
```python
import datawrangler as dw
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
# noinspection DuplicatedCode
class Manipulator(BaseEstimator):
def __init__(self, **kwargs):
self.data = kwargs.pop('data', None)
self.fitter = kwargs.pop('fitter', None)
self.transformer = kwargs.pop('transformer', None)
self.required = kwargs.pop('required', [])
self.kwargs = kwargs
def fit(self, data):
assert data is not None, ValueError('cannot manipulate an empty dataset')
self.data = data
if self.fitter is None:
NotFittedError('null fit function; returning without fitting manipulator')
return
params = self.fitter(data, **self.kwargs)
assert type(params) is dict, ValueError('fit function must return a dictionary')
assert all([r in params.keys() for r in self.required]), ValueError('one or more required fields not'
'returned')
for k, v in params.items():
setattr(self, k, v)
def transform(self, *_):
assert self.data is not None, NotFittedError('must fit manipulator before transforming data')
for r in self.required:
assert hasattr(self, r), NotFittedError(f'missing fitted attribute: {r}')
if self.transformer is None:
RuntimeWarning('null transform function; returning without manipulating data')
return self.data
required_params = {r: getattr(self, r) for r in self.required}
return self.transformer(self.data, **dw.core.update_dict(required_params, self.kwargs))
def fit_transform(self, data):
self.fit(data)
return self.transform()
```
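To illustrate the fitter/transformer contract (the fit function must return a dict containing every `required` key), here is a toy z-scoring `Manipulator`; the z-score functions are made up for this sketch:
```python
import numpy as np
import pandas as pd
from hypertools.manip.common import Manipulator  # import path per the file header above

def zscore_fitter(data, **kwargs):
    # everything returned here is stored on the manipulator via setattr
    return {'mu': data.mean(axis=0), 'sigma': data.std(axis=0)}

def zscore_transformer(data, **kwargs):
    return (data - kwargs['mu']) / kwargs['sigma']

m = Manipulator(fitter=zscore_fitter, transformer=zscore_transformer, required=['mu', 'sigma'])
df = pd.DataFrame(np.random.randn(200, 4))
print(m.fit_transform(df).std().round(2))  # roughly 1.0 in every column
```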
#### File: hypertools/manip/resample.py
```python
import datawrangler as dw
import numpy as np
import pandas as pd
import scipy.interpolate as interpolate
from .common import Manipulator
from ..core import get
def fitter(data, **kwargs):
def listify_dicts(dicts):
if len(dicts) == 0:
return {}
ld = {}
for d in dicts:
for k in d.keys():
if k not in ld.keys():
ld[k] = [d[k]]
else:
ld[k].append(d[k])
return ld
if dw.zoo.is_multiindex_dataframe(data):
return listify_dicts([fitter(d, **kwargs) for d in dw.unstack(data)])
elif type(data) is list:
return listify_dicts([fitter(d, **kwargs) for d in data])
transpose = kwargs.pop('transpose', False)
assert 'axis' in kwargs.keys(), ValueError('Must specify axis')
if kwargs['axis'] == 1:
return fitter(data.T, **dw.core.update_dict(kwargs, {'axis': int(not kwargs['axis']), 'transpose': True}))
assert kwargs['axis'] == 0, ValueError('invalid transformation')
if dw.zoo.is_multiindex_dataframe(data):
x = np.array(data.index.levels[-1])
else:
x = data.index.values
resampled_x = np.linspace(np.min(x), np.max(x), num=kwargs['n_samples'])
pchip = pd.Series(index=data.columns)
for c in data.columns:
pchip[c] = interpolate.pchip(x, data[c].values)
return {'x': x, 'resampled_x': resampled_x, 'pchip': pchip, 'transpose': transpose, 'axis': kwargs['axis'],
'n_samples': kwargs['n_samples']}
def transformer(data, **kwargs):
if dw.zoo.is_multiindex_dataframe(data):
stack_result = True
data = dw.unstack(data)
else:
stack_result = False
if type(data) is list:
transformed_data = []
for i, d in enumerate(data):
next_kwargs = {k: get(v, i) for k, v in kwargs.items()}
transformed_data.append(transformer(d, **next_kwargs))
if stack_result:
return dw.stack(transformed_data)
else:
return transformed_data
# noinspection DuplicatedCode
transpose = kwargs.pop('transpose', False)
assert 'axis' in kwargs.keys(), ValueError('Must specify axis')
if transpose:
return transformer(data.T, **dw.core.update_dict(kwargs, {'axis': int(not kwargs['axis'])})).T
assert kwargs['axis'] == 0, ValueError('invalid transformation')
resampled = pd.DataFrame(index=kwargs['resampled_x'], columns=data.columns)
for c in data.columns:
try:
resampled[c] = kwargs['pchip'][c](kwargs['resampled_x'])
except IndexError:
resampled[c] = kwargs['pchip'][int(c)](kwargs['resampled_x'])
return resampled
class Resample(Manipulator):
# noinspection PyShadowingBuiltins
def __init__(self, axis=0, n_samples=100):
required = ['transpose', 'axis', 'n_samples', 'x', 'resampled_x', 'pchip']
super().__init__(axis=axis, fitter=fitter, transformer=transformer, data=None, n_samples=n_samples,
required=required)
self.axis = axis
self.fitter = fitter
self.transformer = transformer
self.data = None
self.n_samples = n_samples
self.required = required
```
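A quick sketch of `Resample` on a plain (non-multiindex) DataFrame; the import path is assumed from the file header, and everything else follows the `fitter`/`transformer` code above:
```python
import numpy as np
import pandas as pd
from hypertools.manip.resample import Resample  # assumed import path

t = np.linspace(0, 2 * np.pi, 25)
df = pd.DataFrame({'sin': np.sin(t), 'cos': np.cos(t)}, index=t)

resampler = Resample(axis=0, n_samples=100)
resampled = resampler.fit_transform(df)  # PCHIP-interpolates each column onto 100 evenly spaced samples
print(resampled.shape)                   # (100, 2)
```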
#### File: hypertools/plot/print.py
```python
import gif
import plotly.graph_objects as go
@gif.frame
def frame2fig(fig, i):
template = list(fig.data).copy()
frame_data = list(fig.frames[i].data).copy()
for i in range(len(template)):
for coord in ['x', 'y', 'z']:
if hasattr(template[i], coord):
setattr(template[i], coord, getattr(frame_data[i], coord))
fig = go.Figure(layout=fig.layout, data=template)
fig.update_layout(showlegend=False)
fig = fig.to_dict()
fig['layout'].pop('sliders')
fig['layout'].pop('updatemenus')
return go.Figure(fig)
def save_gif(fig, fname, framerate=30):
frames = []
for i in range(len(fig.frames)):
frames.append(frame2fig(fig, i))
gif.save(frames, fname, duration=1000 / framerate)
def hypersave(fig, fname, **kwargs):
if hasattr(fig, 'frames') and len(fig.frames) > 0:
save_gif(fig, fname, **kwargs)
else:
fig.write_image(fname, **kwargs)
```
|
{
"source": "jeremymarch/OptionsDialog",
"score": 2
}
|
#### File: pythonpath/optionsdialog/component.py
```python
import gettext
import os
import unohelper
from com.sun.star.awt import XContainerWindowEventHandler
from com.sun.star.lang import XServiceInfo
from com.sun.star.awt import XActionListener
from com.sun.star.beans import PropertyValue
# from com.sun.star.awt.PosSize import POSSIZE  # Used as the value of the PosSize key when specifying control coordinates in pixels.
import traceback
def localization(ctx):  # Localization: switches the .mo file. Not needed on Linux, but required on Windows.
    global _
    readConfig, dummy = createConfigAccessor(ctx, ctx.getServiceManager(), "/org.openoffice.Setup/L10N")  # Path to LibreOffice's language settings.
    lang = readConfig("ooLocale"),  # Get the current locale; pass it as a tuple or list.
    lodir = os.path.join(os.path.abspath(os.path.dirname(__file__)), "locale")  # Absolute path of the locale folder next to this script.
    mo = os.path.splitext(os.path.basename(__file__))[0]  # Name of the .mo file without its extension; assumed to match this script's filename.
    t = gettext.translation(mo, lodir, lang, fallback=True)  # Get a Translations instance. No error is raised even if the .mo file is missing.
    _ = t.gettext  # Bind the t.gettext function to _.
def create(ctx, *args, imple_name, service_name):
global IMPLE_NAME
global SERVICE_NAME
IMPLE_NAME = imple_name
SERVICE_NAME = service_name
    localization(ctx)  # Apply localization.
return DilaogHandler(ctx, *args)
class DilaogHandler(unohelper.Base, XServiceInfo, XContainerWindowEventHandler):  # Class registered as a UNO component.
    METHODNAME = "external_event"  # Must not be changed.
def __init__(self, ctx, *args):
self.ctx = ctx
self.smgr = ctx.getServiceManager()
        self.readConfig, self.writeConfig = createConfigAccessor(ctx, self.smgr, "/com.pq.blogspot.comp.ExtensionExample.ExtensionData/Leaves/MaximumPaperSize")  # Path to the component data node defined in config.xcs.
self.cfgnames = "Width", "Height"
self.defaults = self.readConfig("Defaults/Width", "Defaults/Height")
# XContainerWindowEventHandler
    def callHandlerMethod(self, dialog, eventname, methodname):  # Must return a boolean. dialog is a UnoControlDialog; eventname is one of the strings "initialize", "ok", or "back"; methodname is the string "external_event".
        if methodname==self.METHODNAME:  # Could this ever be False?
try:
                if eventname=="initialize":  # When the options dialog becomes active.
                    maxwidth, maxheight = self.readConfig(*self.cfgnames)  # Read the values from the component data node; they come back as strings.
                    maxwidth = maxwidth or self.defaults[0]
                    maxheight = maxheight or self.defaults[1]
                    buttonlistener = ButtonListener(dialog, self.defaults)  # Instantiate the button listener.
                    addControl = controlCreator(self.ctx, self.smgr, dialog)  # Get a function that adds controls to the options dialog.
                    addControl("FixedLine", {"PositionX": 5, "PositionY": 13, "Width": 250, "Height": 10, "Label": _("Maximum page size")})  # Horizontal line with a caption.
                    addControl("FixedText", {"PositionX": 11, "PositionY": 39, "Width": 49, "Height": 15, "Label": _("Width"), "NoLabel": True})  # Text label.
                    addControl("NumericField", {"PositionX": 65, "PositionY": 39, "Width": 60, "Height": 15, "Spin": True, "ValueMin": 0, "Value": float(maxwidth), "DecimalAccuracy": 2, "HelpText": _("Width")})  # Numeric field with spin buttons; two decimal places, value assigned after converting to float.
                    addControl("NumericField", {"PositionX": 65, "PositionY": 64, "Width": 60, "Height": 15, "Spin": True, "ValueMin": 0, "Value": float(maxheight), "DecimalAccuracy": 2, "HelpText": _("Height")})  # Same as above.
                    addControl("FixedText", {"PositionX": 11, "PositionY": 66, "Width": 49, "Height": 15, "Label": _("Height"), "NoLabel": True})  # Text label.
                    addControl("FixedText", {"PositionX": 127, "PositionY": 42, "Width": 25, "Height": 15, "Label": "cm", "NoLabel": True})  # Text label.
                    addControl("FixedText", {"PositionX": 127, "PositionY": 68, "Width": 25, "Height": 15, "Label": "cm", "NoLabel": True})  # Text label.
                    addControl("Button", {"PositionX": 155, "PositionY": 39, "Width": 50, "Height": 15, "Label": _("~Default")}, {"setActionCommand": "width", "addActionListener": buttonlistener})  # Button.
                    addControl("Button", {"PositionX": 155, "PositionY": 64, "Width": 50, "Height": 15, "Label": _("~Default")}, {"setActionCommand": "height", "addActionListener": buttonlistener})  # Button.
                elif eventname=="ok":  # When the OK button is pressed.
                    maxwidth = dialog.getControl("NumericField1").getModel().Value  # Get the value from the NumericField control.
                    maxheight = dialog.getControl("NumericField2").getModel().Value  # Get the value from the NumericField control.
                    self.writeConfig(self.cfgnames, (str(maxwidth), str(maxheight)))  # Convert the values to strings and save them to the component data node.
                elif eventname=="back":  # When the Reset (revert) button is pressed.
                    maxwidth, maxheight = self.readConfig(*self.cfgnames)
                    dialog.getControl("NumericField1").getModel().Value= float(maxwidth)  # Load the value from the component data node.
                    dialog.getControl("NumericField2").getModel().Value= float(maxheight)  # Load the value from the component data node.
except:
                traceback.print_exc()  # The traceback becomes visible by breaking here with import pydevd; pydevd.settrace(stdoutToServer=True, stderrToServer=True).
return False
return True
def getSupportedMethodNames(self):
        return (self.METHODNAME,)  # Also hard-coded.
# XServiceInfo
def getImplementationName(self):
return IMPLE_NAME
def supportsService(self, name):
return name == SERVICE_NAME
def getSupportedServiceNames(self):
return (SERVICE_NAME,)
class ButtonListener(unohelper.Base, XActionListener):  # Button listener.
def __init__(self, dialog, defaults):
self.dialog = dialog
self.defaults = defaults
def actionPerformed(self, actionevent):
cmd = actionevent.ActionCommand
if cmd == "width":
self.dialog.getControl("NumericField1").Value = self.defaults[0]
elif cmd == "height":
self.dialog.getControl("NumericField2").Value = self.defaults[1]
def disposing(self,eventobject):
pass
def controlCreator(ctx, smgr, dialog):  # Returns a function that adds controls.
    dialogmodel = dialog.getModel()  # Get the dialog model.
    def addControl(controltype, props, attrs=None):  # props: control model properties; attrs: control attributes.
        if "PosSize" in props:  # If the property dict has a PosSize key, configure the control in pixel units.
            control = smgr.createInstanceWithContext("com.sun.star.awt.UnoControl{}".format(controltype), ctx)  # Create the control.
            control.setPosSize(props.pop("PositionX"), props.pop("PositionY"), props.pop("Width"), props.pop("Height"), props.pop("PosSize"))  # Only position and size are set on the control itself, so they can be given in pixels.
            controlmodel = _createControlModel(controltype, props)  # Create the control model.
            control.setModel(controlmodel)  # Attach the control model to the control.
            dialog.addControl(props["Name"], control)  # Add the control to the control container.
        else:  # With Map AppFont (ma) units, the model must be added to the dialog model or it is not converted to pixels correctly.
            controlmodel = _createControlModel(controltype, props)  # Create the control model.
            dialogmodel.insertByName(props["Name"], controlmodel)  # Adding the model to the dialog model is enough to create the control as well.
        if attrs is not None:  # Attributes can only be set on a control after it has been added to the dialog.
            control = dialog.getControl(props["Name"])  # Get the control after it has been added to the control container.
            for key, val in attrs.items():  # val is set to None when the method takes no argument.
if val is None:
getattr(control, key)()
else:
getattr(control, key)(val)
    def _createControlModel(controltype, props):  # Create the control model.
if not "Name" in props:
            props["Name"] = _generateSequentialName(controltype)  # If props has no Name, generate a sequentially numbered one.
        controlmodel = dialogmodel.createInstance("com.sun.star.awt.UnoControl{}Model".format(controltype))  # Create the control model. It must come from the UnoControlDialogModel so it gets the UnoControlDialogElement service.
if props:
            values = props.values()  # setProperties() fails when a property value is a tuple, so that case needs special handling.
if any(map(isinstance, values, [tuple]*len(values))):
                [setattr(controlmodel, key, val) for key, val in props.items()]  # val can be a list or a tuple. XMultiPropertySet's setPropertyValues() treats it as []any, so tuples cannot be used there.
else:
controlmodel.setPropertyValues(tuple(props.keys()), tuple(values))
return controlmodel
    def _generateSequentialName(controltype):  # Generate a sequentially numbered name.
i = 1
flg = True
while flg:
name = "{}{}".format(controltype, i)
            flg = dialog.getControl(name)  # Check whether a control with the same name already exists.
i += 1
return name
    return addControl  # Return the function that adds controls to this control container.
def createConfigAccessor(ctx, smgr, rootpath):  # Access to the component data node.
cp = smgr.createInstanceWithContext("com.sun.star.configuration.ConfigurationProvider", ctx)
node = PropertyValue(Name="nodepath", Value=rootpath)
root = cp.createInstanceWithArguments("com.sun.star.configuration.ConfigurationUpdateAccess", (node,))
    def readConfig(*args):  # Read values: integers, strings, or booleans, depending on the component schema node definition.
        if len(args)==1:  # When there is exactly one argument.
            return root.getHierarchicalPropertyValue(*args)
        elif len(args)>1:  # When there are two or more arguments.
            return root.getHierarchicalPropertyValues(args)
    def writeConfig(names, values):  # Write values: integers, strings, or booleans, depending on the component schema node definition.
        try:
            if isinstance(names, tuple):  # When the argument is a tuple.
                root.setHierarchicalPropertyValues(names, values)
            else:
                root.setHierarchicalPropertyValue(names, values)
            root.commitChanges()  # Commit the changed values.
except:
traceback.print_exc()
return readConfig, writeConfig
```
#### File: OptionsDialog/tools/createProtocolHandlerXcu.py
```python
import xml.etree.ElementTree as ET
import os
import sys
from config import getConfig
import types
from helper import Elem
class MenuItem(Elem):
'''
    Subclass of Elem with a method that creates oor:node-type="MenuItem" nodes.
'''
def createNodes(self, c, xdic):
'''
        Returns a list of Elements with oor:node-type="MenuItem".
:param DIC: PYTHON_UNO_Component,IMPLE_NAME,SERVICE_NAME,HANDLED_PROTOCOL
:type DIC: dict
:param xdic: Xml Attributes
:type xdic: dict
:returns: a list of nodes
:rtype: list
'''
        ORDER = "URL","Title","Target","Context","Submenu","ControlType","Width"  # Specifies the node order. The "ImageIdentifier" node is not used, so it is ignored.
        lst_nd = list()  # List that collects the nodes.
for key in ORDER:
if key in xdic:
val = xdic[key]
                if key == "Title":  # For the Title node.
nd = Elem("prop", {"oor:name": key, "oor:type": "xs:string"})
for lang,txt in val.items():
nd.append(Elem("value", {"xml:lang": lang}, text=txt))
lst_nd.append(nd)
                elif key == "Submenu":  # For the Submenu node.
                    fn = val.pop()  # Get the function that builds the submenu.
if type(fn) is types.MethodType:
lst_nd.append(fn(c, val))
                else:  # For all other nodes.
nd = Elem("prop", {"oor:name": key,"oor:type": "xs:string"})
nd.append(Elem("value", text=val))
lst_nd.append(nd)
return lst_nd
    def createWindowStateNodes(self, c, xdic):  # Toolbar settings.
'''
Properties for ToolBar
:param DIC: PYTHON_UNO_Component,IMPLE_NAME,SERVICE_NAME,HANDLED_PROTOCOL
:type DIC: dict
:param xdic: Xml Attributes
:type xdic: dict
:returns: a list of nodes
:rtype: list
'''
        ORDER = "UIName","ContextSensitive","Visible","Docked"  # Specifies the node order.
        lst_nd = list()  # List that collects the nodes.
for key in ORDER:
if key in xdic:
val = xdic[key]
                if key == "UIName":  # For the UIName (title) node.
nd = Elem("prop", {"oor:name": key, "oor:type": "xs:string"})
for lang, txt in val.items():
nd.append(Elem("value", {"xml:lang": lang}, text=txt))
lst_nd.append(nd)
                else:  # For all other nodes.
nd = Elem("prop", {"oor:name": key, "oor:type": "xs:boolean"})
nd.append(Elem("value", text=val))
lst_nd.append(nd)
return lst_nd
class AddonMenu(MenuItem):  # Creates the menu items shown under Tools -> Add-Ons.
'''
Tools->Add-Ons->AddonMenu
'''
def __init__(self, c):
        super().__init__("node", {'oor:name': "AddonMenu"})  # Do not change.
        self.append(Elem("node", {'oor:name': "{}.function".format(c["HANDLED_PROTOCOL"]), "oor:op": "replace"}))  # The oor:name value is an arbitrary unique name for this node.
        self[-1].extend(super().createNodes(c, {"Title": {"en-US": "Add-On example by AddonMenuNode"}, "Context": "com.sun.star.text.TextDocument", "Submenu": ["m1", "m2", self.subMenu]}))  # Settings for the menu displayed from here on.
def subMenu(self, c, val):
'''
        Create the submenu.
:param DIC: PYTHON_UNO_Component,IMPLE_NAME,SERVICE_NAME,HANDLED_PROTOCOL
:type DIC: dict
:param val: Submenu IDs
:type val: list
:returns: a node for submenu
:rtype: xml.etree.ElementTree.Element
'''
        nd = Elem("node", {"oor:name": "Submenu"})  # Do not change.
        i = 0
        nd.append(Elem("node", {"oor:name" :val[i], "oor:op": "replace"}))  # The oor:name value is an arbitrary unique name for this node. Entries are sorted in this order.
nd[i].extend(super().createNodes(c, {"URL": "{}:Function1".format(c["HANDLED_PROTOCOL"]), "Title": {"en-US": "Add-On Function 1"}, "Target": "_self"}))
i += 1
        nd.append(Elem("node", {"oor:name": val[i], "oor:op": "replace"}))  # The oor:name value is an arbitrary unique name for this node. Entries are sorted in this order.
nd[i].extend(super().createNodes(c, {"URL": "{}:Function2".format(c["HANDLED_PROTOCOL"]), "Title": {"en-US": "Add-On Function 2"}, "Target": "_self"}))
return nd
class OfficeMenuBar(MenuItem):  # Creates the items added to the main menu bar.
'''
OfficeMenuBar
Main Menu Bar
'''
    def __init__(self, c):
        super().__init__("node", {'oor:name': "OfficeMenuBar"})  # Do not change.
        self.append(Elem("node", {'oor:name': c["HANDLED_PROTOCOL"], "oor:op": "replace"}))  # The oor:name value is an arbitrary unique name for this node.
        self[0].extend(super().createNodes(c, {"Title": {"en-US": "Add-On example by OfficeMenuBar"}, "Target": "_self", "Submenu": ["m1", "m2", "m3", self.subMenu]}))  # Settings for the menu displayed from here on.
def subMenu(self, c, val):
        nd = Elem("node", {"oor:name": "Submenu"})  # Do not change.
        i = 0
        nd.append(Elem("node", {"oor:name": val[i], "oor:op": "replace"}))  # The oor:name value is an arbitrary unique name for this node. Entries are sorted in this order.
nd[i].extend(super().createNodes(c, {"URL": "{}:Function1".format(c["HANDLED_PROTOCOL"]), "Title": {"en-US": "Add-On Function 1"}, "Target": "_self", "Context": "com.sun.star.text.TextDocument"}))
i += 1
        nd.append(Elem("node", {"oor:name": val[i], "oor:op": "replace"}))  # The oor:name value is an arbitrary unique name for this node. Entries are sorted in this order.
nd[i].extend(super().createNodes(c, {"URL": "private:separator"}))
i += 1
        nd.append(Elem("node", {"oor:name": val[i], "oor:op": "replace"}))  # The oor:name value is an arbitrary unique name for this node. Entries are sorted in this order.
nd[i].extend(super().createNodes(c, {"URL": "", "Title": {"en-US": "Add-On sub menu"}, "Target": "_self", "Submenu": ["m1", self.subMenu2]}))
return nd
def subMenu2(self, c, val):
        nd = Elem("node", {"oor:name": "Submenu"})  # Do not change.
        i = 0
        nd.append(Elem("node", {"oor:name":val[i], "oor:op":"replace"}))  # The oor:name value is an arbitrary unique name for this node. Entries are sorted in this order.
nd[i].extend(super().createNodes(c, {"URL": "{}:Function2".format(c["HANDLED_PROTOCOL"]), "Title": {"en-US": "Add-On Function 2"}, "Target": "_self", "Context": "com.sun.star.sheet.SpreadsheetDocument"}))
return nd
class OfficeToolBar(MenuItem):  # Creates the toolbar.
'''
OfficeToolBar
View->Toolbars
Select this tool bar.
    The toolbar name is not set here.
'''
def __init__(self, c):
        super().__init__("node", {'oor:name': "OfficeToolBar"})  # Do not change.
        self.append(Elem("node", {'oor:name': c["HANDLED_PROTOCOL"], "oor:op": "replace"}))  # The oor:name value is an arbitrary unique name for this node.
        self[0].append(Elem("node", {'oor:name': "m1", "oor:op": "replace"}))  # The oor:name value is an arbitrary unique name for this node. Entries are sorted in this order.
self[0][0].extend(super().createNodes(c, {"URL": "{}:Function1".format(c["HANDLED_PROTOCOL"]), "Title": {"en-US": "Function 1"}, "Target": "_self", "Context": "com.sun.star.text.TextDocument"}))
        self[0].append(Elem("node", {'oor:name': "m2", "oor:op": "replace"}))  # The oor:name value is an arbitrary unique name for this node. Entries are sorted in this order.
self[0][1].extend(super().createNodes(c, {"URL": "{}:Function2".format(c["HANDLED_PROTOCOL"]), "Title": {"en-US": "Function 2"}, "Target": "_self", "Context": "com.sun.star.text.TextDocument"}))
        self.createWindwStatexcu(c, "Writer")  # Set the toolbar properties for Writer.
    def createWindwStatexcu(self, c, ctxt):  # Toolbar property settings.
        # Creation of WriterWindowState.xcu (CalcWindowState.xcu in the case of Calc).
filename = "{}WindowState.xcu".format(ctxt)
        c["backup"](filename)  # Rename any existing file to a .bk backup.
with open(filename, "w", encoding="utf-8") as f:
            rt = Elem("oor:component-data", {"oor:name": "{}WindowState".format(ctxt), "oor:package": "org.openoffice.Office.UI", "xmlns:oor": "http://openoffice.org/2001/registry", "xmlns:xs": "http://www.w3.org/2001/XMLSchema"})  # Create the root element.
rt.append(Elem("node", {'oor:name': "UIElements"}))
rt[-1].append(Elem("node", {'oor:name': "States"}))
rt[-1][-1].append(Elem("node", {'oor:name': "private:resource/toolbar/addon_{}".format(c["HANDLED_PROTOCOL"]), "oor:op": "replace"}))
            rt[-1][-1][-1].extend(super().createWindowStateNodes(c, {"UIName": {"en-US": "OfficeToolBar Title"}, "ContextSensitive": "false", "Visible": "true", "Docked": "false"}))  # Set the toolbar properties.
            tree = ET.ElementTree(rt)  # Wrap the root element in an xml.etree.ElementTree.ElementTree object.
            tree.write(f.name, "utf-8", True)  # Write the file as UTF-8 with the XML declaration enabled.
print("{} has been created.".format(filename))
class Images(MenuItem):  # Sets the command URLs on which icons are displayed.
'''
Specify command URL to display icon
'''
def __init__(self, c):
        super().__init__("node", {'oor:name': "Images"})  # Do not change.
        # Image 1
        name = "com.sun.star.comp.framework.addon.image1"  # The oor:name value is an arbitrary unique name for this node.
        url = "{}:Function1".format(c["HANDLED_PROTOCOL"])  # Command URL on which to display the icon.
dic_image = {
"ImageSmallURL": "%origin%/icons/image1ImageSmall.png",
"ImageBigURL": "%origin%/icons/image1ImageBig.png",
}
self.append(self.userDefinedImages(name, url, dic_image))
        # Image 2
        name = "com.sun.star.comp.framework.addon.image2"  # The oor:name value is an arbitrary unique name for this node.
        url = "org.openoffice.Office.addon.example:Help"  # Command URL on which to display the icon.
dic_image = {
"ImageSmallURL":"%origin%/icons/image2ImageSmall.png",
"ImageBigURL":"%origin%/icons/image2ImageBig.png",
}
self.append(self.userDefinedImages(name, url, dic_image))
def userDefinedImages(self, name, url, dic_image):
'''
        Icon settings.
:param name: name of icon
:type name: str
:param url: uri of icon
:type url: str
:param dic_image: a dictionary of the same image with different sizes
:type dic_image: dict
:returns: a node for an image
:rtype: xml.etree.ElementTree.Element
'''
        nd = Elem("node", {"oor:name": name, "oor:op": "replace"})  # The oor:name value is an arbitrary unique name for this node.
        nd.append(Elem("prop", {"oor:name": "URL"}))
        nd[-1].append(Elem("value", text=url))  # Set the command URL on which to display the icon.
nd.append(Elem("node", {"oor:name": "UserDefinedImages"}))
ORDER = "ImageSmall", "ImageBig", "ImageSmallHC", "ImageBigHC"
for key in ORDER:
if key in dic_image:
snd = Elem("prop", {"oor:name": key, "oor:type": "xs:hexBinary"})
snd.append(Elem("value", text=dic_image[key]))
nd[-1].append(snd)
        ORDER = "ImageSmallURL", "ImageBigURL"  # "ImageSmallHCURL","ImageBigHCURL": if the value node's text contains whitespace the image is not shown; HC images take display priority, so HC images are not used.
for key in ORDER:
if key in dic_image:
snd = Elem("prop", {"oor:name": key, "oor:type": "xs:string"})
snd.append(Elem("value", text=dic_image[key]))
nd[-1].append(snd)
return nd
class OfficeHelp(MenuItem):  # Help menu settings.
'''
Help Menu
'''
def __init__(self, c):
        super().__init__("node", {'oor:name': "OfficeHelp"})  # Do not change.
        self.append(Elem("node", {'oor:name': "com.sun.star.comp.framework.addon", "oor:op": "replace"}))  # The oor:name value is an arbitrary unique name for this node.
self[0].extend(super().createNodes(c, {"URL": "{}:Help".format(c["HANDLED_PROTOCOL"]), "Title": {"x-no-translate": "", "de": "Über Add-On Beispiel", "en-US":" About Add-On Example"}, "Target": "_self"}))
def createProtocolHandlerXcu(c):
#Creation of ProtocolHandler.xcu
    os.chdir(c["src_path"])  # Move into the src folder.
    if c["HANDLED_PROTOCOL"] is not None:  # When HANDLED_PROTOCOL has a value.
filename = "ProtocolHandler.xcu"
        c["backup"](filename)  # Back up any existing file.
        with open(filename, "w", encoding="utf-8") as f:  # Create the ProtocolHandler.xcu file.
            rt = Elem("oor:component-data", {"oor:name": "ProtocolHandler", "oor:package": "org.openoffice.Office", "xmlns:oor": "http://openoffice.org/2001/registry", "xmlns:xs": "http://www.w3.org/2001/XMLSchema", "xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance"})  # Create the root element.
rt.append(Elem("node", {'oor:name': "HandlerSet"}))
rt[-1].append(Elem("node", {'oor:name': c["IMPLE_NAME"], "oor:op": "replace"}))
rt[-1][-1].append(Elem("prop",{'oor:name': "Protocols", "oor:type": "oor:string-list"}))
rt[-1][-1][-1].append(Elem("value", text = "{}:*".format(c["HANDLED_PROTOCOL"])))
            tree = ET.ElementTree(rt)  # Wrap the root element in an xml.etree.ElementTree.ElementTree object.
            tree.write(f.name, "utf-8", True)  # Write the file as UTF-8 with the XML declaration enabled.
print("{} has been created.".format(filename))
#Creation of Addons.xcu
filename = "Addons.xcu"
        c["backup"](filename)  # Back up any existing file.
        with open(filename, "w", encoding="utf-8") as f:  # Create the Addons.xcu file.
            rt = Elem("oor:component-data", {"oor:name":"Addons", "oor:package": "org.openoffice.Office", "xmlns:oor": "http://openoffice.org/2001/registry", "xmlns:xs": "http://www.w3.org/2001/XMLSchema"})  # Create the root element.
rt.append(Elem("node", {'oor:name': "AddonUI"}))
            rt[-1].extend([AddonMenu(c), OfficeMenuBar(c), OfficeToolBar(c), Images(c), OfficeHelp(c)])  # Nodes to add.
            tree = ET.ElementTree(rt)  # Wrap the root element in an xml.etree.ElementTree.ElementTree object.
            tree.write(f.name, "utf-8", True)  # Write the file as UTF-8 with the XML declaration enabled.
print("{} has been created.".format(filename))
if __name__ == "__main__":
createProtocolHandlerXcu(getConfig(False))
```
#### File: OptionsDialog/tools/helper.py
```python
import xml.etree.ElementTree as ET
class Elem(ET.Element):
'''
    Subclass of xml.etree.ElementTree.Element that attaches a text node via the keyword argument text.
'''
    def __init__(self, tag, attrib={}, **kwargs):  # The text keyword only accepts strings.
        if "text" in kwargs:  # When the text keyword is present.
txt = kwargs["text"]
del kwargs["text"]
super().__init__(tag, attrib, **kwargs)
self._text(txt)
else:
super().__init__(tag, attrib, **kwargs)
def _text(self, txt):
self.text = txt
class ElemProp(Elem):  # A prop node that holds a value.
def __init__(self, name, txt):
super().__init__("prop", {'oor:name': name})
self.append(Elem("value", text=txt))
class ElemPropLoc(Elem):  # A prop node that holds localized (multi-language) values.
def __init__(self, name, langs):
super().__init__("prop", {'oor:name': name})
for lang, value in langs.items():
self.append(Elem("value", {"xml:lang": lang}, text=value))
```
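The `Elem` helpers are ordinary ElementTree elements, so a small standard-library-only sketch shows what they serialize to (the `from helper import ...` form matches how `createProtocolHandlerXcu.py` imports the module):
```python
import xml.etree.ElementTree as ET
from helper import Elem, ElemProp, ElemPropLoc

root = Elem("node", {"oor:name": "Example"})
root.append(ElemProp("Width", "21.0"))
root.append(ElemPropLoc("Title", {"en-US": "Example add-on", "de": "Beispiel-Add-on"}))
print(ET.tostring(root, encoding="unicode"))
# <node oor:name="Example"><prop oor:name="Width"><value>21.0</value></prop>...
```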
|
{
"source": "JeremyMarshall/pymake",
"score": 3
}
|
#### File: pymake/pymake/globrelative.py
```python
import os, re, fnmatch
from . import util
_globcheck = re.compile(r'[\[*?]')
def hasglob(p):
return _globcheck.search(p) is not None
def glob(fsdir, path):
"""
Yield paths matching the path glob. Sorts as a bonus. Excludes '.' and '..'
"""
dir, leaf = os.path.split(path)
if dir == '':
return globpattern(fsdir, leaf)
if hasglob(dir):
dirsfound = glob(fsdir, dir)
else:
dirsfound = [dir]
r = []
for dir in dirsfound:
fspath = util.normaljoin(fsdir, dir)
if not os.path.isdir(fspath):
continue
r.extend((util.normaljoin(dir, found) for found in globpattern(fspath, leaf)))
return r
def globpattern(dir, pattern):
"""
Return leaf names in the specified directory which match the pattern.
"""
if not hasglob(pattern):
if pattern == '':
if os.path.isdir(dir):
return ['']
return []
if os.path.exists(util.normaljoin(dir, pattern)):
return [pattern]
return []
leaves = os.listdir(dir) + ['.', '..']
# "hidden" filenames are a bit special
if not pattern.startswith('.'):
leaves = [leaf for leaf in leaves
if not leaf.startswith('.')]
leaves = fnmatch.filter(leaves, pattern)
leaves = [l for l in leaves if os.path.exists(util.normaljoin(dir, l))]
leaves.sort()
return leaves
```
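A throw-away-directory sketch of `hasglob`/`glob` in action (standard library plus the module above; the `pymake.globrelative` import path and the exact separator in the results depend on `util.normaljoin`):
```python
import os, tempfile
from pymake.globrelative import glob, hasglob  # assumed package import path

with tempfile.TemporaryDirectory() as root:
    os.makedirs(os.path.join(root, 'src'))
    for name in ('src/a.c', 'src/b.c', 'README'):
        open(os.path.join(root, name), 'w').close()

    print(hasglob('src/*.c'))     # True
    print(glob(root, 'src/*.c'))  # expected: ['src/a.c', 'src/b.c']
    print(glob(root, '*'))        # expected: ['README', 'src']
```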
|
{
"source": "jeremymatt/coop_controller",
"score": 2
}
|
#### File: jeremymatt/coop_controller/cc.py
```python
import datetime as dt
import time
from suntime import Sun
from twilio.rest import Client
import traceback
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
import board
import busio
import adafruit_character_lcd.character_lcd_rgb_i2c as character_lcd
import smtplib
import settings
from email.message import EmailMessage
import os
def restart():
os.system('sudo reboot')
"""
#https://www.ridgesolutions.ie/index.php/2013/02/22/raspberry-pi-restart-shutdown-your-pi-from-python-code/
command = "/usr/bin/sudo /sbin/shutdown -r now"
import subprocess
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
output = process.communicate()[0]
print(output)
"""
def get_datetime_parts():
#Get the year, month, day, hour, minute, and second of the current local time
ct = dt.datetime.now()
y = ct.year
mo = str(ct.month).zfill(2)
d = str(ct.day).zfill(2)
h = str(ct.hour).zfill(2)
m = str(ct.minute).zfill(2)
s = str(ct.second).zfill(2)
return y,mo,d,h,m,s
def send_crash_notification(logfile_name):
    #Send a text message crash notification. If that fails, write the failure
#to the logfile
y,mo,d,h,m,s = get_datetime_parts()
message = '*** ERROR ***\n COOP CONTROLLER HAS CRASHED\n {}-{}-{} {}:{}'.format(y,mo,d,h,m)
for address in settings.phone_numbers:
try:
send_message_twilio(address,message)
except Exception as e:
with open(logfile_name, 'a') as file:
file.write('\nFAILED TO SEND CRASH TEXT NOTIFICATION. \nADDRESS:\n{}\nMESSAGE:\n{}\n\nTRACEBACK:\n'.format(address,message))
traceback.print_exc(limit=None, file=file, chain=True)
def send_message_twilio(address,message):
#Send a message using the twilio text service
client = Client(settings.account_sid, settings.auth_token)
message = client.messages.create(
messaging_service_sid='MG3cef878fb0107a7b2c4412cc890ba226',
body=message,
to=address
)
class coop_controller:
def __init__(self):
#Get the current date & time and generate a logfile name
y,mo,d,h,m,s = get_datetime_parts()
self.logfile_name = 'LOGFILE_{}-{}-{}_{}:{}:{}.txt'.format(y,mo,d,h,m,s)
#Get the current directory, generate the log directory path, and make
#the directory if it doesn't exist
cd = os.getcwd()
self.log_dir = os.path.join(cd,'logs')
if not os.path.isdir(self.log_dir):
os.makedirs(self.log_dir)
#Clear old logs
self.clear_old_logs()
#Generate the logfile name
self.logfile_name = os.path.join(self.log_dir,self.logfile_name)
#Run the function to get the current time in UTC
self.get_cur_time()
#Calculate the sunrise/sunset times
self.get_sunrise_sunset()
#Initialize the GPIO pins
self.init_pins()
        #Init the program control flags, constants, and variables.
self.init_flags()
        #Initialize the button menu dictionary
self.init_button_menu()
#Initialize the display
self.init_display()
def clear_old_logs(self):
        #List all the files in the log directory, sort, and keep only the 30
#newest logs
files = [os.path.join(self.log_dir,file) for file in os.listdir(self.log_dir)]
files = [file for file in files if os.path.isfile(file)]
files.sort()
if len(files)>30:
files_to_delete = files[:(len(files)-30)]
for file in files_to_delete:
os.remove(file)
def run(self):
#The main loop
#Init a trigger to print the current state to the console for debugging
#purposes
self.print_state_trigger = self.cur_time
while True:
#Get the current times
self.get_cur_time()
#Generate boolean control variables based on the current time relative
#to the door&light control times
self.check_times()
#Check if the program is in an error state (IE the door didn't
#open or close properly)
self.check_error_state()
#Check if there was a button press and handle
self.check_buttons()
#Check the status of the inputs (the switches testing if the door
#is open or closed)
self.check_inputs()
#Check the door status and open/close if necessary
self.check_door()
if self.cur_time > self.print_state_trigger:
self.print_state_trigger = self.cur_time + self.long_time
self.print_state()
self.check_display_status()
def print_sun_times(self,label_msg = None):
#Prints the sunrise/sunset times to the logfile
sunrise,parts = self.get_datetime_string(self.sunrise)
sunset,parts = self.get_datetime_string(self.sunset)
close_time,parts = self.get_datetime_string(self.close_time)
with open(self.logfile_name,'a') as f:
f.write('\n')
f.write('New Day!\n')
f.write(' Sunrise: {}\n'.format(sunrise))
f.write(' Sunset: {}\n'.format(sunset))
f.write(' Door close: {}\n'.format(close_time))
def print_state(self,label_msg = None):
#Prints the state of control variables to the logfile and to the console
with open(self.logfile_name,'a') as f:
f.write('\n')
if label_msg != None:
f.write(label_msg)
string,parts = self.get_datetime_string(self.cur_time)
f.write('{}\n'.format(string))
f.write('Door is opening: {}\n'.format(self.door_is_opening))
f.write('Door is fully open: {}\n'.format(self.door_is_open))
f.write('Door is closing: {}\n'.format(self.door_is_closing))
f.write('Door is closed: {}\n'.format(self.door_is_closed))
f.write('Door override: {}\n'.format(self.door_state_override))
f.write('Light is on: {}\n'.format(self.light_is_on))
f.write('Light override: {}\n'.format(self.light_state_override))
f.write('IN ERROR STATE: {}\n'.format(self.error_state))
f.write('current menu: {}\n'.format(self.cur_menu))
f.write('In submenu: {}\n'.format(self.in_sub_menu))
f.write('Items in message send queue: {}\n'.format(len(self.notification_list)))
f.write('Menu Message: {}\n'.format(self.msg))
print('\n')
# print('\nDoor is open: {}'.format(self.door_is_open))
if label_msg != None:
print(label_msg)
print(self.cur_time)
print('Door is opening: {}'.format(self.door_is_opening))
print('Door is fully open: {}'.format(self.door_is_open))
# print('Door is closed: {}'.format(self.door_is_closed))
print('Door is closing: {}'.format(self.door_is_closing))
print('Door is closed: {}'.format(self.door_is_closed))
print('Door override: {}'.format(self.door_state_override))
print('Light is on: {}'.format(self.light_is_on))
print('Light override: {}'.format(self.light_state_override))
print('IN ERROR STATE: {}'.format(self.error_state))
print('current menu: {}'.format(self.cur_menu))
print('In submenu: {}'.format(self.in_sub_menu))
print('Items in message send queue: {}'.format(len(self.notification_list)))
print('Menu Message: {}'.format(self.msg))
def check_error_state(self):
display_state = True
in_err_state = False
disp_blink_time = self.cur_time + dt.timedelta(seconds=0.5)
while self.error_state:
if not in_err_state:
self.print_state('IN ERROR STATE\n')
self.lcd.color = [100, 0, 0]
self.lcd.message = self.error_msg
in_err_state = True
self.get_cur_time()
self.check_send_notification_time()
self.check_buttons()
if self.cur_time>disp_blink_time:
if display_state:
self.lcd.color = [0,0,0]
disp_blink_time = self.cur_time + dt.timedelta(seconds=.5)
display_state = False
else:
self.lcd.color = [100,0,0]
disp_blink_time = self.cur_time + dt.timedelta(seconds=.75)
display_state = True
if in_err_state:
self.display_on()
self.cur_menu = 0
self.in_sub_menu = False
self.update_display()
def check_door(self):
if self.door_is_closed and self.door_is_open:
string,parts = self.get_datetime_string(self.cur_time)
msg = 'Chicken Door Malfunction:\n Both switches closed \n time: {}'.format(string)
self.error_msg = 'ERR:bth swch cls\nSelect ==> clear'
self.error_state = True
self.cur_menu = -3
self.in_sub_menu = False
self.door_stop()
self.queue_notification(msg)
if self.door_is_closed and self.door_is_closing:
print('triggered close stop at: {}'.format(self.cur_time))
# self.door_is_open = False
# self.door_is_closed = True
self.door_is_closing = False
self.door_is_opening = False
self.door_move_end_time = self.cur_time + self.long_time
self.print_state_trigger = self.cur_time - dt.timedelta(seconds=1)
self.door_travel_stop_time = self.cur_time + dt.timedelta(seconds=(settings.extra_door_travel+settings.door_lock_travel))
if self.cur_time>self.door_travel_stop_time:
print('Stopped move at: {}'.format(self.cur_time))
self.door_travel_stop_time = self.cur_time + self.long_time
self.print_state_trigger = self.cur_time - dt.timedelta(seconds=1)
self.door_stop()
if self.door_is_open and self.door_is_opening:
print('triggered open stop at: {}'.format(self.cur_time))
# self.door_stop()
# self.door_is_open = True
# self.door_is_closed = False
self.door_is_closing = False
self.door_is_opening = False
self.door_move_end_time = self.cur_time + self.long_time
self.print_state_trigger = self.cur_time - dt.timedelta(seconds=1)
self.door_travel_stop_time = self.cur_time + dt.timedelta(seconds=settings.extra_door_travel)
if self.door_open_time and not (self.door_is_open or self.door_is_opening) and not self.door_state_override:
string,parts = self.get_datetime_string(self.cur_time)
self.print_state_trigger = self.cur_time - dt.timedelta(seconds=1)
msg = 'Chicken Door Opening:\n time: {}'.format(string)
self.queue_notification(msg)
self.door_raise()
if not self.door_open_time and not (self.door_is_closed or self.door_is_closing) and not self.door_state_override:
string,parts = self.get_datetime_string(self.cur_time)
self.print_state_trigger = self.cur_time - dt.timedelta(seconds=1)
msg = 'Chicken Door Closing:\n time: {}'.format(string)
self.queue_notification(msg)
self.door_lower()
if self.light_on_time and not self.light_is_on and not self.light_state_override:
string,parts = self.get_datetime_string(self.cur_time)
self.print_state_trigger = self.cur_time - dt.timedelta(seconds=1)
msg = 'Chicken light turning on:\n time: {}'.format(string)
self.queue_notification(msg)
self.light_on()
if not self.light_on_time and self.light_is_on and not self.light_state_override:
string,parts = self.get_datetime_string(self.cur_time)
self.print_state_trigger = self.cur_time - dt.timedelta(seconds=1)
msg = 'Chicken light turning off:\n time: {}'.format(string)
self.queue_notification(msg)
self.light_off()
def check_inputs(self):
self.door_is_closed = GPIO.input(self.pins['door_closed'])==GPIO.LOW
self.door_is_open = GPIO.input(self.pins['door_open'])==GPIO.LOW
def init_button_menu(self):
self.button_menu = {}
menu = -3
sub_menu = False
self.button_menu[menu] = {}
self.button_menu[menu][sub_menu] = {}
self.button_menu[menu][sub_menu]['msg'] = self.disp_error_msg
self.button_menu[menu][sub_menu]['select'] = self.cancel_error
self.button_menu[menu][sub_menu]['left'] = None
self.button_menu[menu][sub_menu]['right'] = None
self.button_menu[menu][sub_menu]['up'] = None
self.button_menu[menu][sub_menu]['down'] = None
menu = -2
sub_menu = False
self.button_menu[menu] = {}
self.button_menu[menu][sub_menu] = {}
self.button_menu[menu][sub_menu]['msg'] = self.disp_override
self.button_menu[menu][sub_menu]['select'] = None
self.button_menu[menu][sub_menu]['left'] = None
self.button_menu[menu][sub_menu]['right'] = None
self.button_menu[menu][sub_menu]['up'] = self.next_menu
self.button_menu[menu][sub_menu]['down'] = self.prev_menu
menu = -1
sub_menu = False
self.button_menu[menu] = {}
self.button_menu[menu][sub_menu] = {}
self.button_menu[menu][sub_menu]['msg'] = None
self.button_menu[menu][sub_menu]['select'] = None
self.button_menu[menu][sub_menu]['left'] = None
self.button_menu[menu][sub_menu]['right'] = None
self.button_menu[menu][sub_menu]['up'] = self.next_menu
self.button_menu[menu][sub_menu]['down'] = self.prev_menu
menu = 0
sub_menu = False
self.button_menu[menu] = {}
self.button_menu[menu][sub_menu] = {}
self.button_menu[menu][sub_menu]['msg'] = self.disp_current_time
self.button_menu[menu][sub_menu]['select'] = None
self.button_menu[menu][sub_menu]['left'] = None
self.button_menu[menu][sub_menu]['right'] = None
self.button_menu[menu][sub_menu]['up'] = self.next_menu
self.button_menu[menu][sub_menu]['down'] = self.prev_menu
menu = 1
sub_menu = False
self.button_menu[menu] = {}
self.button_menu[menu][sub_menu] = {}
self.button_menu[menu][sub_menu]['msg'] = self.disp_door_times
self.button_menu[menu][sub_menu]['select'] = self.enter_submenu
self.button_menu[menu][sub_menu]['left'] = None
self.button_menu[menu][sub_menu]['right'] = None
self.button_menu[menu][sub_menu]['up'] = self.next_menu
self.button_menu[menu][sub_menu]['down'] = self.prev_menu
menu = 1
sub_menu = True
self.button_menu[menu][sub_menu] = {}
self.button_menu[menu][sub_menu]['msg'] = self.disp_light_times
self.button_menu[menu][sub_menu]['select'] = self.exit_submenu
self.button_menu[menu][sub_menu]['left'] = None
self.button_menu[menu][sub_menu]['right'] = None
self.button_menu[menu][sub_menu]['up'] = self.next_menu
self.button_menu[menu][sub_menu]['down'] = self.prev_menu
menu = 2
sub_menu = False
self.button_menu[menu] = {}
self.button_menu[menu][sub_menu] = {}
self.button_menu[menu][sub_menu]['msg'] = self.disp_door_override
self.button_menu[menu][sub_menu]['select'] = self.enter_submenu
self.button_menu[menu][sub_menu]['left'] = self.cancel_door_override
self.button_menu[menu][sub_menu]['right'] = self.cancel_door_override
self.button_menu[menu][sub_menu]['up'] = self.next_menu
self.button_menu[menu][sub_menu]['down'] = self.prev_menu
menu = 2
sub_menu = True
self.button_menu[menu][sub_menu] = {}
self.button_menu[menu][sub_menu]['msg'] = 'Override door\nUD:op/cls,LR:stp'
self.button_menu[menu][sub_menu]['select'] = self.exit_submenu
self.button_menu[menu][sub_menu]['left'] = self.door_stop
self.button_menu[menu][sub_menu]['right'] = self.door_stop
self.button_menu[menu][sub_menu]['up'] = self.override_door_raise
self.button_menu[menu][sub_menu]['down'] = self.override_door_lower
menu = 3
sub_menu = False
self.button_menu[menu] = {}
self.button_menu[menu][sub_menu] = {}
self.button_menu[menu][sub_menu]['msg'] = self.disp_light_override
self.button_menu[menu][sub_menu]['select'] = self.enter_submenu
self.button_menu[menu][sub_menu]['left'] = self.cancel_light_override
self.button_menu[menu][sub_menu]['right'] = self.cancel_light_override
self.button_menu[menu][sub_menu]['up'] = self.next_menu
self.button_menu[menu][sub_menu]['down'] = self.prev_menu
menu = 3
sub_menu = True
self.button_menu[menu][sub_menu] = {}
self.button_menu[menu][sub_menu]['msg'] = 'Override light\nUD:on/off'
self.button_menu[menu][sub_menu]['select'] = self.exit_submenu
self.button_menu[menu][sub_menu]['left'] = None
self.button_menu[menu][sub_menu]['right'] = None
self.button_menu[menu][sub_menu]['up'] = self.override_light_on
self.button_menu[menu][sub_menu]['down'] = self.override_light_off
menu = 4
sub_menu = False
self.button_menu[menu] = {}
self.button_menu[menu][sub_menu] = {}
self.button_menu[menu][sub_menu]['msg'] = self.disp_sensor_state
self.button_menu[menu][sub_menu]['select'] = None
self.button_menu[menu][sub_menu]['left'] = None
self.button_menu[menu][sub_menu]['right'] = None
self.button_menu[menu][sub_menu]['up'] = self.next_menu
self.button_menu[menu][sub_menu]['down'] = self.prev_menu
def check_buttons(self):
if self.lcd.left_button:
self.do_button('left')
elif self.lcd.up_button:
self.do_button('up')
elif self.lcd.down_button:
self.do_button('down')
elif self.lcd.right_button:
self.do_button('right')
elif self.lcd.select_button:
self.do_button('select')
def enter_submenu(self):
self.in_sub_menu = True
def exit_submenu(self):
self.in_sub_menu = False
def do_button(self,button):
time.sleep(0.1)
if self.button_menu[self.cur_menu][self.in_sub_menu][button] != None:
self.display_off_time = self.cur_time + dt.timedelta(seconds=settings.screen_on_time)
if not self.display_is_on:
self.display_on()
self.button_menu[self.cur_menu][self.in_sub_menu][button]()
self.display_message = self.button_menu[self.cur_menu][self.in_sub_menu]['msg']
def cancel_error(self):
self.in_sub_menu = False
self.cur_menu = 1
self.error_state = False
def update_display(self):
if self.display_is_on:
if type(self.display_message) == str:
self.msg = self.display_message
else:
self.msg = self.display_message()
if self.prev_display_message != self.msg:
self.lcd.clear()
self.lcd.message = self.msg
self.prev_display_message = self.msg
def override_door_raise(self):
self.print_state_trigger = self.cur_time - dt.timedelta(seconds=1)
self.door_state_override = True
self.door_raise()
def override_door_lower(self):
self.print_state_trigger = self.cur_time - dt.timedelta(seconds=1)
self.door_state_override = True
self.door_lower()
def cancel_door_override(self):
self.print_state_trigger = self.cur_time - dt.timedelta(seconds=1)
self.door_state_override = False
def door_stop(self):
GPIO.output(self.pins['door_raise'], GPIO.LOW)
GPIO.output(self.pins['door_lower'], GPIO.LOW)
self.door_is_opening = False
self.door_is_closing = False
self.door_move_end_time = self.cur_time+self.long_time
def door_raise(self):
GPIO.output(self.pins['door_lower'], GPIO.LOW)
time.sleep(0.25)
GPIO.output(self.pins['door_raise'], GPIO.HIGH)
self.door_is_opening = True
self.door_is_closing = False
self.door_move_end_time = self.cur_time+self.door_travel_time
def door_lower(self):
GPIO.output(self.pins['door_raise'], GPIO.LOW)
time.sleep(0.25)
GPIO.output(self.pins['door_lower'], GPIO.HIGH)
self.door_is_opening = False
self.door_is_closing = True
self.door_move_end_time = self.cur_time+self.door_travel_time
def override_light_on(self):
self.light_state_override = True
self.light_on()
def override_light_off(self):
self.light_state_override = True
self.light_off()
def cancel_light_override(self):
self.light_state_override = False
def light_on(self):
GPIO.output(self.pins['light'], GPIO.HIGH)
self.light_is_on = True
def light_off(self):
GPIO.output(self.pins['light'], GPIO.LOW)
self.light_is_on = False
def next_menu(self):
self.in_sub_menu = False
self.cur_menu = max(0,(self.cur_menu+1))
self.cur_menu %= (max(self.button_menu.keys())+1)
def prev_menu(self):
self.in_sub_menu = False
self.cur_menu -= 1
if self.cur_menu < 0:
self.cur_menu = max(self.button_menu.keys())
def disp_error_msg(self):
return self.error_msg
def disp_door_override(self):
if self.door_state_override:
msg = 'DOOR OVERRIDDEN\nL/R to cancel'
else:
msg = 'Door Override\nSel => manual'
return msg
def disp_light_override(self):
if self.light_state_override:
msg = 'LIGHT OVERRIDDEN\nL/R to cancel'
else:
msg = 'Light Override\nSel => manual'
return msg
def disp_override(self):
if self.door_state_override:
door_state = 'T'
else:
door_state = 'F'
if self.light_state_override:
light_state = 'T'
else:
light_state = 'F'
msg = 'Override Door: {}\n Light: {}'.format(door_state,light_state)
return msg
def disp_sensor_state(self):
if self.door_is_closed:
DC = 'cl'
else:
DC = 'op'
if self.door_is_open:
DO = 'cl'
else:
DO = 'op'
msg = 'Sensor state\nDC:{} DO:{}'.format(DC,DO)
return msg
def disp_current_time(self):
string,parts = self.get_datetime_string(self.cur_time)
msg = 'Current Time:\n{}'.format(string)
return msg
def disp_door_times(self):
string,parts = self.get_datetime_string(self.sunrise)
month,day,hour,minute,second = parts
l1 = 'Door=>Open {}:{}'.format(hour,minute)
string,parts = self.get_datetime_string(self.close_time)
month,day,hour,minute,second = parts
l2 = ' Close {}:{}'.format(hour,minute)
msg = '{}\n{}'.format(l1,l2)
return msg
def disp_light_times(self):
string,parts = self.get_datetime_string(self.sunrise)
month,day,hour,minute,second = parts
l1 = 'Light=>On {}:{}'.format(hour,minute)
string,parts = self.get_datetime_string(self.sunset)
month,day,hour,minute,second = parts
l2 = ' Off {}:{}'.format(hour,minute)
msg = '{}\n{}'.format(l1,l2)
return msg
def get_datetime_string(self,dto):
dto = dto.replace(tzinfo=dt.timezone.utc).astimezone(tz=None)
month = str(dto.month).zfill(2)
day = str(dto.day).zfill(2)
hour = str(dto.hour).zfill(2)
minute = str(dto.minute).zfill(2)
second = str(dto.second).zfill(2)
string = '{}-{} {}:{}'.format(month,day,hour,minute)
parts = (month,day,hour,minute,second)
return string,parts
def check_times(self):
self.door_open_time = (self.cur_time>self.sunrise) & (self.cur_time<self.close_time)
self.light_on_time = (self.cur_time>self.sunrise) & (self.cur_time<self.sunset)
self.check_send_notification_time()
self.check_display_time()
self.check_for_new_day()
self.check_door_move_time()
def check_send_notification_time(self):
if (self.cur_time > self.send_next_message_time) and (len(self.notification_list)>0):
self.send_next_message_time = self.cur_time + dt.timedelta(seconds=1.5)
self.send_next_notification()
def check_display_time(self):
self.display_time_exceeded = self.cur_time>self.display_off_time
def check_for_new_day(self):
string,parts = self.get_datetime_string(self.cur_time)
month,day,hour,minute,second = parts
if self.cur_day != day:
if settings.restart_daily:
restart()
else:
self.cur_day = day
self.get_sunrise_sunset()
def check_door_move_time(self):
if self.cur_time > self.door_move_end_time:
string,parts = self.get_datetime_string(self.cur_time)
if self.door_is_closing:
msg = 'Chicken Door Malfunction:\n Door Didn\'t close \n time: {}'.format(string)
self.error_msg = 'ERR: clse failed\nSelect ==> clear'
elif self.door_is_opening:
msg = 'Chicken Door Malfunction:\n Door Didn\'t open \n time: {}'.format(string)
self.error_msg = 'ERR: open failed\nSelect ==> clear'
else:
msg = 'Chicken Door Malfunction:\n Not sure what the problem is \n time: {}'.format(string)
self.error_msg = 'ERR: unk failure\nSelect ==> clear'
self.error_state = True
self.cur_menu = -3
self.in_sub_menu = False
self.door_stop()
self.queue_notification(msg)
def check_display_status(self):
if self.display_is_on:
if self.display_time_exceeded:
if self.door_state_override or self.light_state_override:
self.cur_menu = -2
self.display_message = self.disp_override()
self.in_sub_menu = False
self.update_display()
else:
self.display_off()
else:
self.update_display()
def init_display(self):
lcd_columns = 16
lcd_rows = 2
i2c = busio.I2C(board.SCL, board.SDA)
self.lcd = character_lcd.Character_LCD_RGB_I2C(i2c, lcd_columns, lcd_rows)
self.display_on()
self.display_off_time = self.cur_time + dt.timedelta(seconds=10)
# self.display_message = 'Welcome to the\nJungle!'
self.display_message = 'HI! Starting the\nstream'
self.prev_display_message = 'none'
def display_on(self):
self.display_is_on = True
self.lcd.color = [100, 0, 0]
def display_off(self):
self.display_is_on = False
self.cur_menu = -1
self.lcd.clear()
self.display_message = 'None'
self.msg = 'None'
self.in_sub_menu = False
self.lcd.color = [0, 0, 0]
def init_flags(self):
self.long_time = dt.timedelta(days=365*100)
string,parts = self.get_datetime_string(self.cur_time)
month,day,hour,minute,second = parts
self.cur_day = day
self.light_is_on = None
# self.door_is_open = False
# self.door_is_closed = False
self.door_is_opening = False
self.door_is_closing = False
self.door_travel_stop_time = self.cur_time+self.long_time
self.door_state_override = False
self.light_state_override = False
self.new_day = False
self.door_move_end_time = self.cur_time+self.long_time
self.cur_menu = -1
self.in_sub_menu = False
self.door_travel_time = dt.timedelta(seconds = settings.expected_door_travel_time)
self.notification_list = []
self.send_next_message_time = self.cur_time
self.error_state = False
self.msg = 'None'
def init_pins(self):
outputs = [13,20,21]
for output in outputs:
GPIO.setup(output,GPIO.OUT)
GPIO.output(output, GPIO.LOW)
inputs = [16,19]
for inpt in inputs:
GPIO.setup(inpt,GPIO.IN, pull_up_down=GPIO.PUD_UP)
self.pins = {
'light':13,
'door_raise':20,
'door_lower':21,
'door_closed':16,
'door_open':19}
def get_cur_time(self):
self.cur_time = dt.datetime.now(dt.timezone.utc)
def get_sunrise_sunset(self):
sun = Sun(settings.latitude, settings.longitude)
self.sunrise = sun.get_sunrise_time(self.cur_time)
self.sunset = sun.get_sunset_time(self.cur_time)
self.close_time = self.sunset + dt.timedelta(minutes=settings.wait_after_sunset)
self.print_sun_times()
def queue_notification(self,message):
for address in settings.phone_numbers:
self.notification_list.append((message,address))
def send_next_notification(self):
message,address = self.notification_list.pop(0)
try:
send_message_twilio(address,message)
except Exception as e:
with open(self.logfile_name, 'a') as file:
file.write('\nFAILED TO SEND TEXT NOTIFICATION. \nADDRESS:\n{}\nMESSAGE:\n{}\n\nTRACEBACK:\n'.format(address,message))
traceback.print_exc(limit=None, file=file, chain=True)
# msg= EmailMessage()
# my_address = settings.email
# app_generated_password = <PASSWORD> # gmail generated password
# msg["From"]= 'coop controller' #sender address
# msg["To"] = address #reciver address
# msg.set_content(message) #message body
# with smtplib.SMTP_SSL("smtp.gmail.com", 465) as smtp:
# smtp.login(my_address,app_generated_password) #login gmail account
# print("sending mail")
# smtp.send_message(msg) #send message
# print("mail has sent")
controller = coop_controller()
# controller.run()
try:
controller.run()
except Exception as e:
err_msg = str(e)
with open(controller.logfile_name, 'a') as file:
file.write('\nCONTROLLER CRASH TRACEBACK\n')
traceback.print_exc(limit=None, file=file, chain=True)
send_crash_notification(controller.logfile_name)
try:
controller.print_state('CONTROLLER CRASHED WITH:\n ERROR MESSAGE ==> {}\n\nState:\n'.format(err_msg))
except:
pass
restart()
```
|
{
"source": "jeremym-cfd/glmtools",
"score": 2
}
|
#### File: examples/grid/grid_sample_data.py
```python
import os, tempfile
import numpy as np
import xarray as xr
from numpy.testing import assert_equal
from glmtools.io.glm import GLMDataset
from glmtools.test.common import get_sample_data_path
sample_path = get_sample_data_path()
samples = [
"OR_GLM-L2-LCFA_G16_s20181830433000_e20181830433200_c20181830433231.nc",
"OR_GLM-L2-LCFA_G16_s20181830433200_e20181830433400_c20181830433424.nc",
"OR_GLM-L2-LCFA_G16_s20181830433400_e20181830434000_c20181830434029.nc",
]
samples = [os.path.join(sample_path, s) for s in samples]
def total_energy_in_L2(files, lon_range = (-102.5, -99.5),
lat_range = (31, 35)):
# Gather up all flashes over West Texas, which is all flashes in the
# mesoscale domain centered on that location at the time of the sample files
energy = 0.0
for sf in files:
glm = GLMDataset(sf)
flashes_subset = glm.subset_flashes(lon_range = lon_range,
lat_range = lat_range)
energy += flashes_subset.event_energy.sum().data
return energy
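# Illustrative usage (not part of the original tests): sum the event energy
# for the three sample files over the default West Texas box.
# wtx_energy = total_energy_in_L2(samples)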
from make_GLM_grids import create_parser, grid_setup
def compare_to_sample_grids(tmpdirname, resulting_date_path, output_sizes):
for entry in os.scandir(os.path.join(tmpdirname, *resulting_date_path)):
target = output_sizes[entry.name]
actual = entry.stat().st_size
percent = 1
assert np.abs(target-actual) < int(target*percent/100)
# print(entry.name, entry.stat().st_size)
def grid_sample_data(grid_spec, output_sizes, dirname=None, save=None):
"""
grid_spec are all command line arguments except for:
-o dir_name
input data filenames
output_sizes is a dictionary mapping from output filename to the expected
size of the file, which may vary by up to 20%
"""
resulting_date_path = ('2018', 'Jul', '02')
with tempfile.TemporaryDirectory() as tmpdir:
tmpdirname = os.path.join(tmpdir, dirname)
cmd_args = ["-o", tmpdirname] + grid_spec + samples
parser = create_parser()
args = parser.parse_args(cmd_args)
from multiprocessing import freeze_support
freeze_support()
gridder, glm_filenames, start_time, end_time, grid_kwargs = grid_setup(args)
output_return = gridder(glm_filenames, start_time, end_time, **grid_kwargs)
print(output_return)
if save:
from shutil import copytree
copytree(tmpdirname, save)
# print("Output file sizes")
for entry in os.scandir(os.path.join(tmpdirname, *resulting_date_path)):
# File size should be close to what we expect, with some platform
# differences due to OS, compression, etc.
test_file = entry.name
is_unified_type = ('OR_GLM-L2-GLMC-M3_G16_s20181830433000' in test_file)
if is_unified_type:
valid_file_key = 'OR_GLM-L2-GLMC-M3_G16_s20181830433000_e20181830434000_c20182551446.nc'
else:
valid_file_key = test_file
# target = output_sizes[valid_file_key]
# actual = entry.stat().st_size
# percent = 1
# assert np.abs(target-actual) < int(target*percent/100)
# Now compare the contents directly
valid_file = os.path.join(sample_path, dirname,
*resulting_date_path, valid_file_key)
valid = xr.open_dataset(valid_file)
check = xr.open_dataset(entry.path)
xr.testing.assert_allclose(valid, check)
if (('total_energy' in output_sizes) &
('total_energy' in test_file)):
np.testing.assert_allclose(check.total_energy.sum().data,
output_sizes['total_energy'],
rtol=1e-6)
# An abundance of caution: validate the valid data, too!
np.testing.assert_allclose(valid.total_energy.sum().data,
output_sizes['total_energy'],
rtol=1e-6)
# for valid_var, valid_data in valid.variables.items():
# check_data = check.variables[valid_var]
# assert np.testing.all_close(valid_data.data[:], check_data.data[:])
# print(entry.name, entry.stat().st_size)
def test_fixed_grid_conus():
""" This is equivalent to running the following bash script, which produces
GLM grids with split events on the fixed grid at 2 km resolution in a
CONUS sector.
python make_GLM_grids.py -o /path/to/output/
--fixed_grid --split_events \
--goes_position east --goes_sector conus \
--dx=2.0 --dy=2.0 \
OR_GLM-L2-LCFA_G16_s20181830433000_e20181830433200_c20181830433231.nc \
OR_GLM-L2-LCFA_G16_s20181830433200_e20181830433400_c20181830433424.nc \
OR_GLM-L2-LCFA_G16_s20181830433400_e20181830434000_c20181830434029.nc \
start and end are determined from the filenames
ctr_lon and ctr_lat are implicit in the CONUS grid
"""
grid_spec = ["--fixed_grid", "--split_events",
"--goes_position", "east", "--goes_sector", "conus",
"--dx=2.0", "--dy=2.0",
# "--ctr_lon=0.0", "--ctr_lat=0.0",
"--ellipse=0",
]
output_sizes = {
'GLM-00-00_20180702_043300_60_1src_056urad-dx_flash_extent.nc':123145,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_flash_centroid.nc':59715,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_footprint.nc':102662,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_group_area.nc':113400,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_group_extent.nc':128665,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_group_centroid.nc':63054,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_event.nc':128538,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_total_energy.nc':133139,
'OR_GLM-L2-GLMC-M3_G16_s20181830433000_e20181830434000_c20182551446.nc':577463,
}
grid_sample_data(grid_spec, output_sizes, dirname='conus'
, save='/data/tmp/glmtest/conus')
def test_fixed_grid_arbitrary():
""" This is equivalent to running the following bash script, which produces
GLM grids with split events on the fixed grid at 2 km resolution in a
sector defined by a center lon/lat point and a width and height.
python make_GLM_grids.py -o /path/to/output/
--fixed_grid --split_events \
--goes_position east \
--dx=2.0 --dy=2.0 --width=1000.0 --height=500.0 \
--ctr_lon=-101.5 --ctr_lat=33.5 \
OR_GLM-L2-LCFA_G16_s20181830433000_e20181830433200_c20181830433231.nc \
OR_GLM-L2-LCFA_G16_s20181830433200_e20181830433400_c20181830433424.nc \
OR_GLM-L2-LCFA_G16_s20181830433400_e20181830434000_c20181830434029.nc \
start and end are skipped, since they are determined from the filenames
"""
grid_spec = ["--fixed_grid", "--split_events",
"--goes_position", "east",
"--dx=2.0", "--dy=2.0",
"--ctr_lat=33.5", "--ctr_lon=-101.5",
"--width=1000.0", "--height=500.0",
"--ellipse=0",
]
output_sizes = {
'GLM-00-00_20180702_043300_60_1src_056urad-dx_flash_extent.nc':24775,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_flash_centroid.nc':19576,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_footprint.nc':22235,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_group_area.nc':23941,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_group_extent.nc':25421,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_group_centroid.nc':19838,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_event.nc':25294,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_total_energy.nc':25820,
'OR_GLM-L2-GLMC-M3_G16_s20181830433000_e20181830434000_c20182551446.nc':95911,
}
grid_sample_data(grid_spec, output_sizes, dirname='customsize'
, save='/data/tmp/glmtest/customsize')
def test_fixed_grid_meso():
""" This is equivalent to running the following bash script, which produces
GLM grids with split events on the fixed grid at 2 km resolution in a
sector defined by a center lon/lat point and a width and height given by
using the mesoscale lookup.
python make_GLM_grids.py -o /path/to/output/
--fixed_grid --split_events \
--goes_position east --goes_sector meso \
--dx=2.0 --dy=2.0 \
--ctr_lon=-101.5 --ctr_lat=33.5 \
OR_GLM-L2-LCFA_G16_s20181830433000_e20181830433200_c20181830433231.nc \
OR_GLM-L2-LCFA_G16_s20181830433200_e20181830433400_c20181830433424.nc \
OR_GLM-L2-LCFA_G16_s20181830433400_e20181830434000_c20181830434029.nc \
start and end are determined from the filenames.
"""
grid_spec = ["--fixed_grid", "--split_events",
"--goes_position", "east", "--goes_sector", "meso",
"--dx=2.0", "--dy=2.0",
"--ctr_lat=33.5", "--ctr_lon=-101.5",
"--ellipse=0",
]
output_sizes = {
'GLM-00-00_20180702_043300_60_1src_056urad-dx_flash_extent.nc':26370,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_flash_centroid.nc':21052,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_footprint.nc':23939,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_group_area.nc':25637,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_group_extent.nc':27053,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_group_centroid.nc':21305,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_event.nc':26926,
'GLM-00-00_20180702_043300_60_1src_056urad-dx_total_energy.nc':27501,
'OR_GLM-L2-GLMC-M3_G16_s20181830433000_e20181830434000_c20182551446.nc':101023,
'total_energy':total_energy_in_L2(samples),
}
grid_sample_data(grid_spec, output_sizes, dirname='meso'
,save='/data/tmp/glmtest/meso')
```
#### File: examples/grid/test_grid_specs.py
```python
import os, tempfile
import numpy as np
import xarray as xr
from numpy.testing import assert_equal
from glmtools.test.common import get_sample_data_path
from glmtools.grid.make_grids import subdivided_fixed_grid
from lmatools.grid.fixed import get_GOESR_grid
sample_path = get_sample_data_path()
samples = [
"OR_GLM-L2-LCFA_G16_s20181830433000_e20181830433200_c20181830433231.nc",
"OR_GLM-L2-LCFA_G16_s20181830433200_e20181830433400_c20181830433424.nc",
"OR_GLM-L2-LCFA_G16_s20181830433400_e20181830434000_c20181830434029.nc",
]
samples = [os.path.join(sample_path, s) for s in samples]
from make_GLM_grids import create_parser, grid_setup
def grid_sample_data(grid_spec, dirname=None):
"""
grid_spec are all command line arguments except for:
-o dir_name
input data filenames
generates (yields) an os.DirEntry for each file created by the gridder.
"""
resulting_date_path = ('2018', 'Jul', '02')
with tempfile.TemporaryDirectory() as tmpdir:
tmpdirname = os.path.join(tmpdir, dirname)
cmd_args = ["-o", tmpdirname] + grid_spec + samples
parser = create_parser()
args = parser.parse_args(cmd_args)
from multiprocessing import freeze_support
freeze_support()
gridder, glm_filenames, start_time, end_time, grid_kwargs = grid_setup(args)
gridder(glm_filenames, start_time, end_time, **grid_kwargs)
# gridder(glm_filenames, start_time, end_time, **grid_kwargs)
for entry in os.scandir(os.path.join(tmpdirname, *resulting_date_path)):
yield entry
def check_against_pug(check, pug_spec):
""" check is an xarray dataset.
pug_spec is as returned by get_GOESR_grid
"""
x = check.x.data
y = check.y.data
nx_valid = pug_spec['pixelsEW']
ny_valid = pug_spec['pixelsNS']
nx_actual, ny_actual = check.x.shape[0], check.y.shape[0]
assert(nx_actual==nx_valid)
assert(ny_actual==ny_valid)
# A simple delta of the first and second coordinate values has some
# numerical noise in it, but taking the mean of all the deltas eliminates it.
dx_actual = np.abs((x[1:] - x[:-1]).mean())
dy_actual = np.abs((y[1:] - y[:-1]).mean())
np.testing.assert_allclose(dx_actual, pug_spec['resolution'], rtol=1e-5)
np.testing.assert_allclose(dy_actual, pug_spec['resolution'], rtol=1e-5)
# the x, y coordinates are the center points of the pixels, so
# the span of the image (to the pixel corners) is an extra pixel
# in each direction (1/2 pixel on each edge)
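# Worked example with hypothetical numbers: for three pixel centers at
# [-d, 0, d], the raw extent xmax - xmin is 2*d, so adding one pixel width d
# recovers the full 3*d span quoted in the PUG tables.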
xmin, xmax = x.min(), x.max()
ymin, ymax = y.min(), y.max()
xspan_actual = np.abs(xmax - xmin) + dx_actual
yspan_actual = np.abs(ymax - ymin) + dy_actual
np.testing.assert_allclose(xspan_actual, pug_spec['spanEW'], rtol=1e-5)
np.testing.assert_allclose(yspan_actual, pug_spec['spanNS'], rtol=1e-5)
x_center_right = xmax -((xspan_actual-dx_actual)/2.0)
y_center_right = ymax -((yspan_actual-dy_actual)/2.0)
x_center_left = xmin + ((xspan_actual-dx_actual)/2.0)
y_center_left = ymin + ((yspan_actual-dy_actual)/2.0)
np.testing.assert_allclose(x_center_right, pug_spec['centerEW'],
rtol=1e-5, atol=1e-8)
np.testing.assert_allclose(y_center_right, pug_spec['centerNS'],
rtol=1e-5, atol=1e-8)
np.testing.assert_allclose(x_center_left, pug_spec['centerEW'],
rtol=1e-5, atol=1e-8)
np.testing.assert_allclose(y_center_left, pug_spec['centerNS'],
rtol=1e-5, atol=1e-8)
def test_conus_size():
"""
Test that calling examples/grid/make_GLM_grids.py creates a CONUS domain
that matches the sizes described in the GOES-R PUG.
"""
position = 'east'
view = 'conus'
resolution = '2.0km'
pug_spec = get_GOESR_grid(position=position, view=view,
resolution=resolution)
grid_spec = ["--fixed_grid", "--split_events",
"--goes_position", position, "--goes_sector", view,
"--dx={0}".format(resolution[:-2]),
"--dy={0}".format(resolution[:-2])]
for entry in grid_sample_data(grid_spec, dirname=view):
check = xr.open_dataset(entry.path)
check_against_pug(check, pug_spec)
def test_meso_size():
"""
Test that calling examples/grid/make_GLM_grids.py creates a Meso domain
that matches the sizes described in the GOES-R PUG.
"""
position = 'east'
view = 'meso'
resolution = '2.0km'
pug_spec = get_GOESR_grid(position=position, view=view,
resolution=resolution)
# For the GOES-E and a ctr_lon (below) of -75 (nadir) the center
# will be 0,0
pug_spec['centerEW'] = 0.0
pug_spec['centerNS'] = 0.0
grid_spec = ["--fixed_grid", "--split_events",
"--goes_position", position, "--goes_sector", view,
"--dx={0}".format(resolution[:-2]),
"--dy={0}".format(resolution[:-2]),
"--ctr_lat=0.0", "--ctr_lon=-75.0",
]
for entry in grid_sample_data(grid_spec, dirname=view):
check = xr.open_dataset(entry.path)
check_against_pug(check, pug_spec)
# For a mesoscale domain centered on the satellite subpoint the
# east and west deltas should be symmetric, and the center should
# be directly below the satellite.
np.testing.assert_allclose(check.x[-1], -check.x[0], rtol=1e-5)
np.testing.assert_allclose(check.y[-1], -check.y[0], rtol=1e-5)
```
#### File: examples/plot/plots.py
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import LogFormatter, LogLocator
import cartopy
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.io.shapereader import Reader as ShapeReader
display_params = {}
from matplotlib.cm import get_cmap
from matplotlib.colors import LogNorm, Normalize
glm_cmap = get_cmap('viridis')
# glm_cmap._init()
# alphas = np.linspace(1.0, 1.0, glm_cmap.N+3)
# glm_cmap._lut[:,-1] = alphas
# glm_cmap._lut[0,-1] = 0.0
display_params['flash_centroid_density'] = {
'product_label':"GOES-16 GLM Flash Centroid Density (count)",
'glm_norm':LogNorm(vmin=1, vmax=10),
'file_tag':'flash_centroid'
}
display_params['flash_extent_density'] = {
'product_label':"GOES-16 GLM Flash Extent Density (count)",
'glm_norm':LogNorm(vmin=1, vmax=50),
'file_tag':'flash_extent',
}
display_params['group_centroid_density'] = {
'product_label':"GOES-16 GLM Group Centroid Density (count)",
'glm_norm':LogNorm(vmin=1, vmax=10),
'file_tag':'group_centroid'
}
display_params['group_extent_density'] = {
'product_label':"GOES-16 GLM Group Extent Density (count)",
'glm_norm':LogNorm(vmin=1, vmax=500),
'file_tag':'group_extent',
}
display_params['event_density']=display_params['group_extent_density']
display_params['total_energy'] = {
'product_label':"GOES-16 GLM Total Energy (J)",
'glm_norm':LogNorm(vmin=1e-17, vmax=1e-12),
'file_tag':'total_energy'
}
display_params[ 'average_flash_area'] = {
'product_label':"GOES-16 GLM Average Flash Area (km$^2$)",
'glm_norm':LogNorm(vmin=50, vmax=.5e4),
'file_tag':'flash_area'
}
display_params['average_group_area'] = {
'product_label':"GOES-16 GLM Average Group Area (km$^2$)",
'glm_norm':LogNorm(vmin=50, vmax=.5e4),
'file_tag':'group_area'
}
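# Additional products can be registered by adding entries that follow the same
# pattern. Illustrative sketch only -- 'minimum_flash_area' and its colour
# limits are assumptions, not part of the original module:
# display_params['minimum_flash_area'] = {
#     'product_label':"GOES-16 GLM Minimum Flash Area (km$^2$)",
#     'glm_norm':LogNorm(vmin=50, vmax=.5e4),
#     'file_tag':'min_flash_area'}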
label_string = """
{1} (max {0:3.0f})"""
def set_shared_geoaxes(fig):
mapax =[axi for axi in fig.axes if
type(axi)==cartopy.mpl.geoaxes.GeoAxesSubplot]
# for axi in fig.mapax[1:]:
mapax[0].get_shared_x_axes().join(*mapax)
mapax[0].get_shared_y_axes().join(*mapax)
return mapax
def plot_glm(fig, glm_grids, tidx, fields, subplots=(2,3),
axes_facecolor = (0., 0., 0.), map_color = (.8, .8, .8)):
fig.clf()
glmx = glm_grids.x.data[:]
glmy = glm_grids.y.data[:]
proj_var = glm_grids['goes_imager_projection']
x = glmx * proj_var.perspective_point_height
y = glmy * proj_var.perspective_point_height
glm_xlim = x.min(), x.max()
glm_ylim = y.min(), y.max()
country_boundaries = cfeature.NaturalEarthFeature(category='cultural',
name='admin_0_countries',
scale='50m', facecolor='none')
state_boundaries = cfeature.NaturalEarthFeature(category='cultural',
name='admin_1_states_provinces_lakes',
scale='50m', facecolor='none')
globe = ccrs.Globe(semimajor_axis=proj_var.semi_major_axis, semiminor_axis=proj_var.semi_minor_axis)
proj = ccrs.Geostationary(central_longitude=proj_var.longitude_of_projection_origin,
satellite_height=proj_var.perspective_point_height, globe=globe)
cbars=[]
for fi, f in enumerate(fields):
glm_norm = display_params[f]['glm_norm']
product_label = display_params[f]['product_label']
file_tag = display_params[f]['file_tag']
ax = fig.add_subplot(subplots[0], subplots[1], fi+1, projection=proj)
ax.background_patch.set_facecolor(axes_facecolor)
ax.coastlines('10m', color=map_color)
ax.add_feature(state_boundaries, edgecolor=map_color, linewidth=0.5)
ax.add_feature(country_boundaries, edgecolor=map_color, linewidth=1.0)
glm = glm_grids[f].sel(time=tidx).data
# Use a masked array instead of messing with colormap to get transparency
# glm = np.ma.array(glm, mask=(np.isnan(glm)))
# glm_alpha = .5 + glm_norm(glm)*0.5
glm[np.isnan(glm)] = 0
glm_img = ax.imshow(glm, extent=(x.min(), x.max(),
y.min(), y.max()), origin='upper',
# transform = ccrs.PlateCarree(),
cmap=glm_cmap, interpolation='nearest',
norm=glm_norm)#, alpha=0.8)
# Match the GLM grid limits, in fixed grid space
ax.set_xlim(glm_xlim)
ax.set_ylim(glm_ylim)
# Set a lat/lon box directly
# ax.set_extent([-103, -99.5, 32.0, 38.0])
# limits = ax.axis()
limits = [0.,1.,0.,1.]
ax.text(limits[0]+.02*(limits[1]-limits[0]), limits[2]+.02*(limits[3]-limits[2]),
tidx.isoformat().replace('T', ' ')+' UTC'+
label_string.format(glm.max(), product_label),
# transform = proj,
transform = ax.transAxes,
color=map_color)
cbars.append((ax,glm_img))
# Make the colorbar position match the height of the Cartopy axes
# Have to draw to force layout so that the ax position is correct
fig.tight_layout()
fig.canvas.draw()
for ax, glm_img in cbars:
posn = ax.get_position()
height_scale = .025*subplots[0]
top_edge = [posn.x0, posn.y0+posn.height*(1.0-height_scale),
posn.width, posn.height*height_scale]
cbar_ax = fig.add_axes(top_edge)
cbar = plt.colorbar(glm_img, orientation='horizontal', cax=cbar_ax,
format=LogFormatter(base=2), ticks=LogLocator(base=2))
cbar.outline.set_edgecolor(axes_facecolor)
ax.outline_patch.set_edgecolor(axes_facecolor)
cbar.ax.tick_params(direction='in', color=axes_facecolor, which='both',
pad=-14, labelsize=10, labelcolor=axes_facecolor)
mapax = set_shared_geoaxes(fig)
return mapax, [ax for (ax,img) in cbars]
```
#### File: glmtools/io/convert_lcfa.py
```python
import pandas as pd
import xarray as xr
"""
Example of data format:
D Flash #0 Start Time:543398228.069 End Time:543398228.403 Centroid Lat:-4.684 Centroid Lon:-47.148 Energy:1.838e-13 Footprint:715.9 Child_Count:8
D Group #0 Child #0 Start Time:543398228.069 End Time:543398228.069 Centroid Lat:-4.661 Centroid Lon:-47.120 Energy:9.202e-15 Footprint:179.2 Parent_ID:0 Child_Count:2
Event #0 Child #0 Time:543398228.069 X_Pixel:1197 Y_Pixel:748 Lat:-4.698 Lon:-47.116 Energy:4.811e-15 Parent_ID:0
Event #1 Child #1 Time:543398228.069 X_Pixel:1197 Y_Pixel:747 Lat:-4.621 Lon:-47.124 Energy:4.391e-15 Parent_ID:0
D Group #2 Child #1 Start Time:543398228.083 End Time:543398228.083 Centroid Lat:-4.669 Centroid Lon:-47.144 Energy:1.792e-14 Footprint:268.6 Parent_ID:0 Child_Count:3
Event #4 Child #0 Time:543398228.083 X_Pixel:1197 Y_Pixel:748 Lat:-4.698 Lon:-47.116 Energy:6.440e-15 Parent_ID:2
Event #5 Child #1 Time:543398228.083 X_Pixel:1197 Y_Pixel:747 Lat:-4.621 Lon:-47.124 Energy:6.664e-15 Parent_ID:2
Event #6 Child #2 Time:543398228.083 X_Pixel:1196 Y_Pixel:748 Lat:-4.697 Lon:-47.210 Energy:4.817e-15 Parent_ID:2
"""
flash_cols = (2, 4, 6, 8, 10, 11, 12, 13)
flash_col_names = ('flash_id',
'flash_time_offset_of_first_event', 'flash_time_offset_of_last_event',
'flash_lat', 'flash_lon', 'flash_energy', 'flash_area',
'flash_child_group_count')
group_cols = (2, 6, 10, 12, 13, 14, 15, 16)
group_col_names = ('group_id',
'group_time_offset',
'group_lat', 'group_lon', 'group_energy', 'group_area',
'group_parent_flash_id', 'group_child_event_count')
event_cols = (1, 4, 5, 6, 7, 8, 9, 10)
event_col_names = ('event_id',
'event_time_offset', 'x_pixel', 'y_pixel',
'event_lat', 'event_lon', 'event_energy',
'event_parent_group_id',)
# Columns treated as coordinate data in the NetCDF file
nc_coords = [
'event_id',
'event_time_offset',
'event_lat',
'event_lon',
'event_parent_group_id',
'group_id',
'group_time_offset',
'group_lat',
'group_lon',
'group_parent_flash_id',
'flash_id',
'flash_time_offset_of_first_event',
'flash_time_offset_of_last_event',
'flash_lat',
'flash_lon',
]
def gen_flash_data(filename):
"""
0 1 2 3 4 5 6 7 8 9 10 11 12 13
D Flash #0 Start Time:543398228.069 End Time:543398228.403 Centroid Lat:-4.684 Centroid Lon:-47.148 Energy:1.838e-13 Footprint:715.9 Child_Count:8
"""
with open(filename, 'r') as f:
for line in f:
if line.startswith("D Flash"):
cols = line.split()
out = (int(cols[2][1:]),
float(cols[4].split(':')[1]),
float(cols[6].split(':')[1]),
float(cols[8].split(':')[1]),
float(cols[10].split(':')[1]),
float(cols[11].split(':')[1]),
float(cols[12].split(':')[1]),
int(cols[13].split(':')[1]),
)
yield out
def gen_group_data(filename):
"""
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
D Group #0 Child #0 Start Time:543398228.069 End Time:543398228.069 Centroid Lat:-4.661 Centroid Lon:-47.120 Energy:9.202e-15 Footprint:179.2 Parent_ID:0 Child_Count:2
"""
with open(filename, 'r') as f:
for line in f:
if line.startswith(" D Group"):
cols = line.split()
out = (int(cols[2][1:]),
float(cols[6].split(':')[1]),
float(cols[10].split(':')[1]),
float(cols[12].split(':')[1]),
float(cols[13].split(':')[1]),
float(cols[14].split(':')[1]),
int(cols[15].split(':')[1]),
int(cols[16].split(':')[1]),
)
yield out
def gen_event_data(filename):
"""
0 1 2 3 4 5 6 7 8 9 10
Event #0 Child #0 Time:543398228.069 X_Pixel:1197 Y_Pixel:748 Lat:-4.698 Lon:-47.116 Energy:4.811e-15 Parent_ID:0
"""
with open(filename, 'r') as f:
for line in f:
if line.startswith(" Event"):
cols = line.split()
out = (int(cols[1][1:]),
float(cols[4].split(':')[1]),
int(cols[5].split(':')[1]),
int(cols[6].split(':')[1]),
float(cols[7].split(':')[1]),
float(cols[8].split(':')[1]),
float(cols[9].split(':')[1]),
int(cols[10].split(':')[1]),
)
yield out
def parse_LCFA_ASCII(filename):
"""
Read ASCII Flash, Group, Event data in filename and return
pandas dataframes (flashes, groups, events)
"""
# Right now this reads through the data file three times. Could use a coroutine approach to read file once
# if performance starts to take a hit.
flashes = pd.DataFrame.from_records(gen_flash_data(filename), columns=flash_col_names)#, index='flash_id')
groups = pd.DataFrame.from_records(gen_group_data(filename), columns=group_col_names)#, index='group_id')
events = pd.DataFrame.from_records(gen_event_data(filename), columns=event_col_names)#, index='event_id')
# Dimension names used in the NetCDF file
flashes.index.name = 'number_of_flashes'
groups.index.name = 'number_of_groups'
events.index.name = 'number_of_events'
return flashes, groups, events
def merge_dataframes_to_xarray(flashes, groups, events):
evx = events.to_xarray()
flx = flashes.to_xarray()
grx = groups.to_xarray()
egf = evx.merge(grx).merge(flx)
egf.set_coords(nc_coords, inplace=True)
return egf
def main(infile, outfile):
fl, gr, ev = parse_LCFA_ASCII(infile)
egf = merge_dataframes_to_xarray(fl, gr, ev)
egf.to_netcdf(outfile)
if __name__ == '__main__':
import sys
if len(sys.argv) < 3:
print("Usage: python convert_lcfa.py input_filename output_filename")
else:
infile = sys.argv[1]
outfile = sys.argv[2]
main(infile, outfile)
```
#### File: glmtools/io/imagery.py
```python
import os, itertools
from datetime import datetime, timedelta
import numpy as np
import xarray as xr
import pandas as pd
import logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
"""
SCALING of fields
---Flash extent density---
For 330 ms time separation criteria, the max flash rate per minute in any pixel
is 181.8/min, or a max hourly value of 10860.
10860/65535 = 1/6
Another way to go: for an 8x8 km GLM pixel oversampled to 2x2 km,
the fractional area coverage is 1/16. This works out to 4095 flashes per minute
max, which seems plenty safe and covers the absolute max accumulation in 20 min.
2 bytes unsigned
scale=1/16=0.0625
offset=0
---Group extent density---
Have a max in relampago case of 1000/min (max is 30000/min for a 500 fps CCD),
so over 20 min this is 20000 events.
That is 5x the max we assume above in FED, which is equivalent to 5 groups/flash
assuming a full flash rate. And that full flash rate is unlikely to be reached!
4*1/16 = 1/4 would preserve a bit of antialiasing and multiplies nicely.
Set scale to 1/4 = 0.25
Offset = 0
---Flash and group centroid density---
GCD will be larger than FCD, so size with GCD. The max rate for GCD will likely
be the same as the GED - a maximum at the center of the cell. We can get away
with two bytes, scale and offset of 1 because there is no antialiasing.
scale=1
offset=0
---Average flash area, group area---
The WMO record flash distance is 320 km, which squared is ~100,000 km^2.
Minimum observable flash area is 64 km^2, which divided by 16 (for smallest
fractional pixel coverage at 2 km) is 4 km^2.
A 2 km^2 scale factor gets us 131070 max in two bytes.
However, since 10000 km^2 is the max flash area in the L2 data, it will never
be exceeded, and so we set scale to 1 and offset to 0, for a max 65535.
2 bytes, unsigned
scale_factor = 1 km^2
offset = 0
---Total optical energy---
2 bytes, linear
scale_factor=1.52597e-15 J, add_offset=0 (range 0 to 1.0e-10 J)
1.0e-10 is also the max event_energy value in the L2 files, so we can’t have
more than one event that hits that level. However, I think this should be safe.
The fact that the flash_energy has the same scale factor as event_energy in L2
suggests that there is margin in the ceiling of 1e-10 J. And as Scott's stats
showed, pixels with total energy in excess of even 10e-12 J are quite rare.
"""
glm_scaling = {
'flash_extent_density':{'dtype':'uint16',
'scale_factor':0.0625, 'add_offset':0.0},
'flash_centroid_density':{'dtype':'uint16',
'scale_factor':1.0, 'add_offset':0.0},
'average_flash_area':{'dtype':'uint16',
'scale_factor':10.0, 'add_offset':0.0},
'minimum_flash_area':{'dtype':'uint16',
'scale_factor':10.0, 'add_offset':0.0},
'event_density':{'dtype':'uint16',
'scale_factor':0.25, 'add_offset':0.0},
# 'standard_deviation_flash_area',
'group_extent_density':{'dtype':'uint16',
'scale_factor':0.25, 'add_offset':0.0},
'group_centroid_density':{'dtype':'uint16',
'scale_factor':1.0, 'add_offset':0.0},
'average_group_area':{'dtype':'uint16',
'scale_factor':1.0, 'add_offset':0.0},
'total_energy':{'dtype':'uint16',
'scale_factor':1.52597e-6, 'add_offset':0.0,},
}
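# Quick worked example of the uint16 packing described above (illustrative
# only; the 12.5-flash value is an arbitrary assumption). With the standard
# NetCDF convention decoded = scale_factor * encoded + add_offset:
# _fed_scale = glm_scaling['flash_extent_density']['scale_factor']  # 0.0625
# 12.5 / _fed_scale   # -> 200.0, the integer count stored on disk
# 65535 * _fed_scale  # -> 4095.9375, the largest representable FED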
def get_goes_imager_subpoint_vars(nadir_lon):
""" Returns two xarray DataArrays containing the nominal satellite subpoint
latitude and longitude, as netCDF float variables.
returns subpoint_lon, subpoint_lat
"""
sublat_meta = {}
sublat_descr = "nominal satellite subpoint latitude (platform latitude)"
sublat_meta['long_name'] = sublat_descr
sublat_meta['standard_name'] = 'latitude'
sublat_meta['units'] = 'degrees_north'
sublat_enc = {'_FillValue':-999.0}
sublon_meta = {}
sublon_descr = "nominal satellite subpoint longitude (platform longitude)"
sublon_meta['long_name'] = sublon_descr
sublon_meta['standard_name'] = 'longitude'
sublon_meta['units'] = 'degrees_east'
sublon_enc = {'_FillValue':-999.0}
sublat = xr.DataArray(0.0, name='nominal_satellite_subpoint_lat',
attrs=sublat_meta)
sublat.encoding = sublat_enc
sublon = xr.DataArray(nadir_lon, name='nominal_satellite_subpoint_lon',
attrs=sublon_meta)
sublon.encoding = sublon_enc
return sublon, sublat
def get_goes_imager_proj(nadir_lon):
""" Returns an xarray DataArray containing the GOES-R series
goes_imager_projection data and metadata
"""
meta = {}
meta['long_name'] = "GOES-R ABI fixed grid projection"
meta['grid_mapping_name'] = "geostationary"
meta['perspective_point_height'] = 35786023.
meta['semi_major_axis'] = 6378137.
meta['semi_minor_axis'] = 6356752.31414
meta['inverse_flattening'] = 298.2572221
meta['latitude_of_projection_origin'] = 0.0
meta['longitude_of_projection_origin'] = nadir_lon
meta['sweep_angle_axis'] = "x"
encoding = {}
encoding['dtype'] = 'i4'
var = xr.DataArray(-2147483647, attrs=meta, name='goes_imager_projection')
var.encoding=encoding
return var
def get_goes_imager_all_valid_dqf(dims, n):
""" dims is a tuple of dimension names in the same order as n, the number
of elements along each dimension
Returns dqf, an xarray.DataArray of the GLM data quality
field, in the style of the GOES-R series DQF field """
meta = {}
meta['grid_mapping'] = "goes_imager_projection"
meta['number_of_qf_values'] = np.asarray(6, dtype='i4')
meta['units'] = "1"
meta['standard_name'] = "status_flag"
meta['long_name'] = "GLM data quality flags"
meta['flag_values'] = np.asarray((0,1), dtype='i4')
meta['flag_meanings'] = "valid, invalid"
dqf = np.zeros(n, dtype='u1')
enc = {}
enc['_FillValue'] = np.asarray(255, dtype='u1')
enc['_Unsigned'] = "true"
enc['dtype'] = 'i1'
enc['zlib'] = True # compress the field
dqf_var = xr.DataArray(dqf, dims=dims, attrs=meta, name="DQF")
dqf_var.encoding = enc
return dqf_var
def get_goes_imager_fixedgrid_coords(x, y, resolution='2km at nadir',
scene_id='FULL', fill=-999.0):
""" Create variables with metadata for fixed grid coordinates as defined
for the GOES-R series of spacecraft.
Assumes that the imagery is at 2 km resolution (no other options are
implemented), and applies the scale and offset values indicated in the
GOES-R PUG for the full disk scene, guaranteeing that we cover all fixed
grid coordinates.
Arguments:
x, y: 1-dimensional arrays of coordinate values
resolution: like "2km at nadir"
scene_id: 'FULL' is the only allowed argument; other values will be ignored
Returns:
x_var, y_var: xarray.DataArray objects, with type inferred from x and y.
"""
scene_id='FULL'
# Values from the GOES-R PUG. These are signed shorts (int16).
two_km_enc = {
'FULL':{'dtype':'int16', 'x':{'scale_factor': 0.000056,
'add_offset':-0.151844,
'_FillValue':-999.0},
'y':{'scale_factor':-0.000056,
'add_offset':0.151844,
'_FillValue':-999.0},
},
# The PUG has specific values for the CONUS sector, and given the
# discretization of the coords to 2 km resolution, each scene may need to be
# special-cased so that the span of the image matches exactly. Right now ONLY
# GOES-EAST CONUS IS IMPLEMENTED as a special case, with scene_id='CONUS'.
# 'CONUS':{'dtype':'int16', 'x':{'scale_factor': 0.000056,
# 'add_offset':-0.101332},
# 'y':{'scale_factor':-0.000056,
# 'add_offset':0.128212},
# }
# 'MESO1', 'MESO2', 'OTHER'
}
# two_km_enc['OTHER'] = two_km_enc['MESO1']
x_meta, y_meta = {}, {}
x_enc = two_km_enc['FULL']['x']
x_enc['dtype'] = two_km_enc[scene_id]['dtype']
y_enc = two_km_enc['FULL']['y']
y_enc['dtype'] = two_km_enc[scene_id]['dtype']
x_meta['axis'] = "X"
x_meta['long_name'] = "GOES fixed grid projection x-coordinate"
x_meta['standard_name'] = 'projection_x_coordinate'
x_meta['units'] = "rad"
y_meta['axis'] = "Y"
y_meta['long_name'] = "GOES fixed grid projection y-coordinate"
y_meta['standard_name'] = 'projection_y_coordinate'
y_meta['units'] = "rad"
x_coord = xr.DataArray(x, name='x', dims=('x',),
attrs=x_meta)
x_coord.encoding = x_enc
y_coord = xr.DataArray(y, name='y', dims=('y',),
attrs=y_meta)
y_coord.encoding = y_enc
return x_coord, y_coord
def get_glm_global_attrs(start, end, platform, slot, instrument, scene_id,
resolution, timeline, prod_env, prod_src, prod_site, ):
"""
Create the global metadata attribute dictionary for GOES-R series GLM
Imagery products.
Arguments:
start, end: datetime of the start and end times of image coverage
platform: one of G16, G17 or a follow-on platform
slot: the orbital slot ("GOES-East", "GOES-West", etc.)
instrument: one of "GLM-1", "GLM-2", or a follow on instrument.
scene_id: one of 'FULL', 'CONUS', 'MESO1', or 'MESO2' if compatible with
the ABI definitions or 'OTHER'.
resolution: like "2km at nadir"
prod_env: "OE", "DE", etc.
prod_src: "Realtime" or "Postprocessed"
prod_site: "NAPO", "TTU", etc.
The date_created is set to the time at which this function is run.
Returns: meta, a dictionary of metadata attributes.
"""
created = datetime.now()
modes = {'ABI Mode 3':'M3'}
# For use in the dataset name / filename
scenes = {'FULL':'F',
'CONUS':'C',
'MESO1':'M1',
'MESO2':'M2',
'OTHER':'M1'}
scene_names = {"FULL":"Full Disk",
"CONUS":"CONUS",
"MESO1":"Mesoscale",
"MESO2":"Mesoscale",
"OTHER":"Custom"}
# "OR_GLM-L2-GLMC-M3_G16_s20181011100000_e20181011101000_c20181011124580.nc
dataset_name = "OR_GLM-L2-GLM{5}-{0}_{1}_s{2}_e{3}_c{4}.nc".format(
modes[timeline], platform, start.strftime('%Y%j%H%M%S0'),
end.strftime('%Y%j%H%M%S0'), created.strftime('%Y%j%H%M%S0'),
scenes[scene_id]
)
meta = {}
# Properties that don't change
meta['cdm_data_type'] = "Image"
meta['Conventions'] = "CF-1.7"
meta['id'] = "93cb84a3-31ef-4823-89f5-c09d88fc89e8"
meta['institution'] = "DOC/NOAA/NESDIS > U.S. Department of Commerce, National Oceanic and Atmospheric Administration, National Environmental Satellite, Data, and Information Services"
meta['instrument_type'] = "GOES R Series Geostationary Lightning Mapper"
meta['iso_series_metadata_id'] = "f5816f53-fd6d-11e3-a3ac-0800200c9a66"
meta['keywords'] = "ATMOSPHERE > ATMOSPHERIC ELECTRICITY > LIGHTNING, ATMOSPHERE > ATMOSPHERIC PHENOMENA > LIGHTNING"
meta['keywords_vocabulary'] = "NASA Global Change Master Directory (GCMD) Earth Science Keywords, Version 7.0.0.0.0"
meta['license'] = "Unclassified data. Access is restricted to approved users only."
meta['Metadata_Conventions'] = "Unidata Dataset Discovery v1.0"
meta['naming_authority'] = "gov.nesdis.noaa"
meta['processing_level'] = "National Aeronautics and Space Administration (NASA) L2"
meta['project'] = "GOES"
meta['standard_name_vocabulary'] = "CF Standard Name Table (v25, 05 July 2013)"
meta['summary'] = "The Lightning Detection Gridded product generates fields starting from the GLM Lightning Detection Events, Groups, Flashes product. It consists of flash extent density, event density, average flash area, average group area, total energy, flash centroid density, and group centroid density."
meta['title'] = "GLM L2 Lightning Detection Gridded Product"
# Properties that change
meta['dataset_name'] = dataset_name
meta['date_created'] = created.isoformat()+'Z'
meta['instrument_ID'] = instrument
meta['orbital_slot'] = slot
meta['platform_ID'] = platform
meta['production_data_source'] = prod_src
meta['production_environment'] = prod_env
meta['production_site'] = prod_site
meta['scene_id'] = scene_names[scene_id]
meta['spatial_resolution'] = resolution
meta['time_coverage_end'] = end.isoformat()+'Z'
meta['time_coverage_start'] = start.isoformat()+'Z'
meta['timeline_id'] = timeline
return meta
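# Illustrative call (the argument values are assumptions chosen to mirror the
# write_goes_imagery() call further below):
# attrs = get_glm_global_attrs(datetime(2018, 7, 2, 4, 33), datetime(2018, 7, 2, 4, 34),
#     "G16", "GOES-East", "GLM-1", "CONUS", "2km at nadir",
#     "ABI Mode 3", "DE", "Postprocessed", "TTU")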
def glm_image_to_var(data, name, long_name, units, dims, fill=0.0,
scale_factor=None, add_offset=None, dtype=None):
"""
data: array of data
name: the standard name, CF-compliant if possible
long_name: a more descriptive name
units: udunits string for the units of data
dims: tuple of coordinate names
dtype: numpy dtype of variable to be written after applying scale and offset
If dtype is not None, then the following are also checked
scale_factor, add_offset: floating point discretization and offset, as
commonly used in NetCDF datasets.
decoded = scale_factor * encoded + add_offset
Returns: data_var, xarray.DataArray objects, with type inferred from data
"""
enc = {}
meta = {}
enc['_FillValue'] = fill
enc['zlib'] = True # Compress the data
if dtype is not None:
orig_dtype = dtype
if orig_dtype[0] == 'u':
enc['_Unsigned'] = 'true'
dtype= dtype[1:]
enc['dtype'] = dtype
if scale_factor is not None:
enc['scale_factor'] = scale_factor
min_allowed = scale_factor
if add_offset is not None:
min_allowed += add_offset
tiny = (data > 0.0) & (data <= min_allowed)
data[tiny] = min_allowed
if add_offset is not None:
enc['add_offset'] = add_offset
meta['standard_name'] = name
meta['long_name'] = long_name
meta['units'] = units
meta['grid_mapping'] = "goes_imager_projection"
d = xr.DataArray(data, attrs=meta, dims=dims, name=name)
d.encoding = enc
return d
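# Illustrative usage (the zero-filled array, its shape, and the units string
# are assumptions):
# fed_var = glm_image_to_var(np.zeros((100, 100)), 'flash_extent_density',
#     'GLM flash extent density', '1', ('y', 'x'),
#     **glm_scaling['flash_extent_density'])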
def new_goes_imagery_dataset(x, y, nadir_lon):
""" Create a new xarray.Dataset with the basic coordiante data, metadata,
and global attributes that matches the GOES-R series fixed grid imagery
format.
Arguments
x, y: 1-D arrays of coordinate positions
nadir_lon: longitude (deg) of the sub-satellite point
"""
# Dimensions
dims = ('y', 'x')
scene_id, nominal_resolution = infer_scene_from_dataset(x, y)
log.debug("Span of grid implies scene is {0}".format(scene_id))
# Coordinate data: x, y
xc, yc = get_goes_imager_fixedgrid_coords(x, y, scene_id=scene_id)
# Coordinate reference system
goes_imager_proj = get_goes_imager_proj(nadir_lon)
subpoint_lon, subpoint_lat = get_goes_imager_subpoint_vars(nadir_lon)
# Data quality flags
dqf = get_goes_imager_all_valid_dqf(dims, y.shape+x.shape)
v = {goes_imager_proj.name:goes_imager_proj,
dqf.name:dqf,
subpoint_lat.name:subpoint_lat,
subpoint_lon.name:subpoint_lon,
}
c = {xc.name:xc, yc.name:yc}
d = xr.Dataset(data_vars=v, coords=c)#, dims=dims)
# Attributes aren't carried over for xc and yc like they are for dqf, etc.
# so copy them over manually
d.x.attrs.update(xc.attrs)
d.y.attrs.update(yc.attrs)
return d, scene_id, nominal_resolution
def xy_to_2D_lonlat(gridder, x_coord, y_coord):
self = gridder
mapProj = self.mapProj
geoProj = self.geoProj
x_all, y_all = (a.T for a in np.meshgrid(x_coord, y_coord))
assert x_all.shape == y_all.shape
assert x_all.shape[0] == len(x_coord)
assert x_all.shape[1] == len(y_coord)
z_all = np.zeros_like(x_all)
lons, lats, alts = geoProj.fromECEF(
*mapProj.toECEF(x_all, y_all, z_all) )
lons.shape = x_all.shape
lats.shape = y_all.shape
return lons, lats
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
def infer_scene_from_dataset(x, y):
"Infer whether the scene matches one of the GOES-R fixed grid domains."
from lmatools.grid.fixed import goesr_conus, goesr_meso, goesr_full, goesr_resolutions
rtol = 1.0e-2
# Try to match up the actual spacing in microradians with a known resolutions
dx = np.abs(x[1]-x[0])
resolution = '{:d}microradian at nadir'.format(int(np.round(dx*1e6)))
for km, microrad in goesr_resolutions.items():
if np.allclose(microrad, dx, rtol=rtol):
resolution = km.replace('.0', '') + ' at nadir'
spanEW = x.max() - x.min()
spanNS = y.max() - y.min()
log.debug("Inferring scene from spans x={0}, y={1}".format(spanEW, spanNS))
if (np.allclose(spanEW, goesr_full['spanEW'], rtol=rtol) &
np.allclose(spanNS, goesr_full['spanNS'], rtol=rtol) ):
scene_id = "FULL"
elif (np.allclose(spanEW, goesr_conus['spanEW'], rtol=rtol) &
np.allclose(spanNS, goesr_conus['spanNS'], rtol=rtol) ):
scene_id = "CONUS"
elif (np.allclose(spanEW, goesr_meso['spanEW'], rtol=rtol) &
np.allclose(spanNS, goesr_meso['spanNS'], rtol=rtol) ):
scene_id = "MESO1"
elif (np.allclose(spanEW, 0.172732, rtol=rtol)):
# This is the "expanded CONUS" domain for GOES-West;
# see lmatools.grid.fixed for further details.
scene_id = "CONUS"
else:
scene_id = "OTHER"
return scene_id, resolution
def write_goes_imagery(gridder, outpath='./{dataset_name}', pad=None, scale_and_offset=True):
""" pad is a tuple of x_slice, y_slice: slice objects used to index the
zeroth and first dimensions, respectively, of the grids in gridder.
scale_and_offset controls whether to write variables as scaled ints.
if False, floating point grids will be written.
outpath can be a template string; defaults to {'./{dataset_name}'}
Available named arguments in the template are:
dataset_name: standard GOES imagery format, includes '.nc'. Looks like
OR_GLM-L2-GLMM1-M3_G16_s20181830432000_e20181830433000_c20200461148520.nc
start_time, end_time: datetimes that can be used with strftime syntax, e.g.
'./{start_time:%y/%b/%d}/GLM_{start_time:%Y%m%d_%H%M%S}.nc'
Intermediate directories will be created to match outpath.
"""
self = gridder
if pad is not None:
x_slice, y_slice = pad
else:
x_slice, y_slice = (slice(None, None), slice(None, None))
fixProj = self.mapProj
geoProj = self.geoProj
# Center of the grid is the nadir longitude
subsat = (0.0, 0.0, 0.0)
nadir_lon, nadir_lat, nadir_alt = geoProj.fromECEF(*fixProj.toECEF(*subsat))
# Get 1D x and y coordinates, and corresponding lons and lats
spatial_scale_factor = self.spatial_scale_factor
xedge = self.xedge
yedge = self.yedge
x_coord = ((xedge[:-1] + xedge[1:])/2.0)[x_slice]
y_coord = ((yedge[:-1] + yedge[1:])/2.0)[y_slice]
file_iter = list(zip(self.outgrids, self.field_names,
self.field_descriptions, self.field_units, self.outformats))
# Write a separate file at each time.
all_outfiles = []
for ti, (t0, t1) in enumerate(pairwise(self.t_edges_seconds)):
start = self.t_ref + timedelta(0, t0)
end = self.t_ref + timedelta(0, t1)
log.info("Assembling NetCDF dataset for {0} - {1}".format(start, end))
# Need to flip the y coordinate to reverse order since y is defined as
# upper left in the GOES-R series L1b PUG (section 5.1.2.6 Product Data
# Structures). Later, to follow the image array convention will
# transpose the grids and then flipud.
dataset, scene_id, nominal_resolution = new_goes_imagery_dataset(x_coord,
np.flipud(y_coord), nadir_lon)
# Global metadata
l2lcfa_attrs = gridder.first_file_attrs
global_attrs = get_glm_global_attrs(start, end,
l2lcfa_attrs['platform_ID'], l2lcfa_attrs['orbital_slot'],
l2lcfa_attrs['instrument_ID'], scene_id,
nominal_resolution, "ABI Mode 3", "DE", "Postprocessed", "TTU"
)
dataset = dataset.assign_attrs(**global_attrs)
# log.debug("*** Checking x coordinate attrs initial")
# log.debug(dataset.x.attrs)
outfile = outpath.format(start_time=start, end_time=end,
dataset_name=dataset.attrs['dataset_name'])
enclosing_dir = os.path.dirname(outfile)
if os.path.exists(enclosing_dir) == False:
os.makedirs(enclosing_dir)
# Adding a new variable to the dataset below clears the coord attrs
# so hold on to them for now.
xattrs, yattrs = dataset.x.attrs, dataset.y.attrs
xenc, yenc = dataset.x.encoding, dataset.y.encoding
for i, (grid_allt, field_name, description, units, outformat) in enumerate(file_iter):
grid = grid_allt[x_slice,y_slice,ti]
if i in self.divide_grids:
denom = self.outgrids[self.divide_grids[i]][x_slice,y_slice,ti]
zeros = (denom == 0) | (grid == 0)
nonzeros = ~zeros
grid[nonzeros] = grid[nonzeros]/denom[nonzeros]
grid[zeros] = 0 # avoid nans
image_at_time = np.flipud(grid.T)
scale_kwargs = {}
if (field_name in glm_scaling) and scale_and_offset:
scale_kwargs.update(glm_scaling[field_name])
img_var = glm_image_to_var(image_at_time,
field_name, description, units,
('y', 'x'), **scale_kwargs)
# Why does this line clear the attrs on the coords?
# log.debug("*** Checking x coordinate attrs {0}a".format(i))
# log.debug(dataset.x.attrs)
dataset[img_var.name] = img_var
# log.debug("*** Checking x coordinate attrs {0}b".format(i))
# log.debug(dataset.x.attrs)
# Restore the cleared coord attrs
dataset.x.attrs.update(xattrs)
dataset.y.attrs.update(yattrs)
dataset.x.encoding.update(xenc)
dataset.y.encoding.update(yenc)
# log.debug("*** Checking x coordinate attrs final")
# log.debug(dataset.x.attrs)
log.info("Preparing to write NetCDF {0}".format(outfile))
dataset.to_netcdf(outfile)
log.info("Wrote NetCDF {0}".format(outfile))
all_outfiles.append(outfile)
return all_outfiles
def aggregate(glm, minutes, start_end=None):
""" Given a multi-minute glm imagery dataset (such as that returned by
glmtools.io.imagery.open_glm_time_series) and an integer number of minutes,
recalculate average and minimum flash area for that interval and sum all other
fields.
start_end: datetime objects giving the start and end edges of the interval to
be aggregated. This allows for a day-long dataset to be aggregated over an hour
of interest, for example. If not provided, the start of the glm dataset plus
*minutes* after the end of the glm dataset will be used.
This function expects the GLM data to have missing values (NaN) where
there is no lightning and relies on the skipna functionality of
DataArray.min() and .sum() to find the aggregation across frames where
each pixel has a mix of lightning and no-lightning times. If these
frames instead had zero for minimum flash area, the minimum value would
be zero, leading to data loss and a clear difference between FED and
MFA.
To restore the original time coordinate name, choose the left, mid, or right
endpoint of the time_bins coordinate produced by the aggregation step.
>>> agglm = aggregate(glm, 5)
>>> agglm['time_bins'] = [v.left for v in agglm.time_bins.values]
>>> glm_agg = agglm.rename({'time_bins':'time'})
"""
dt_1min = timedelta(seconds=60)
dt = dt_1min*minutes
if start_end is not None:
start = start_end[0]
end = start_end[1]
else:
start = pd.Timestamp(glm['time'].min().data).to_pydatetime()
end = pd.Timestamp(glm['time'].max().data).to_pydatetime() + dt
# The lines below might be necessary replacements for a future version
# start = pd.to_datetime(glm['time'].min().data).to_pydatetime()
# end = pd.to_datetime(glm['time'].max().data).to_pydatetime() + dt
# dt_np = (end - start).data
# duration = pd.to_timedelta(dt_np).to_pytimedelta()
duration = end - start
sum_vars = ['flash_extent_density', 'flash_centroid_density',
'total_energy',
'group_extent_density', 'group_centroid_density', ]
sum_vars = [sv for sv in sum_vars if sv in glm]
sum_data = glm[sum_vars]
# goes_imager_projection is a dummy int variable, and all we care about
# is the attributes.
min_vars = ['minimum_flash_area', 'goes_imager_projection',
'nominal_satellite_subpoint_lat', 'nominal_satellite_subpoint_lon']
min_vars = [mv for mv in min_vars if mv in glm]
min_data = glm[min_vars]
sum_data['total_flash_area'] = glm.average_flash_area*glm.flash_extent_density
sum_data['total_group_area'] = glm.average_group_area*glm.group_extent_density
t_bins = [start + dt*i for i in range(int(duration/dt)+1)]
t_groups_sum = sum_data.groupby_bins('time', bins=t_bins)
t_groups_min = min_data.groupby_bins('time', bins=t_bins)
aggregated_min = t_groups_min.min(dim='time', keep_attrs=True, skipna=True)
# Naively sum all variables … so average areas are now ill defined. Recalculate
aggregated = t_groups_sum.sum(dim='time', keep_attrs=True, skipna=True)
aggregated['average_flash_area'] = (aggregated.total_flash_area
/ aggregated.flash_extent_density)
aggregated['average_group_area'] = (aggregated.total_group_area
/ aggregated.group_extent_density)
for var in min_vars:
aggregated[var] = aggregated_min[var]
# direct copy of other useful attributes. This could be handled better, since it brings
# along the original time dimension, but the projection is a static value and is safe
# to move over. We skip 'DQF' since it's empty and not clear what summing it would mean.
# Someone can make that decision later when DQF is populated.
# for v in ['goes_imager_projection']:
# aggregated[v] = glm[v]
# time_bins is made up of Interval objects with left and right edges
aggregated.attrs['time_coverage_start'] = min(
[v.left for v in aggregated.time_bins.values]).isoformat()
aggregated.attrs['time_coverage_end'] = max(
[v.right for v in aggregated.time_bins.values]).isoformat()
return aggregated
def gen_file_times(filenames, time_attr='time_coverage_start'):
for s in filenames:
with xr.open_dataset(s) as d:
# strip off timezone information so that xarray
# auto-converts to datetime64 instead of pandas.Timestamp objects
yield pd.Timestamp(d.attrs[time_attr]).tz_localize(None)
def open_glm_time_series(filenames, chunks=None):
""" Convenience function for combining individual 1-min GLM gridded imagery
files into a single xarray.Dataset with a time dimension.
Creates an index on the time dimension.
The time dimension will be in the order in which the files are listed
due to the behavior of combine='nested' in open_mfdataset.
Adjusts the time_coverage_start and time_coverage_end metadata.
"""
# Need to fix time_coverage_start and _end in concat dataset
starts = [t for t in gen_file_times(filenames)]
ends = [t for t in gen_file_times(filenames, time_attr='time_coverage_end')]
d = xr.open_mfdataset(filenames, concat_dim='time', chunks=chunks, combine='nested')
d['time'] = starts
d = d.set_index({'time':'time'})
d = d.set_coords('time')
d.attrs['time_coverage_start'] = pd.Timestamp(min(starts)).isoformat()
d.attrs['time_coverage_end'] = pd.Timestamp(max(ends)).isoformat()
return d
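# Illustrative end-to-end usage (the filename pattern is a placeholder):
# import glob
# files = sorted(glob.glob('GLM-00-00_20180702_*_flash_extent.nc'))
# glm = open_glm_time_series(files)
# glm_5min = aggregate(glm, 5)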
```
|
{
"source": "jeremymchacon/cometspy",
"score": 3
}
|
#### File: cometspy/cometspy/params.py
```python
import pandas as pd
import os
def __isfloat(value):
try:
float(value)
return True
except ValueError:
return False
class params:
'''
object storing simulation-related and some default biological parameters
The params object is an essential object needed to start a comets
simulation, along with a layout object containing models. The params
object stores simulation information such as timeStep and maxCycles, as
well as many default biological parameters such as defaultKm and
defaultDiffC (the default metabolite diffusion constant).
All of the parameters are stored in a dictionary called 'all_params' and
can either be adjusted directly or using set_param().
When params are written to a COMETS object for COMETS to process, two
files are generated ('.current_global' and '.current_package')
Parameters
----------
global_params : str, optional
optional path to an existing 'global_params' file to load
package_params : str, optional
optional path to an existing 'package_params' file to load
Examples
--------
>>> # make a params object and change two params
>>> import cometspy as c
>>> params = c.params()
>>> params.set_param("timeStep", 0.01) # hours
>>> params.set_param("spaceWidth", 0.5) # cm
>>> # view all the params
>>> params.show_params()
>>> # access the params directly
>>> params.all_params # this is a dict
>>> # use a params object in a comets simulation
>>> import cobra.test
>>> model = c.model(cobra.test.create_test_model("textbook"))
>>> model.initial_pop = [0, 0, 1.e-7]
>>> model.open_exchanges()
>>> layout = c.layout([model])
>>> # normally we'd now alter the media in the layout
>>> layout.add_typical_trace_metabolites()
>>> layout.set_specific_metabolite('lcts_e', 0.05) # mmol
>>> sim = c.comets(layout, params)
>>> sim.run()
Attributes
----------
all_params : dict
contains every possible param as keys, with their value as the value
param_units : dict
dictionary containing the units for each possible parameter
'''
def show_params(self):
"""
utility function to show all possible parameters and their units
"""
pdparams = pd.DataFrame({'VALUE': pd.Series(self.all_params),
'UNITS': pd.Series(self.param_units)})
return pdparams
def set_param(self, name : str, value):
"""
alters a specific parameter value
See show_params() for a list of possible parameters to adjust.
Parameters
----------
name : str
the name of a specific parameter to change
value : variable
the type of the value depends on the parameter.
Examples
--------
>>> p = cometspy.params()
>>> p.set_param("writeBiomassLog", True)
>>> p.set_param("maxCycles", 100)
>>> p.set_param("BiomassLogRate",5)
>>> p.set_param("defaultVmax", 10.24)
"""
if name in self.all_params:
self.all_params[name] = value
else:
print('Parameter ' + name + ' does not exist')
def get_param(self, name : str) -> object:
"""
returns the value associated with the given parameter name
Parameters
----------
name : str
the name of the parameter whose value is desired
Returns
-------
object
the type of object depends on the parameter requested
"""
if name in self.all_params:
return self.all_params[name]
else:
print('Parameter ' + name + ' does not exist')
def __init__(self, global_params : str =None,
package_params : str =None):
self.all_params = {'writeSpecificMediaLog': False,
'specificMediaLogRate': 1,
'specificMedia': 'ac_e',
'SpecificMediaLogName': 'specific_media.txt',
'BiomassLogName': 'biomass.txt',
'BiomassLogRate': 1,
'biomassLogFormat': 'COMETS',
'FluxLogName': 'flux_out',
'FluxLogRate': 5,
'fluxLogFormat': 'COMETS',
'MediaLogName': 'media_out',
'MediaLogRate': 5,
'mediaLogFormat': 'COMETS',
'TotalBiomassLogName': 'total_biomass_out.txt',
'maxCycles': 100,
'saveslideshow': False,
'totalBiomassLogRate': 1,
'useLogNameTimeStamp': False,
'writeBiomassLog': False,
'writeFluxLog': False,
'writeMediaLog': False,
'writeTotalBiomassLog': True,
'batchDilution': False,
'dilFactor': 10,
'dilTime': 2,
'cellSize': 1e-13,
'allowCellOverlap': True,
'deathRate': 0,
'defaultHill': 1,
'defaultKm': 0.01,
'defaultVmax': 10,
'defaultAlpha': 1,
'defaultW': 10,
'defaultDiffConst': 1e-5,
'exchangestyle': 'Monod Style',
'flowDiffRate': 3e-9,
'growthDiffRate': 0,
'maxSpaceBiomass': 0.1,
'minSpaceBiomass': 0.25e-10,
'numDiffPerStep': 10,
'numRunThreads': 1,
'showCycleCount': True,
'showCycleTime': False,
'spaceWidth': 0.02,
'timeStep': 0.1,
'toroidalWorld': False,
'simulateActivation': False,
'activateRate': 0.001,
'randomSeed': 0,
'colorRelative': True,
'slideshowColorRelative': True,
'slideshowRate': 1,
'slideshowLayer': 0,
'slideshowExt': 'png',
'biomassMotionStyle': 'Diffusion 2D(Crank-Nicolson)',
'numExRxnSubsteps': 5,
'costlyGenome': False,
'geneFractionalCost': 1e-4,
'evolution': False,
'mutRate': 0,
'addRate': 0,
'metaboliteDilutionRate': 0.}
self.all_params = dict(sorted(self.all_params.items(),
key=lambda x: x[0]))
self.param_units = {'writeSpecificMediaLog': "logical",
'specificMediaLogRate': "cycles",
'specificMedia': "comma separated string",
'SpecificMediaLogName': '',
'BiomassLogName': '',
'BiomassLogRate': 'cycles',
'biomassLogFormat': '',
'FluxLogName': '',
'FluxLogRate': 'cycles',
'fluxLogFormat': '',
'MediaLogName': '',
'MediaLogRate': 'cycles',
'mediaLogFormat': '',
'TotalBiomassLogName': '',
'maxCycles': 'cycles',
'saveslideshow': 'logical',
'totalBiomassLogRate': 'cycles',
'useLogNameTimeStamp': 'logical',
'writeBiomassLog': 'logical',
'writeFluxLog': 'logical',
'writeMediaLog': 'logical',
'writeTotalBiomassLog': 'logical',
'batchDilution': 'logical',
'dilFactor': '',
'dilTime': 'hr.',
'cellSize': 'gr. cell dry weight',
'allowCellOverlap': 'logical',
'deathRate': 'percent/cycle',
'defaultHill': 'unitless',
'defaultKm': 'M (molar conc.)',
'defaultVmax': 'mmol /gr. CDW /hr',
'defaultAlpha': 'slope',
'defaultW': 'M (molar conc.)',
'defaultDiffConst': 'cm2/s',
'exchangestyle': 'uptake type',
'flowDiffRate': 'cm2/s',
'growthDiffRate': 'cm2/s',
'maxSpaceBiomass': 'gr. cell dry weight',
'minSpaceBiomass': 'gr. cell dry weight',
'numDiffPerStep': '',
'numRunThreads': '',
'showCycleCount': 'logical',
'showCycleTime': 'logical',
'spaceWidth': 'cm',
'timeStep': 'hr.',
'toroidalWorld': 'logical',
'simulateActivation': 'logical',
'activateRate': '',
'randomSeed': 'numeric',
'colorRelative': 'logical',
'slideshowColorRelative': 'logical',
'slideshowRate': 'cycles',
'slideshowLayer': 'numeric',
'slideshowExt': 'extension',
'biomassMotionStyle': 'type of motion',
'numExRxnSubsteps': 'numeric',
'costlyGenome': 'logical',
'geneFractionalCost': 'percent growth rate^(1/2) /reaction',
'evolution': 'logical',
'mutRate': 'deletions /reaction /generation',
'addRate': 'additions /generation',
'metaboliteDilutionRate': 'percent/cycle'}
self.param_units = dict(sorted(self.param_units.items(),
key=lambda x: x[0]))
self.all_type = {'writeSpecificMediaLog': 'global',
'specificMediaLogRate': 'global',
'specificMedia': 'global',
'SpecificMediaLogName': 'global',
'BiomassLogName': 'global',
'BiomassLogRate': 'global',
'biomassLogFormat': 'global',
'FluxLogName': 'global',
'FluxLogRate': 'global',
'fluxLogFormat': 'global',
'MediaLogName': 'global',
'MediaLogRate': 'global',
'mediaLogFormat': 'global',
'TotalBiomassLogName': 'global',
'maxCycles': 'package',
'saveslideshow': 'global',
'totalBiomassLogRate': 'global',
'useLogNameTimeStamp': 'global',
'writeBiomassLog': 'global',
'writeFluxLog': 'global',
'writeMediaLog': 'global',
'writeTotalBiomassLog': 'global',
'batchDilution': 'global',
'dilFactor': 'global',
'dilTime': 'global',
'cellSize': 'global',
'allowCellOverlap': 'package',
'deathRate': 'package',
'defaultHill': 'package',
'defaultKm': 'package',
'defaultVmax': 'package',
'defaultW': 'package',
'defaultAlpha': 'package',
'defaultDiffConst': 'package',
'exchangestyle': 'package',
'flowDiffRate': 'package',
'growthDiffRate': 'package',
'maxSpaceBiomass': 'package',
'minSpaceBiomass': 'package',
'numDiffPerStep': 'package',
'numRunThreads': 'package',
'showCycleCount': 'package',
'showCycleTime': 'package',
'spaceWidth': 'package',
'timeStep': 'package',
'toroidalWorld': 'package',
'simulateActivation': 'global',
'activateRate': 'global',
'randomSeed': 'global',
'colorRelative': 'global',
'slideshowColorRelative': 'global',
'slideshowRate': 'global',
'slideshowLayer': 'global',
'slideshowExt': 'global',
'biomassMotionStyle': 'package',
'numExRxnSubsteps': 'package',
'costlyGenome': 'global',
'geneFractionalCost': 'global',
'evolution': 'package',
'mutRate': 'package',
'addRate': 'package',
'metaboliteDilutionRate': 'package'}
self.all_type = dict(sorted(self.all_type.items(),
key=lambda x: x[0]))
# .. parse parameters files to python type variables
if global_params is not None:
with open(global_params) as f:
for line in f:
if '=' in line:
k, v = line.split(' = ')
if v.strip() == 'true':
self.all_params[k.strip()] = True
elif v.strip() == 'false':
self.all_params[k.strip()] = False
elif v.strip().isdigit():
self.all_params[k.strip()] = int(v.strip())
elif __isfloat(v.strip()):
self.all_params[k.strip()] = float(v.strip())
else:
self.all_params[k.strip()] = v.strip()
if package_params is not None:
with open(package_params) as f:
for line in f:
if '=' in line:
k, v = line.split(' = ')
if v.strip() == 'true':
self.all_params[k.strip()] = True
elif v.strip() == 'false':
self.all_params[k.strip()] = False
elif v.strip().isdigit():
self.all_params[k.strip()] = int(v.strip())
elif __isfloat(v.strip()):
self.all_params[k.strip()] = float(v.strip())
else:
self.all_params[k.strip()] = v.strip()
# Additional processing.
# If evolution is true, we don't want to write the total biomass log
if self.all_params['evolution']:
self.all_params['writeTotalBiomassLog'] = False
self.all_params['writeBiomassLog'] = True
def write_params(self, out_glb : str, out_pkg : str):
"""
writes params data to the specified files
Usually this is only used internally, though a user could use it to
pre-generate params files.
Parameters
----------
out_glb : str
the path and name of the 'global' parameters file to be written
out_pkg : str
the path and name of the 'package' parameters file to be written
"""
if os.path.isfile(out_glb):
os.remove(out_glb)
if os.path.isfile(out_pkg):
os.remove(out_pkg)
# convert booleans to java format before writing
towrite_params = {}
for k, v in self.all_params.items():
if v is True:
towrite_params[k] = 'true'
elif v is False:
towrite_params[k] = 'false'
else:
towrite_params[k] = str(v)
with open(out_glb, 'a') as glb, open(out_pkg, 'a') as pkg:
for k, v in towrite_params.items():
if self.all_type[k] == 'global':
glb.writelines(k + ' = ' + v + '\n')
else:
pkg.writelines(k + ' = ' + v + '\n')
```
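The parameter files parsed above are plain `key = value` text, with booleans spelled Java-style (`true`/`false`) and numeric values converted to `int` or `float`. The standalone sketch below mirrors that parsing branch on a few made-up lines; the helper name and the example values are illustrative only and not part of the package.

```python
# Hedged sketch of the 'key = value' parsing used above; names and values are
# made up for illustration and are not part of the original package.
def parse_params_line(line):
    """Convert one 'key = value' line into a (key, python_value) pair."""
    key, value = (part.strip() for part in line.split(' = '))
    if value == 'true':
        return key, True
    if value == 'false':
        return key, False
    if value.isdigit():
        return key, int(value)
    try:
        return key, float(value)
    except ValueError:
        return key, value

lines = ['maxCycles = 100', 'writeTotalBiomassLog = true', 'spaceWidth = 0.02']
print(dict(parse_params_line(line) for line in lines))
# {'maxCycles': 100, 'writeTotalBiomassLog': True, 'spaceWidth': 0.02}
```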
|
{
"source": "jeremymcrae/bgen",
"score": 2
}
|
#### File: jeremymcrae/bgen/setup.py
```python
import io
from pathlib import Path
from setuptools import setup
import sys
import platform
from distutils.core import Extension
from distutils.ccompiler import new_compiler
from Cython.Build import cythonize
EXTRA_COMPILE_ARGS = ['-std=c++11', '-I/usr/include']
EXTRA_LINK_ARGS = []
if sys.platform == "darwin":
EXTRA_COMPILE_ARGS += ["-stdlib=libc++",
"-mmacosx-version-min=10.9",
"-I/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include",
]
EXTRA_LINK_ARGS += ["-stdlib=libc++", "-mmacosx-version-min=10.9"]
if platform.machine() == 'x86_64':
EXTRA_COMPILE_ARGS += ['-mavx', '-mavx2']
def flatten(*lists):
return [str(x) for sublist in lists for x in sublist]
def build_zstd():
''' compile zstd code to object files for linking with bgen c++ code
This needs to be compiled independently of the bgen c++ code, as zstd is in
c, so cannot be compiled directly with the bgen code. zstd is compiled to
object files, and these are statically linked in with the bgen code.
I tried to link to a shared/dynamic zstd library, but couldn't specify a
location that would be found by the compiled bgen library after relocating
files into a python package.
Returns:
list of paths to compiled object code
'''
folder = Path('src/zstd/lib')
include_dirs = ['src/zstd/lib/', 'src/zstd/lib/common']
sources = flatten(
(folder / 'common').glob('*.c'),
(folder / 'compress').glob('*.c'),
(folder / 'decompress').glob('*.c'),
(folder / 'dictBuilder').glob('*.c'),
(folder / 'deprecated').glob('*.c'),
(folder / 'legacy').glob('*.c'), # TODO: drop some legacy versions
)
extra_compile_args = ['-std=gnu11', '-fPIC']
cc = new_compiler()
return cc.compile(sources, include_dirs=include_dirs,
extra_preargs=extra_compile_args)
reader = cythonize([
Extension('bgen.reader',
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
sources=['src/bgen/bgen.pyx',
'src/bgen.cpp',
'src/genotypes.cpp',
'src/header.cpp',
'src/samples.cpp',
'src/utils.cpp',
'src/variant.cpp'],
extra_objects=build_zstd(),
include_dirs=['src/', 'src/zstd/lib'],
libraries=['z'],
library_dirs=['bgen'],
language='c++'),
])
setup(name='bgen',
description='Package for loading data from bgen files',
long_description=io.open('README.md', encoding='utf-8').read(),
long_description_content_type='text/markdown',
version='1.2.14',
author='<NAME>',
author_email='<EMAIL>',
license="MIT",
url='https://github.com/jeremymcrae/bgen',
packages=['bgen'],
package_dir={'': 'src'},
install_requires=[
'numpy',
],
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
ext_modules=reader,
test_loader='unittest:TestLoader',
)
```
#### File: bgen/tests/test_bgenfile.py
```python
from pathlib import Path
import unittest
import numpy as np
from bgen.reader import BgenFile
from tests.utils import load_gen_data
class TestBgenFile(unittest.TestCase):
''' class to make sure BgenFile works correctly
'''
@classmethod
def setUpClass(cls):
cls.gen_data = load_gen_data()
def setUp(self):
''' set path to folder with test data
'''
self.folder = Path(__file__).parent / "data"
def test_context_handler_closed_bgen_samples(self):
''' no samples available from exited BgenFile
'''
path = self.folder / 'example.16bits.zstd.bgen'
with BgenFile(path) as bfile:
self.assertTrue(len(bfile.samples) > 0)
with self.assertRaises(ValueError):
bfile.samples
def test_context_handler_closed_bgen_varids(self):
''' no varids available from exited BgenFile
'''
path = self.folder / 'example.16bits.zstd.bgen'
with BgenFile(path) as bfile:
self.assertTrue(len(bfile.varids()) > 0)
with self.assertRaises(ValueError):
bfile.varids()
def test_context_handler_closed_bgen_rsids(self):
''' no rsids available from exited BgenFile
'''
path = self.folder / 'example.16bits.zstd.bgen'
with BgenFile(path) as bfile:
self.assertTrue(len(bfile.rsids()) > 0)
with self.assertRaises(ValueError):
bfile.rsids()
def test_context_handler_closed_bgen_positions(self):
''' no positions available from exited BgenFile
'''
path = self.folder / 'example.16bits.zstd.bgen'
with BgenFile(path) as bfile:
self.assertTrue(len(bfile.positions()) > 0)
with self.assertRaises(ValueError):
bfile.positions()
def test_context_handler_closed_bgen_length(self):
''' error raised if accessing length of exited BgenFile
'''
path = self.folder / 'example.16bits.zstd.bgen'
with BgenFile(path) as bfile:
self.assertTrue(len(bfile) > 0)
with self.assertRaises(ValueError):
len(bfile)
def test_context_handler_closed_bgen_slice(self):
''' error raised if slicing variant from exited BgenFile
'''
path = self.folder / 'example.16bits.zstd.bgen'
with BgenFile(path) as bfile:
self.assertTrue(len(bfile) > 0)
with self.assertRaises(ValueError):
var = bfile[0]
def test_context_handler_closed_bgen_at_position(self):
''' error raised if getting variant at position from exited BgenFile
'''
path = self.folder / 'example.16bits.zstd.bgen'
with BgenFile(path) as bfile:
self.assertTrue(len(bfile) > 0)
with self.assertRaises(ValueError):
var = bfile.at_position(100)
def test_context_handler_closed_bgen_with_rsid(self):
''' error raised if getting variant with rsid from exited BgenFile
'''
path = self.folder / 'example.16bits.zstd.bgen'
with BgenFile(path) as bfile:
self.assertTrue(len(bfile) > 0)
with self.assertRaises(ValueError):
var = bfile.with_rsid('rs111')
def test_fetch(self):
''' can fetch variants within a genomic region
'''
chrom, start, stop = '01', 5000, 50000
bfile = BgenFile(self.folder / 'example.16bits.bgen')
self.assertTrue(bfile._check_for_index(str(self.folder / 'example.16bits.bgen')))
self.assertTrue(list(bfile.fetch('02')) == [])
def test_fetch_whole_chrom(self):
''' fetching just with chrom gives all variants on chromosome
'''
chrom, start, stop = '01', 5000, 50000
bfile = BgenFile(self.folder / 'example.16bits.bgen')
# test fetching a whole chromosome
sortkey = lambda x: (x.chrom, x.pos)
for x, y in zip(sorted(bfile.fetch(chrom), key=sortkey), sorted(self.gen_data, key=sortkey)):
self.assertEqual(x.rsid, y.rsid)
self.assertEqual(x.chrom, y.chrom)
self.assertEqual(x.pos, y.pos)
def test_fetch_after_position(self):
''' fetching variants with chrom and start gives all variants after pos
'''
chrom, start, stop = '01', 5000, 50000
bfile = BgenFile(self.folder / 'example.16bits.bgen')
sortkey = lambda x: (x.chrom, x.pos)
gen_vars = [x for x in sorted(self.gen_data, key=sortkey) if start <= x.pos]
for x, y in zip(sorted(bfile.fetch(chrom, start), key=sortkey), gen_vars):
self.assertEqual(x.rsid, y.rsid)
self.assertEqual(x.chrom, y.chrom)
self.assertEqual(x.pos, y.pos)
def test_fetch_in_region(self):
''' fetching variants with chrom, start, stop gives variants in region
'''
chrom, start, stop = '01', 5000, 50000
bfile = BgenFile(self.folder / 'example.16bits.bgen')
sortkey = lambda x: (x.chrom, x.pos)
gen_vars = [x for x in sorted(self.gen_data, key=sortkey) if start <= x.pos <= stop]
for x, y in zip(sorted(bfile.fetch(chrom, start, stop), key=sortkey), gen_vars):
self.assertEqual(x.rsid, y.rsid)
self.assertEqual(x.chrom, y.chrom)
self.assertEqual(x.pos, y.pos)
# check that we don't get any variants in a region without any
self.assertEqual(list(bfile.fetch(chrom, start * 1000, stop * 1000)), [])
```
#### File: bgen/tests/test_index.py
```python
from pathlib import Path
import unittest
import numpy as np
from bgen.reader import BgenFile
from bgen.index import Index
from tests.utils import load_gen_data
class TestBgenIndex(unittest.TestCase):
''' class to make sure bgen.index.Index works correctly
'''
@classmethod
def setUpClass(cls):
cls.gen_data = load_gen_data()
def setUp(self):
''' set path to folder with test data
'''
self.folder = Path(__file__).parent / "data"
def test_index_opens(self):
''' loads index when available
'''
bfile = BgenFile(self.folder / 'example.15bits.bgen')
self.assertFalse(bfile._check_for_index(str(self.folder / 'example.15bits.bgen')))
bfile = BgenFile(self.folder / 'example.16bits.bgen')
self.assertTrue(bfile._check_for_index(str(self.folder / 'example.16bits.bgen')))
def test_index_fetch(self):
''' fetches file offsets
'''
chrom = '01'
start = 5000
stop = 50000
index = Index(self.folder / 'example.16bits.bgen.bgi')
self.assertTrue(len(list(index.fetch(chrom))) == len(self.gen_data))
self.assertTrue(len(list(index.fetch('02'))) == 0)
self.assertTrue(len(list(index.fetch(chrom, start * 100, stop * 100))) == 0)
# check for a whole chromosome
chrom_offsets = list(index.fetch(chrom))
self.assertTrue(len(chrom_offsets) > 0)
self.assertTrue(len(chrom_offsets) == len(self.gen_data))
# check for all variants following a position
after_pos_offsets = list(index.fetch(chrom, start))
self.assertTrue(len(after_pos_offsets) > 0)
self.assertTrue(len(after_pos_offsets) == len([x for x in self.gen_data if start <= x.pos]))
# check for all variants within a region
in_region_offsets = list(index.fetch(chrom, start, stop))
self.assertTrue(len(in_region_offsets) > 0)
self.assertTrue(len(in_region_offsets) == len([x for x in self.gen_data if start <= x.pos <= stop]))
# make sure the queries return different lists
self.assertTrue(len(chrom_offsets) != len(after_pos_offsets))
self.assertTrue(len(chrom_offsets) != len(in_region_offsets))
self.assertTrue(len(after_pos_offsets) != len(in_region_offsets))
```
#### File: bgen/tests/test_load_example_files.py
```python
from pathlib import Path
import unittest
import numpy as np
from bgen.reader import BgenFile
from tests.utils import load_gen_data, load_vcf_data, load_haps_data, arrays_equal
class TestExampleBgens(unittest.TestCase):
''' class to make sure we can load bgen files
'''
@classmethod
def setUpClass(cls):
cls.gen_data = load_gen_data()
cls.vcf_data = load_vcf_data()
cls.haps_data = load_haps_data()
def setUp(self):
''' set path to folder with test data
'''
self.folder = Path(__file__).parent / "data"
def test_load_example_genotypes_bit_depths(self):
''' check parsing genotypes from the example files with different bit depths
'''
for path in self.folder.glob('example.*bits.bgen'):
bit_depth = int(path.stem.split('.')[1].strip('bits'))
bfile = BgenFile(str(path))
for var, g in zip(bfile, self.gen_data):
self.assertEqual(g, var)
self.assertTrue(arrays_equal(g.probabilities, var.probabilities, bit_depth))
def test_zstd_compressed(self):
''' check we can parse genotypes from zstd compressed geno probabilities
'''
path = self.folder / 'example.16bits.zstd.bgen'
bfile = BgenFile(str(path))
for var, g in zip(bfile, self.gen_data):
self.assertEqual(g, var)
self.assertTrue(arrays_equal(g.probabilities, var.probabilities, 16))
def test_v11(self):
''' check we can open a bgen in v1.1 format, and parse genotypes correctly
'''
path = self.folder / 'example.v11.bgen'
bfile = BgenFile(str(path))
bit_depth = 16
for var, g in zip(bfile, self.gen_data):
self.assertEqual(g, var)
self.assertTrue(arrays_equal(g.probabilities, var.probabilities, bit_depth))
def test_Path(self):
''' check we can open bgen files from Path objects
'''
path = self.folder / 'example.v11.bgen'
bfile = BgenFile(path)
def test_load_haplotypes_bgen(self):
''' check we can open a bgen with haplotypes, and parse genotypes correctly
'''
path = self.folder / 'haplotypes.bgen'
bfile = BgenFile(str(path))
bit_depth = 16
for var, g in zip(bfile, self.haps_data):
self.assertEqual(g, var)
self.assertTrue(arrays_equal(g.probabilities, var.probabilities, bit_depth))
def test_load_complex_file(self):
''' make sure we can open a complex bgen file
'''
path = self.folder / 'complex.bgen'
bfile = BgenFile(path)
bit_depth = 16
for var, g in zip(bfile, self.vcf_data):
self.assertEqual(g, var)
self.assertTrue(arrays_equal(g.probabilities, var.probabilities, bit_depth))
self.assertTrue(all(x == y for x, y in zip(g.ploidy, var.ploidy)))
def test_load_complex_files(self):
''' make sure we can open the complex bgen files
'''
for path in self.folder.glob('complex.*.bgen'):
bit_depth = int(path.stem.split('.')[1].strip('bits'))
bfile = BgenFile(path)
for var, g in zip(bfile, self.vcf_data):
self.assertEqual(g, var)
self.assertTrue(arrays_equal(g.probabilities, var.probabilities, bit_depth))
def test_load_missing_file(self):
''' check passing in a path to a missing file fails gracefully
'''
with self.assertRaises(ValueError):
BgenFile('/zzz/jjj/qqq.bgen')
# def test_load_missing_sample_file(self):
# path = str(self.folder / 'example.8bits.bgen')
# bfile = BgenFile(path, '/zzz/jjj/qqq.sample')
```
|
{
"source": "jeremymcrae/denovonear",
"score": 3
}
|
#### File: denovonear/denovonear/ensembl_cache.py
```python
import os
import sqlite3
import sys
import time
import random
import zlib
from pathlib import Path
import json
from datetime import datetime
from urllib.parse import urlparse
class EnsemblCache(object):
""" Instead of repeatedly re-acquiring data from Ensembl each run, cache
the requested data for faster retrieval
"""
today = datetime.today()
def __init__(self, cache_folder):
""" initialise the class with the local cache folder
Args:
cache_folder: path to the cache
"""
cache_folder = Path(cache_folder)
if not cache_folder.exists():
cache_folder.mkdir()
# generate a database with tables if it doesn't already exist
path = cache_folder / "ensembl_cache.db"
if not path.exists():
try:
with sqlite3.connect(str(path)) as conn:
with conn as cursor:
cursor.execute("CREATE TABLE ensembl " \
"(key text PRIMARY KEY, genome_build text, " \
"cache_date text, api_version text, data blob)")
except sqlite3.OperationalError:
time.sleep(random.uniform(1, 5))
self.conn = sqlite3.connect(str(path))
self.conn.row_factory = sqlite3.Row
self.cursor = self.conn.cursor()
def get_cached_data(self, url):
""" get cached data for a url if stored in the cache and not outdated
Args:
url: URL for the Ensembl REST service
Returns:
data if data in cache, else None
"""
key, build = self.get_key_from_url(url)
self.cursor.execute("SELECT * FROM ensembl WHERE key=? AND genome_build=?",
(key, build))
row = self.cursor.fetchone()
# if the data has been cached, check that it is not out of date
if row is not None:
data = zlib.decompress(row["data"])
diff = self.today - datetime.strptime(row["cache_date"], "%Y-%m-%d")
if diff.days < 180:
return data
return None
def cache_url_data(self, url, data, attempt=0):
""" cache the data retrieved from ensembl
Args:
url: URL for the Ensembl REST service
data: response data from Ensembl, in bytes form
"""
if attempt > 5:
raise ValueError('too many attempts at writing to the cache')
key, build = self.get_key_from_url(url)
current_date = datetime.strftime(self.today, "%Y-%m-%d")
compressed = zlib.compress(data)
t = (key, build, current_date, '9', compressed)
cmd = "INSERT OR REPLACE INTO ensembl " \
"(key, genome_build, cache_date, api_version, data) VALUES (?,?,?,?,?)"
try:
with self.conn as conn:
conn.execute(cmd, t)
except sqlite3.OperationalError:
# if we hit a sqlite locking error, wait a random time so conflicting
# instances are less likely to reconflict, then retry
time.sleep(random.uniform(1, 10))
self.cache_url_data(url, data, attempt + 1)
def get_key_from_url(self, url):
""" parses the url into a list of folder locations
We take a URL like:
http://rest.ensembl.org/sequence/id/ENST00000538324?type=genomic;expand_3prime=10
and turn it into 'sequence.id.ENST00000538324.genomic'
Args:
url: URL for the Ensembl REST service
Returns:
a parsed unique database key for the URLs data
"""
url = urlparse(url)
path = url.path.strip('/').replace('/', '.')
# find the build from the url, but convert the default server to a build
build = url.netloc.split('.')[0]
build = 'grch38' if build == 'rest' else build
# convert "LONG_ID?feature=transcript" to ['LONG_ID', "transcript"] etc
suffix = url.query.split(';')[0]
if "=" in suffix:
_, suffix = suffix.split("=")
key = path + '.' + suffix if suffix != '' else path
# replace characters not tolerated in keys
return key.replace(':', '_'), build
```
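For reference, the key derivation in `get_key_from_url` can be checked standalone: the REST path is flattened on dots, the first query value becomes a suffix, and the server subdomain becomes the genome build. The snippet below re-implements those steps outside the class (so no cache folder or sqlite database is needed); it is a sketch of the transformation, not a call into the package.

```python
# Standalone sketch of the cache-key derivation described in the docstring above.
from urllib.parse import urlparse

url = 'http://rest.ensembl.org/sequence/id/ENST00000538324?type=genomic;expand_3prime=10'
parsed = urlparse(url)
path = parsed.path.strip('/').replace('/', '.')
build = parsed.netloc.split('.')[0]
build = 'grch38' if build == 'rest' else build
suffix = parsed.query.split(';')[0]
if '=' in suffix:
    _, suffix = suffix.split('=')
key = f'{path}.{suffix}' if suffix else path
print(key.replace(':', '_'), build)
# sequence.id.ENST00000538324.genomic grch38
```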
#### File: denovonear/denovonear/frameshift_rate.py
```python
import math
import sys
def include_frameshift_rates(path):
""" add per-gene frameshift mutation rates to the output file
We make a crude estimate of the frameshift mutation rate by assessing the
total frameshift burden (across all genes in a dataset) as 1.25 times the
nonsense burden. Then apportion the frameshift burden in proportion to each
gene's CDS length.
This is as per Nature Genetics 46:944-950 (2014) doi:10.1038/ng.3050.
Note that this is an approximation, and doesn't allow for divergence due to
the base composition of the CDS.
Args:
path: path to the output mutation rates (for the nonsense, missense etc)
"""
if path == sys.stdout:
raise ValueError("can't get frameshift rates when writing to standard out")
with open(path) as handle:
lines = [ x.strip().split('\t') for x in handle ]
nonsense = sum([ 10**(float(x[4])) for x in lines[1:] if x[4] != 'NA' ])
length = sum([ int(x[2]) for x in lines[1:] if x[2] != 'NA' ])
# add the frameshift rates to each line in turn, while writing the output
# back to the output path
frameshift_sum = nonsense * 1.25
with open(path, "w") as handle:
for line in lines:
if line[0] == "transcript_id":
line.append("frameshift_rate")
elif line[4] == 'NA':
line.append('NA')
else:
# estimate the frameshift rate for the gene
frameshift = (float(line[2])/length) * frameshift_sum
frameshift = math.log10(frameshift)
line.append(str(frameshift))
line = "\t".join(line) +"\n"
handle.write(line)
```
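The arithmetic in `include_frameshift_rates` is small enough to show on toy numbers: sum the per-gene nonsense rates (stored as log10 values), scale by 1.25 to get the cohort-wide frameshift burden, then share that burden out by CDS length. The gene names, rates and lengths below are invented purely to illustrate the calculation.

```python
import math

# hypothetical per-gene nonsense rates (log10) and CDS lengths, for illustration only
genes = {'GENE_A': {'log10_nonsense': -5.0, 'cds_length': 1500},
         'GENE_B': {'log10_nonsense': -4.5, 'cds_length': 4500}}

nonsense_sum = sum(10 ** g['log10_nonsense'] for g in genes.values())
total_length = sum(g['cds_length'] for g in genes.values())
frameshift_sum = nonsense_sum * 1.25  # total frameshift burden, as described above

for name, gene in genes.items():
    rate = (gene['cds_length'] / total_length) * frameshift_sum
    print(name, round(math.log10(rate), 3))  # per-gene frameshift rate, log10 scaled
```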
#### File: denovonear/tests/test_simulations.py
```python
import math
import unittest
from denovonear.weights import get_distances, geomean, WeightedChoice, \
analyse_de_novos, simulate_distribution
class TestSimulationsPy(unittest.TestCase):
""" unit test the simulation functions
"""
def setUp(self):
"""
"""
# set up a range of possible positions, all with a uniform probability
# of being selected
self.choices = WeightedChoice()
for x in range(1000):
self.choices.add_choice(x, 0.0001)
self.iterations = 100000
def test_analyse_de_novos_dispersed(self):
""" test analyse_de_novos() works correctly for dispersed de novos
"""
# spread sites throughout a 1000 bp transcript
positions = [100, 300, 600]
distances = get_distances(positions)
observed = geomean(distances)
p_val = analyse_de_novos(self.choices, self.iterations, len(positions), observed)
self.assertAlmostEqual(p_val, 0.635, places=2)
def test_analyse_de_novos_clustered(self):
""" test analyse_de_novos() works correctly for clustered de novos
"""
# cluster sites within 20 bp in a 1000 bp transcript
positions = [100, 110, 120]
distances = get_distances(positions)
observed = geomean(distances)
p_val = analyse_de_novos(self.choices, 1000000, len(positions), observed)
self.assertAlmostEqual(p_val, 0.002, places=3)
def test_simulate_distribution(self):
''' check that simulate_distribution works correctly
'''
# repeated function calls should give different samples
first = simulate_distribution(self.choices, iterations=5, de_novos_count=3)
second = simulate_distribution(self.choices, iterations=5, de_novos_count=3)
self.assertNotEqual(first, second)
```
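The clustering statistic tested here is the geometric mean of pairwise distances between de novo sites, compared against simulations of the same number of sites drawn from the weighted positions. The sketch below shows the permutation idea in pure Python with uniform positions; it is not the package's compiled implementation (which, among other differences, samples positions by mutation rate), and the 1 bp floor on zero distances is an assumption made just to keep the toy geometric mean defined.

```python
import random
from itertools import combinations
from statistics import geometric_mean

def geomean_distance(positions):
    """Geometric mean of pairwise distances (floored at 1 bp for this sketch)."""
    return geometric_mean(max(1, abs(a - b)) for a, b in combinations(positions, 2))

def cluster_p_value(observed_positions, length=1000, iterations=10000):
    """Fraction of uniform simulations at least as clustered as the observed sites."""
    observed = geomean_distance(observed_positions)
    n = len(observed_positions)
    hits = sum(
        geomean_distance([random.randrange(length) for _ in range(n)]) <= observed
        for _ in range(iterations))
    return hits / iterations

random.seed(1)
print(cluster_p_value([100, 110, 120]))  # clustered sites give a small p-value
print(cluster_p_value([100, 300, 600]))  # dispersed sites do not
```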
|
{
"source": "jeremymcrae/dnm_cohorts",
"score": 3
}
|
#### File: dnm_cohorts/cohorts/jin_nature_genetics.py
```python
import random
import pandas
from dnm_cohorts.person import Person
url = 'https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5675000/bin/NIHMS906719-supplement-supp_datasets.xlsx'
def open_jin_nature_genetics_cohort():
""" gets individual level data for Jin et al congenital heart disease
Supplementary Table 1 from:
Jin et al. Nature Genetics 49: 1593-1601, doi: 10.1038/ng.3970
"""
random.seed(1)
data = pandas.read_excel(url, 'S1', skiprows=1)
data['person_id'] = data['Blinded ID'].astype(str) + '|jin'
# get male fraction in trios from cohort sex counts in supplemental table 2
male_fraction = 1691 / (1691 + 1180)
study = ['10.1038/ng.3970']
persons = set()
for i, row in data.iterrows():
status = ['HP:0001627']
sex = 'male' if random.random() < male_fraction else 'female'
if row['NDD'] == 'Yes':
status.append('HP:0001263')
person = Person(row.person_id, sex, status, study)
persons.add(person)
return persons
```
#### File: dnm_cohorts/cohorts/kaplanis_nature.py
```python
import random
import pandas
from dnm_cohorts.person import Person
from dnm_cohorts.mock_probands import add_mock_probands
url = 'https://static-content.springer.com/esm/art%3A10.1038%2Fs41586-020-2832-5/MediaObjects/41586_2020_2832_MOESM3_ESM.txt'
def subcohort(rows, counts, prefix, suffix, study):
'''
'''
phenotype = ['HP:0001249']
total = sum(counts.values())
male_fraction = counts['male'] / total
persons = set()
for i, row in rows.iterrows():
sex = 'male' if random.random() < male_fraction else 'female'
person = Person(row['person_id'], sex, phenotype, study)
persons.add(person)
# account for individuals without exomic de novo mutations
return add_mock_probands(persons, total, prefix, suffix, phenotype, study)
def kaplanis_nature_cohort():
""" get proband details for Kaplanis et al Nature 2019
Kaplanis et al Nature 2020
doi: 10.1038/s41586-020-2832-5
"""
random.seed(1)
data = pandas.read_table(url)
# define male and female numbers for each of the subcohorts (DDD, geneDx and
# RUMC). These use the subcohort totals from the supplementary information.
# For DDD and geneDx male and female counts were estimated from length of
# male bars in supp fig 2A. The total cohort had 8 duplicates removed, so
# I dropped 2 from DDD, 3 from geneDx, and 2 from RUMC.
counts = {'DDD': {'male': 5659, 'female': 4197},
'GDX': {'male': 10387, 'female': 8399},
'RUMC': {'male': 1377, 'female': 1039}}
data['person_id'] = data['id'] + '|' + data['study']
phenotype = ['HP:0001249']
doi = ['10.1038/s41586-020-2832-5']
persons = set()
for study in counts:
rows = data[data['study'] == study]
persons |= subcohort(rows, counts[study], study.lower(), study, doi)
return persons
```
#### File: dnm_cohorts/de_novos/homsy_science.py
```python
import os
import logging
import tempfile
from zipfile import ZipFile
import pandas
from dnm_cohorts.download_file import download_file
from dnm_cohorts.de_novo import DeNovo
url = 'http://science.sciencemag.org/highwire/filestream/646355/field_highwire_adjunct_files/1/aac9396_SupportingFile_Other_seq1_v4.zip'
async def homsy_science_de_novos(result):
""" get de novo variants for Homsy et al Science 2015
Supplementary Database 1 from:
Homsy et al. Science 350: 1262-1266, doi: 10.1126/science.aac9396
"""
logging.info('getting Homsy et al Science 2015 de novos')
zipf = tempfile.NamedTemporaryFile()
download_file(url, zipf.name)
with ZipFile(zipf.name) as zipped:
handle = zipped.open('homsy_database_S02.xlsx')
data = pandas.read_excel(handle, 'Database S2', skiprows=1)
data['person_id'] = data['Blinded ID'].astype(str)
data['person_id'] += '|homsy'
data['chrom'] = data['CHROM'].astype(str)
data['pos'] = data['POS']
data['ref'] = data['REF']
data['alt'] = data['ALT']
data['study'] = '10.1126/science.aac9396'
data['confidence'] = 'high'
vars = set()
for i, row in data.iterrows():
var = DeNovo(row.person_id, row.chrom, row.pos, row.ref, row.alt,
row.study, row.confidence, 'grch37')
vars.add(var)
result.append(vars)
```
#### File: dnm_cohorts/de_novos/iossifov_nature.py
```python
import logging
import tempfile
import math
import re
from zipfile import ZipFile
import pandas
from dnm_cohorts.download_file import download_file
from dnm_cohorts.fix_hgvs import fix_coordinates
from dnm_cohorts.de_novo import DeNovo
url = "https://static-content.springer.com/esm/art%3A10.1038%2Fnature13908/MediaObjects/41586_2014_BFnature13908_MOESM117_ESM.zip"
def get_sample_ids(fams):
""" create a ditionary mapping family ID to sample, to subID
Returns:
e.g {'10000': {'p': 'p1', 's': 's1'}, ...}
"""
sample_ids = {}
for i, row in fams.iterrows():
ids = set()
for col in ['CSHL', 'UW', 'YALE']:
col = 'SequencedAt' + col
if type(row[col]) == float:
continue
ids |= set(row[col].split(','))
# we want to refer to the individuals by 'p' or 's' for proband or
# sibling, since that is how they are represented in the de novo table
sample_ids[str(row.familyId)] = dict( (x[0], x) for x in ids )
return sample_ids
def get_person_ids(data, sample_ids):
fam_ids = data['familyId'].astype(str)
children = data.inChild.str.split('M|F')
person_ids = []
for fam, samples in zip(fam_ids, children):
persons = ( sample_ids[fam][x] for x in samples if x != '' )
persons = [ f'{fam}.{x}' for x in persons ]
person_ids.append(persons)
return person_ids
def tidy_families(data):
''' Tidy de novo data to one line per individual
'''
cleaned = []
for i, row in data.iterrows():
ids = row.person_id[:]
for person_id in ids:
temp = row.copy()
temp.person_id = person_id
cleaned.append(temp)
return pandas.DataFrame.from_records(cleaned)
async def iossifov_nature_de_novos(result):
""" get de novo variants fromn Iossifov et al., Nature 2014
Nature (2014) 515: 216-221, doi:10.1038/nature13908
Variants sourced from Supplementary tables S2, with person IDs sourced from
Table S1.
"""
logging.info('getting Iossifov et al Nature 2014 de novos')
temp = tempfile.NamedTemporaryFile()
download_file(url, temp.name)
handle = ZipFile(temp.name)
# obtain the dataframe of de novo variants
data = pandas.read_excel(handle.open('nature13908-s2/Supplementary Table 2.xlsx'))
fams = pandas.read_excel(handle.open('nature13908-s2/Supplementary Table 1.xlsx'))
chrom, pos, ref, alt = fix_coordinates(data['location'], data['vcfVariant'])
data['chrom'], data['pos'], data['ref'], data['alt'] = chrom, pos, ref, alt
sample_ids = get_sample_ids(fams)
data['person_id'] = get_person_ids(data, sample_ids)
data = tidy_families(data)
data['person_id'] += '|asd_cohorts'
data['study'] = "10.1038/nature13908"
data['confidence'] = 'high'
vars = set()
for i, row in data.iterrows():
var = DeNovo(row.person_id, row.chrom, row.pos, row.ref, row.alt,
row.study, row.confidence, 'grch37')
vars.add(var)
result.append(vars)
```
#### File: dnm_cohorts/de_novos/kaplanis_nature.py
```python
import logging
import pandas
from dnm_cohorts.de_novo import DeNovo
from dnm_cohorts.de_novos.lelieveld_nn import fix_alleles
url = 'https://static-content.springer.com/esm/art%3A10.1038%2Fs41586-020-2832-5/MediaObjects/41586_2020_2832_MOESM3_ESM.txt'
async def kaplanis_nature_de_novos(result, limiter):
""" load de novo mutations from Kaplanis et al Nature 2020
These de novos are loaded from Supplementary Table 1 from
Kaplanis et al Nature 2020
doi: 10.1038/s41586-020-2832-5
Returns:
dataframe of de novo mutations
"""
logging.info('getting Kaplanis et al Nature 2020 de novos')
data = pandas.read_table(url)
data['person_id'] = data['id'] + '|' + data['study']
data['chrom'] = data['chrom'].astype(str)
data['study'] = '10.1038/s41586-020-2832-5'
data['confidence'] = 'high'
data['build'] = 'grch37'
# fix RUMC indels, as insertions lack ref alleles and deletions lack alts
data['ref'], data['alt'] = await fix_alleles(limiter, data)
vars = set()
for i, row in data.iterrows():
var = DeNovo(row.person_id, row.chrom, row.pos, row.ref, row.alt,
row.study, row.confidence, row.build)
vars.add(var)
result.append(vars)
```
#### File: dnm_cohorts/de_novos/sanders_nature.py
```python
import trio
import logging
import pandas
from dnm_cohorts.ensembl import parallel_sequence
from dnm_cohorts.fix_alleles import fix_het_alleles
from dnm_cohorts.de_novo import DeNovo
url = 'https://static-content.springer.com/esm/art%3A10.1038%2Fnature10945/MediaObjects/41586_2012_BFnature10945_MOESM6_ESM.xls'
async def fix_alleles(limiter, data):
alt = data['alt'].copy()
ref = data['ref'].copy()
idx = alt.str.contains(':')
seqs = {}
alts_coords = [(x.chrom, x.pos, x.pos, x.build) for i, x in data[idx].iterrows()]
refs_coords = [(x.chrom, x.pos, x.pos + len(x.alt.split(':')[1]), x.build) for i, x in data[idx].iterrows()]
async with trio.open_nursery() as nursery:
for x in alts_coords + refs_coords:
nursery.start_soon(parallel_sequence, limiter, *x[:3], seqs, x[3])
alt[idx] = [seqs[x] for x in alts_coords]
ref[idx] = [seqs[x] for x in refs_coords]
return list(ref), list(alt)
async def sanders_nature_de_novos(result, limiter):
""" get de novo data from the Sanders et al autism exome study
Supplementary table 2 (where the excel sheets for the probands and
siblings have been combined) from:
Sanders et al. (2012) Nature 485:237-241
doi: 10.1038/nature10945
Returns:
data frame of de novos, with standardised genome coordinates and VEP
consequences for each variant
"""
logging.info('getting Sanders et al Nature 2012 de novos')
probands = pandas.read_excel(url, sheet_name='Probands', engine='xlrd')
siblings = pandas.read_excel(url, sheet_name='Siblings', engine='xlrd')
data = probands.append(siblings, ignore_index=True)
data['person_id'] = data['Child_ID'].astype(str)
data['chrom'] = data['Chr'].str.replace('chr', '')
data['pos'] = data['Pos (hg19)']
data['ref'] = data['Ref']
data['alt'] = data['Alt']
data['build'] = 'grch37'
# clean up the alleles
data['ref'], data['alt'] = await fix_alleles(limiter, data)
alleles = [ fix_het_alleles(x.ref, x.alt) for i, x in data.iterrows() ]
data['ref'], data['alt'] = list(zip(*alleles))
data['person_id'] += "|asd_cohorts"
data['study'] = "10.1038/nature10945"
data['confidence'] = 'high'
vars = set()
for i, row in data.iterrows():
var = DeNovo(row.person_id, row.chrom, row.pos, row.ref, row.alt,
row.study, row.confidence, row.build)
vars.add(var)
result.append(vars)
```
#### File: dnm_cohorts/dnm_cohorts/exclude_duplicates.py
```python
from itertools import groupby
consequences = ["transcript_ablation", "splice_donor_variant",
"splice_acceptor_variant", "stop_gained", "frameshift_variant",
"initiator_codon_variant", "stop_lost", "start_lost",
"transcript_amplification", 'conserved_exon_terminus_variant',
"inframe_insertion", "inframe_deletion", "missense_variant",
"protein_altering_variant", "splice_region_variant",
"incomplete_terminal_codon_variant", "stop_retained_variant",
"synonymous_variant", "coding_sequence_variant", "mature_miRNA_variant",
"5_prime_UTR_variant", "3_prime_UTR_variant", "non_coding_exon_variant",
"non_coding_transcript_exon_variant", "intron_variant",
"NMD_transcript_variant", "non_coding_transcript_variant",
"nc_transcript_variant", "upstream_gene_variant", "downstream_gene_variant",
"TFBS_ablation", "TFBS_amplification", "TF_binding_site_variant",
"regulatory_region_ablation", "regulatory_region_amplification",
"regulatory_region_variant", "feature_elongation", "feature_truncation",
"intergenic_variant"]
severity = dict(zip(consequences, range(len(consequences))))
def drop_inperson_duplicates(de_novos):
""" get independent mutation events per person
Occasionally an individual will have multiple de novo mutations within a
gene. We only want to count the mutated gene once per individual, while
preferring to count the most damaging mutations.
Args:
list of DeNovo objects
"""
# make sure the dataset is sorted by person ID and symbol, so we can group
# consecutive variants in the same person and gene
de_novos = sorted(de_novos, key=lambda x: (x.person_id, x.symbol))
included = []
for (_, symbol), group in groupby(de_novos, key=lambda x: (x.person_id, x.symbol)):
group = list(group)
if symbol != '' and symbol is not None:
cq = min(( x.consequence for x in group ), key=lambda x: severity[x])
group = [ x for x in group if x.consequence == cq ]
group = group[:1]
included += group
return included
```
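To see the effect of the severity ranking, here is a small usage sketch with stand-in objects (a namedtuple carrying just the three attributes the function reads). It assumes the module imports as `dnm_cohorts.exclude_duplicates`, and the person IDs and gene symbol are made up.

```python
from collections import namedtuple
from dnm_cohorts.exclude_duplicates import drop_inperson_duplicates

# stand-in for the real DeNovo objects: only the attributes read by the function
FakeDeNovo = namedtuple('FakeDeNovo', ['person_id', 'symbol', 'consequence'])

de_novos = [
    FakeDeNovo('person_1', 'GENE_A', 'synonymous_variant'),
    FakeDeNovo('person_1', 'GENE_A', 'stop_gained'),
    FakeDeNovo('person_2', 'GENE_A', 'missense_variant'),
]

# person_1 has two de novos in GENE_A, so only the most severe one is kept
kept = drop_inperson_duplicates(de_novos)
print([(x.person_id, x.consequence) for x in kept])
# [('person_1', 'stop_gained'), ('person_2', 'missense_variant')]
```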
#### File: dnm_cohorts/dnm_cohorts/fix_hgvs.py
```python
import re
from hgvs.parser import Parser
from dnm_cohorts.ensembl import genome_sequence
from dnm_cohorts.fix_alleles import fix_substitution, fix_deletion, fix_insertion
hgvs_parser = Parser()
async def fix_hgvs_coordinates(limiter, coords):
""" extract genomic coordinates for variants encoded as HGVS genomic
Args:
variants: data frame of variants
column: name of column containing HGVS coordinate
Returns:
a data frame with chrom, pos, and allele columns.
Examples:
fix_hgvs_coordinates(['chr7:g.155556643G>A',
'chr3:g.11060365_11060365del', 'chr13:g.50057690_50057691insA'])
"""
chroms, positions, refs, alts = [], [], [], []
for coord in coords:
var = hgvs_parser.parse_hgvs_variant(coord)
chrom = var.ac.replace('chr', '')
pos = var.posedit.pos.start.base
end = var.posedit.pos.end.base
type = var.posedit.edit.type
ref = var.posedit.edit.ref
if type == 'del':
ref = await genome_sequence(limiter, chrom, pos, end + 1)
alt = ref[0]
elif type == 'delins':
ref = await genome_sequence(limiter, chrom, pos, end)
alt = var.posedit.edit.alt
elif type == 'ins':
ref = await genome_sequence(limiter, chrom, pos, end)
alt = ref + var.posedit.edit.alt
elif type == 'sub':
alt = var.posedit.edit.alt
elif type == 'dup':
ref = await genome_sequence(limiter, chrom, pos, end + 1)
alt = ref + ref
chroms.append(chrom)
positions.append(pos)
refs.append(ref)
alts.append(alt)
return chroms, positions, refs, alts
def fix_coordinates(coords, alleles):
"""
fix_coordinates(["chr1:10000"], ["chr1:10000:A:G"])
fix_coordinates(["chr1:10000"], ["chr1:10003:ATGC:G"])
"""
chroms, positions, refs, alts = [], [], [], []
for a, b in zip(coords, alleles):
chrom, pos = a.split(':')
_, _, ref, alt = b.split(':')
chroms.append(chrom.upper().replace('CHR', ''))
positions.append(int(pos))
refs.append(ref)
alts.append(alt)
return chroms, positions, refs, alts
async def fix_coordinates_with_allele(limiter, coords, alleles):
"""
fix_coordinates_with_allele(["chr1:10000"], ["A/G"])
fix_coordinates_with_allele(["chr1:10000", "chr2:20000"], ["A/G", "T/C"])
fix_coordinates_with_allele(["chr1:10000"], ["ATGC/G"])
fix_coordinates_with_allele(["chr1:10000"], ["sub(G->T)"])
fix_coordinates_with_allele(["chr1:10000"], ["del(1)"])
fix_coordinates_with_allele(["chr1:10000"], ["ins(ATG)"])
"""
chroms, positions, refs, alts = [], [], [], []
for coord, allele in zip(coords, alleles):
chrom, start = coord.split(':')
chrom = chrom.upper().replace('CHR', '')
start = int(start)
end = start
if 'sub' in allele:
ref, alt = await fix_substitution(chrom, start, end, allele)
elif 'del' in allele:
ref, alt = await fix_deletion(limiter, chrom, start, end, allele)
elif 'ins' in allele:
ref, alt = await fix_insertion(limiter, chrom, start, end, allele)
else:
ref, alt = allele.split('/')
chroms.append(chrom)
positions.append(int(start))
refs.append(ref)
alts.append(alt)
return chroms, positions, refs, alts
```
#### File: dnm_cohorts/dnm_cohorts/person.py
```python
class Person(dict):
def __init__(self, person_id, sex, phenotype, studies):
self.person_id = str(person_id)
self.sex = sex
self.phenotype = phenotype
self.studies = studies
def __repr__(self):
return f'Person("{self.person_id}", "{self.sex}", {self.phenotype}, {self.studies})'
def __str__(self):
return f'{self.person_id}\t{self.sex}\t{",".join(self.phenotype)}\t{",".join(self.studies)}'
def __hash__(self):
return hash(f'{self.person_id}')
def __eq__(self, other):
return hash(self) == hash(other)
def __gt__(self, other):
return self.person_id > other.person_id
```
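Because hashing and equality only consider `person_id`, the same proband contributed by two different studies collapses to a single record when cohorts are unioned as sets. A short illustration (the IDs, phenotypes and DOIs are examples, and it assumes the class imports as `dnm_cohorts.person.Person`):

```python
from dnm_cohorts.person import Person

first = Person('10000.p1|asd_cohorts', 'male', ['HP:0001249'], ['10.1038/nature13908'])
second = Person('10000.p1|asd_cohorts', 'female', ['HP:0001627'], ['10.1126/science.aac9396'])

# equality and hashing use only person_id, so a set keeps a single record
print(first == second)        # True
print(len({first, second}))   # 1
print(str(first))             # tab-separated line: ID, sex, phenotypes, studies
```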
|
{
"source": "jeremymcrae/mup",
"score": 2
}
|
#### File: mup/mupit/gene_enrichment.py
```python
from scipy.stats import poisson
from mupit.count_de_novos import get_de_novo_counts
from mupit.mutation_rates import get_expected_mutations
from mupit.plot_enrichment import plot_enrichment
import pandas
def analyse_enrichment(de_novos, trios, rates=None, plot_path=None):
""" analyse whether de novo mutations are enriched in genes
Args:
de_novos: data frame containing all the observed de novos for all the
genes
trios: dictionary of male and female proband counts in the population
plot_path: path to save enrichment plots to, or None
rates: gene-based mutation rates data frame, or None
Returns:
data frame containing results from testing for enrichment of de novos
in each gene with de novos in it.
"""
observed = get_de_novo_counts(de_novos)
expected = get_expected_mutations(rates, trios["male"], trios["female"])
# calculate p values for each gene using the mutation rates
enrichment = gene_enrichment(expected, observed)
# make a manhattan plot of enrichment P values
if plot_path is not None:
num_tests = 18500
plot_enrichment(enrichment, num_tests, plot_path, p_columns=["p_lof", "p_func"])
# remove the position column (which is only used to be able to locate the
# gene's position on a chromosome on a Manhattan plot).
del enrichment["start_pos"]
return enrichment
def gene_enrichment(expected, observed):
""" tests whether genes are enriched with de novo mutations
Args:
expected: pandas dataframe of expected numbers of mutations per gene,
given expected mutation rates for each gene.
observed: pandas data frame with tally of de novo mutations per gene
for each of the mutation types: lof_snv, lof_indel, missense_snv,
missense_indel.
Returns:
pandas data frame with P-values from testing for enrichment.
"""
lof_columns = ["lof_indel", "lof_snv"]
func_columns = ["lof_indel", "lof_snv", "missense_snv", "missense_indel"]
enriched = observed.copy()
enriched["p_lof"] = test_enrich(expected, observed, lof_columns)
enriched["p_func"] = test_enrich(expected, observed, func_columns)
return enriched
def test_enrich(expected, observed, columns):
""" tests whether genes are enriched with de novo mutations
Args:
expected: pandas dataframe of expected numbers of mutations per gene,
given expected mutation rates for each gene.
observed: pandas data frame with tally of de novo mutations per gene
for each of the mutation types: lof_snv, lof_indel, missense_snv,
missense_indel.
columns: list of columns to use to calculate enrichment within, such as
the loss-of-function columns ["lof_snv", "lof_indel"].
Returns:
pandas Series of P-values from testing for enrichment.
"""
# recode the columns in the expected mutations table, so merging the
# expected and observed datasets doesn't have conflicting column names.
expected_columns = [ x + "_expected" for x in columns ]
rename = dict(zip(columns, expected_columns))
expected = expected.rename(columns=rename)
if 'hgnc' not in observed:
observed['hgnc'] = observed['symbol']
enriched = observed.merge(expected, how="left", on=["hgnc", "chrom"])
# account for how different pandas versions sum series with only NA
kwargs = {}
if pandas.__version__ >= '0.22.0':
kwargs = {'min_count': 1}
# sum the observed and expected de novo mutations per gene
observed = enriched[columns].sum(axis=1, **kwargs)
expected = enriched[expected_columns].sum(axis=1, **kwargs)
# calculate the probability of getting the observed number of de novos,
# given the expected rate of mutations.
return poisson.sf(observed - 1, expected)
```
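The per-gene test reduces to a Poisson survival function: with `observed` de novos and `expected` mutations, `poisson.sf(observed - 1, expected)` gives the probability of seeing at least that many events under the null. A toy worked example (the counts are invented):

```python
from scipy.stats import poisson

# hypothetical gene: 4 loss-of-function de novos observed where 0.5 were expected
observed, expected = 4, 0.5

# sf(observed - 1, expected) is P(X >= observed) under the Poisson null
p_value = poisson.sf(observed - 1, expected)
print(p_value)  # ~0.00175, an apparent excess of de novos in this gene
```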
#### File: mup/mupit/gtf.py
```python
from collections import defaultdict
import gzip
import re
import tempfile
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
import pandas
from mupit.util import is_url
GTF_HEADER = ['seqname', 'source', 'feature', 'start', 'end', 'score',
'strand', 'frame']
R_SEMICOLON = re.compile(r'\s*;\s*')
R_COMMA = re.compile(r'\s*,\s*')
R_KEYVALUE = re.compile(r'(\s+|\s*=\s*)')
def convert_gtf(path):
"""Open an optionally gzipped GTF file and return a pandas.DataFrame.
"""
# Each column is a list stored as a value in this dict.
result = defaultdict(list)
for i, line in enumerate(lines(path)):
for key in line.keys():
# This key has not been seen yet, so set it to None for all
# previous lines.
if key not in result:
result[key] = [None] * i
# Ensure this row has some value for each column.
for key in result.keys():
result[key].append(line.get(key, None))
return pandas.DataFrame(result)
def lines(path):
"""Open an optionally gzipped GTF file and generate a dict for each line.
"""
fn_open = gzip.open if path.endswith('.gz') else open
if is_url(path):
# if the path refers to a URL, download the file first
temp = tempfile.NamedTemporaryFile()
urlretrieve(path, temp.name)
path = temp.name
with fn_open(path) as handle:
for line in handle:
line = line.decode('utf8')
if line.startswith('#'):
continue
elif line.split('\t', 3)[2] != 'gene':
continue
else:
yield parse(line)
def parse(line):
"""Parse a single GTF line and return a dict.
"""
result = {}
fields = line.rstrip().split('\t')
for i, col in enumerate(GTF_HEADER):
result[col] = _get_value(fields[i])
# INFO field consists of "key1=value;key2=value;...".
infos = [x for x in re.split(R_SEMICOLON, fields[8]) if x.strip()]
for i, info in enumerate(infos, 1):
# It should be key="value".
try:
key, _, value = re.split(R_KEYVALUE, info, 1)
# But sometimes it is just "value".
except ValueError:
key = 'INFO{}'.format(i)
value = info
# Ignore the field if there is no value.
if value:
result[key] = _get_value(value)
return result
def _get_value(value):
if not value:
return None
# Strip double and single quotes.
value = value.strip('"\'')
# Return a list if the value has a comma.
if ',' in value:
value = re.split(R_COMMA, value)
# These values are equivalent to None.
elif value in ['', '.', 'NA']:
return None
return value
```
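As a quick illustration of what `parse()` returns, here is a short Ensembl-style gene line pushed through it, assuming the module imports as `mupit.gtf`:

```python
from mupit.gtf import parse

# example GTF gene line; attributes follow the usual 'key "value";' layout
line = ('1\thavana\tgene\t11869\t14409\t.\t+\t.\t'
        'gene_id "ENSG00000223972"; gene_name "DDX11L1"; '
        'gene_biotype "transcribed_unprocessed_pseudogene";\n')

record = parse(line)
print(record['seqname'], record['start'], record['end'])  # 1 11869 14409
print(record['gene_id'], record['gene_name'])             # ENSG00000223972 DDX11L1
```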
#### File: mup/mupit/plot_enrichment.py
```python
from pandas import Series
from numpy import log10
import matplotlib
matplotlib.use("Agg")
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot
def plot_enrichment(enriched, num_tests, path, chrom="chrom", symbol="hgnc",
position="start_pos", p_columns=None):
""" make Manhattan plots for loss-of-function and functional variants
Args:
enriched: data frame containing columns for chr, coord (position), and
P-values from testing for enrichment with loss-of-function and
functional consequence variants.
num_tests: number of tests performed (used by Bonferroni and FDR
correction).
path: path to save the plot to.
chrom: column name for chromosome labels
symbol: column name for gene symbols
position: column name for nucleotide position coordinate
p_columns: list of columns of p-values to plot (one page of pdf per
p-value column)
"""
if p_columns is None:
raise ValueError("No p-value columns defined!")
if type(p_columns) == str:
p_columns = [p_columns]
enriched = enriched[[symbol, chrom, position] + p_columns].copy()
# recode the sex chromosomes as integers
chroms = enriched[chrom].astype(str)
chroms[chroms == "X"] = "23"
enriched[chrom] = chroms.astype(int)
# drop rows with missing values
for column in enriched.columns:
enriched = enriched[~enriched[column].isnull()]
# sort the table by genome coordinates
enriched = enriched.sort_values([chrom, position])
enriched = enriched.reset_index()
enriched["pos"] = adjust_coordinates(enriched, chrom, position)
with PdfPages(path) as pdf:
for column in p_columns:
make_figure(enriched, num_tests, column, pdf, chrom)
def make_figure(enriched, num_tests, test, pdf, chrom="chrom", symbol="hgnc"):
"""
Args:
enriched: data frame containing columns for chr, coord (position), and
P-values from testing for enrichment with loss-of-function and
functional consequence variants.
num_tests: number of tests performed (used by Bonferroni and FDR
correction).
test: column name for p-values for the test to be plotted
pdf: PdfPages plotting device, for optional multipage output
chrom: column name for chromosome labels
symbol: column name for gene symbols
"""
fig = pyplot.figure(figsize=(10, 6))
title = fig.suptitle(test)
ax = fig.gca()
ticks = get_ticks(enriched, chrom)
enriched["log10_p"] = -log10(enriched[test])
threshold = 0.01/num_tests
# plot each chromosome one by one, in alternating colors
colors = {True: "lightblue", False: "darkblue"}
color = True
for key in sorted(set(enriched[chrom])):
points = enriched[enriched[chrom] == key]
line2d = pyplot.plot(points["pos"], points["log10_p"],
axes=ax, linestyle='None', marker="o", markeredgewidth=0,
markeredgecolor=colors[color], markerfacecolor=colors[color])
color = not color
# expand the axis limits slightly
xlim = ax.set_xlim([min(enriched["pos"]) * -0.03, max(enriched["pos"]) * 1.03])
ylim = ax.set_ylim([0, max(enriched["log10_p"]) * 1.03])
# add a horizontal line to indicate genomewide significance, and annotate
# all the genes which exceed that threshold
hline = ax.axhline(y=-log10(threshold), linestyle="dashed", color="red")
signif = enriched[enriched["log10_p"] > -log10(threshold)]
for (key, row) in signif.iterrows():
text = ax.text(row["pos"], row["log10_p"], row[symbol],
fontsize="xx-small", fontstyle="italic")
annotate_plot(ax, ticks, sorted(set(enriched[chrom])))
pdf.savefig(bbox_inches='tight', pad_inches=0, transparent=True)
def annotate_plot(ax, ticks, chroms):
""" annotate a manhattan plot with axis labels, tickmarks etc
Args:
ax: matplotlib Axes for the current plot
chrom: column name for chromosome labels
ticks: list of positions for chromosome tick marks.
"""
# Hide the right, top and bottom spines
e = ax.spines['right'].set_visible(False)
e = ax.spines['top'].set_visible(False)
e = ax.spines['bottom'].set_visible(False)
e = ax.tick_params(direction='out', length=10, width=1.2, labelsize='large')
# Only show ticks on the left and bottom spines
e = ax.yaxis.set_ticks_position('left')
e = ax.xaxis.set_ticks_position('bottom')
# add in the chromosome ticks and labels
e = ax.set_xticks(ticks)
e = ax.set_xticklabels(chroms)
# define the axes labels
e = ax.set_xlabel("Chromosome", fontsize='large')
e = ax.set_ylabel("-log10(P)", fontsize='large')
def adjust_coordinates(coords, chrom="chrom", position="start_pos"):
""" get sequential positions across successive chromosomes
Manhattan plots have successive chromosomes plotted one after another. But
nucleotide coordinates reset between chromosomes. We need to create a new
coordinate, where the position takes into account the chromosomes that have
come before.
Args:
coords: pandas DataFrame containing rows with genome coordinates
chrom: column name for chromosome labels
position: column name for nucleotide position coordinates
Returns:
pandas Series of adjusted positions.
"""
pos = Series([0] * len(coords))
for key in sorted(set(coords[chrom])):
rows = coords[chrom] == key
pos[rows] = coords[position][rows] + max(pos)
return pos
def get_ticks(coords, chrom="chrom"):
""" identify the positions for tick marks for chromosomes
Args:
coords: pandas DataFrame containing rows with genome coordinates
chrom: column name for chromosome labels
Returns:
list of positions for chromosome tick marks.
"""
ticks = []
for key in sorted(set(coords[chrom])):
rows = coords[chrom] == key
pos = coords["pos"][rows]
midpoint = float(min(pos[rows]) + max(pos[rows]))/2
ticks.append(midpoint)
return ticks
```
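`adjust_coordinates` lays chromosomes end to end by offsetting each chromosome's positions by the running maximum of everything placed so far. A tiny worked example, assuming the module imports as `mupit.plot_enrichment` (the coordinates are invented):

```python
from pandas import DataFrame
from mupit.plot_enrichment import adjust_coordinates

# two chromosomes whose nucleotide coordinates overlap
coords = DataFrame({'chrom': [1, 1, 2, 2], 'start_pos': [100, 400, 50, 300]})
print(list(adjust_coordinates(coords)))
# [100, 400, 450, 700]: chromosome 2 is shifted by the running maximum (400)
```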
#### File: mup/mupit/write_json_probands.py
```python
import os
import json
from mupit import LOF_CQ, MISSENSE_CQ
def write_probands_by_gene(de_novos, fp):
""" Write a json-formatted list of probands per gene
We want a correctly-formatted JSON list of probands per gene, for analysis
of phenotype similarity between probands, see
https://github.com/jeremymcrae/hpo_similarity.
Args:
de_novos: dataframe of de-novo mutations per proband
fp: path or file handle to write the json data to.
"""
de_novos = de_novos.copy()
de_novos = de_novos[de_novos['consequence'].isin(LOF_CQ | MISSENSE_CQ)]
probands_by_gene = {}
for (gene, group) in de_novos.groupby("hgnc", sort=True):
probands_by_gene[gene] = sorted(group["person_id"])
try:
with open(fp, 'w') as handle:
json.dump(probands_by_gene, handle, indent=True)
except TypeError:
json.dump(probands_by_gene, fp, indent=True)
```
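The output is just a JSON object mapping each gene symbol to its sorted proband IDs, which is the input format expected by hpo_similarity. The sketch below writes to an in-memory handle with made-up probands; it assumes `stop_gained` and `missense_variant` fall inside the package's LOF_CQ and MISSENSE_CQ sets, so the synonymous call is filtered out, and that a non-path handle falls through to the `TypeError` branch above.

```python
import io
import pandas
from mupit.write_json_probands import write_probands_by_gene

# made-up de novos: only loss-of-function / missense consequences should be kept
de_novos = pandas.DataFrame({
    'person_id': ['person_1', 'person_2', 'person_3'],
    'hgnc': ['ARID1B', 'ARID1B', 'SCN2A'],
    'consequence': ['stop_gained', 'missense_variant', 'synonymous_variant'],
})

handle = io.StringIO()
write_probands_by_gene(de_novos, handle)
print(handle.getvalue())
# prints an indented JSON object equivalent to {"ARID1B": ["person_1", "person_2"]};
# the synonymous SCN2A call is excluded by the consequence filter
```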
#### File: mup/tests/compare_dataframes.py
```python
import unittest
import math
import numpy
class CompareTables(unittest.TestCase):
def compare_tables(self, first, second):
""" check if two Dataframes contain the same information.
Note that the dataframes don't have to have the same column order.
Args:
first: pandas DataFrame
second: pandas DataFrame
"""
# make sure the two tables have the same columns, and number of rows
self.assertEqual(set(first.columns), set(second.columns))
self.assertEqual(len(first), len(second))
# make sure the values in the columns are identical between tables.
# We have to run through all the values one by one, to make sure that
# NaN values compare correctly.
for column in first:
for pos in range(len(first)):
first_val = first[column][pos]
second_val = second[column][pos]
# prepare a suitable diagnostic error message if the values are
# not equal
if type(first_val) == str or first_val is None or type(first_val) == numpy.bool_:
msg = "{} != {} at position {} in {}".format(first_val,
second_val, pos, column)
else:
msg = "{:.20f} != {:.20f} at position {} in {}".format( \
first_val, second_val, pos, column)
if type(first_val) in [float, numpy.float64] and math.isnan(first_val):
# if one is nan, check that the other is also nan, since
# nan entries cannot be checked for equality.
self.assertTrue(math.isnan(second_val), msg)
elif type(first_val) in [float, numpy.float64]:
# Only check if floats are nearly equal to get around float
# precision issues.
self.assertAlmostEqual(first_val, second_val, msg=msg)
else:
self.assertEqual(first_val, second_val, msg)
```
|
{
"source": "jeremymcrae/peds",
"score": 3
}
|
#### File: peds/peds/person.py
```python
class Person(object):
__slots__ = ('family', 'id', 'mom', 'dad', 'sex', 'phenotype', 'data', 'inferred')
male_codes = set(['1', 'm', 'M', 'male'])
female_codes = set(['2', 'f', 'F', 'female'])
unknown_codes = set(['0', 'NA', 'unknown', '.', '-9'])
def __init__(self, family, id, dad, mom, sex, phenotype, *args, **kwargs):
self.family = family
self.id = id
self.mom = mom
self.dad = dad
self.sex = sex
self.phenotype = phenotype
self.data = args
self.inferred = kwargs.get('inferred', False)
if self.sex not in self.male_codes | self.female_codes | self.unknown_codes:
raise ValueError('unknown sex code: {}'.format(self.sex))
if self.phenotype not in set(['1', '2']) | self.unknown_codes:
raise ValueError('unknown phenotype: {}'.format(self.phenotype))
def __repr__(self):
data = ''
if len(self.data) > 0:
temp = [ '"{}"'.format(x) for x in self.data ]
data = ', {}'.format(", ".join(temp))
return 'Person("{}", "{}", "{}", "{}", "{}", "{}"{})'.format(self.family,
self.id, self.dad, self.mom, self.sex, self.phenotype, data)
def __str__(self):
""" convert the object back to a ped file line
"""
data = ''
if len(self.data) > 0:
data = '\t' + '\t'.join(self.data)
return '{}\t{}\t{}\t{}\t{}\t{}{}\n'.format(self.family, self.id,
self.dad, self.mom, self.sex, self.phenotype, data)
def __hash__(self):
return hash((self.family, self.id))
def __eq__(self, other):
return hash(self) == hash(other)
def is_affected(self):
return self.phenotype == "2"
def is_male(self):
return self.sex in self.male_codes
def unknown_sex(self):
return self.sex in self.unknown_codes
def _is_inferred(self):
return self.inferred
```
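Each `Person` corresponds to one line of a PED file, and `str()` reproduces that tab-separated line, so records round-trip cleanly. A short example using the standard ped codes ('1'/'2' for male/female and unaffected/affected); it assumes the class imports as `peds.person.Person`, and the family and sample IDs are invented:

```python
from peds.person import Person

# family F01: an affected male proband with both parents named in the pedigree
proband = Person('F01', 'child1', 'dad1', 'mom1', '1', '2')

print(proband.is_affected(), proband.is_male())  # True True
print(repr(str(proband)))  # 'F01\tchild1\tdad1\tmom1\t1\t2\n'
```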
|
{
"source": "jeremymcrae/regressor",
"score": 3
}
|
#### File: regressor/tests/test_regressor_multiple.py
```python
import unittest
from time import time
import numpy
from scipy.linalg import lstsq
from statsmodels.api import OLS
from sklearn.linear_model import LinearRegression
from regressor import linregress
def difference(first, second):
return abs(first - second)
def relative_diff(first, second):
delta = first / second
below = delta < 1
delta[below] = 1 / delta[below]
return delta
def make_arrays(n, k):
''' make two arrays of random numbers, one for x-values, one for y-values
We make this easier for the fits by formatting as 32-bit floats in fortran
ordered arrays.
Returns:
tuple of (x_values, y_values, x_values with an intercept column)
'''
x = numpy.asfortranarray(numpy.random.random((n, k)).astype(numpy.float32))
y = numpy.asfortranarray(numpy.random.random(n).astype(numpy.float32))
x_with_intercept = numpy.concatenate([x, numpy.ones(len(x), dtype=numpy.float32)[:, None]], axis=1)
return x, y, x_with_intercept
class TestRegressorMulitple(unittest.TestCase):
''' check that the results from this package match other regression systems
'''
def test_multiple_regression(self):
''' check a single large regression with many different linear models.
Skip p-value checks for other tests, as statsmodels is too slow. Also,
this uses arrasy of random numbers, i.e. random betas and p-values.
'''
x, y, x_with_intercept = make_arrays(n=5000, k=250)
# mainly compare lstsq from scipy with regressor, but lstsq lacks p-values
# so we need statsmodels OLS for p-values, but that is very slow
lstsq_fit = lstsq(x_with_intercept, y)
regressor_fit = linregress(x, y)
sm_fit = OLS(y, x_with_intercept).fit()
sk_fit = LinearRegression().fit(x, y)
# check that the betas are very tightly correlated
corr = numpy.corrcoef(lstsq_fit[0][:-1], regressor_fit.coef_[:-1])[0, 1] ** 2
self.assertTrue(corr > 0.9999999)
# check that lstsq betas correlate with sk_fit betas for extra sanity
corr = numpy.corrcoef(lstsq_fit[0][:-1], sk_fit.coef_)[0, 1] ** 2
self.assertTrue(corr > 0.9999999)
# check the beta values are very close. They aren't identical, as this
# package uses 32-bit floats, but the others convert to 64-bit doubles.
# Differences should be on the order of 1e-8, which is the usual delta
# between a 64-bit float and its 32-bit representation (for values
# around 0.5). Float differences accumulate to around 2e-6 at most,
# which makes a bigger relative difference for betas near zero.
abs_diff = difference(lstsq_fit[0][:-1], regressor_fit.coef_[:-1])
self.assertTrue(abs_diff.max() < 5e-6)
# check the p-values are nearly identical in log10 space, and correlate
p_delta = abs(numpy.log10(regressor_fit.pvalue) - numpy.log10(sm_fit.pvalues))
self.assertTrue(p_delta.max() < 1e-3)
corr = numpy.corrcoef(numpy.log10(regressor_fit.pvalue), numpy.log10(sm_fit.pvalues))[0, 1] ** 2
self.assertTrue(corr > 0.9999999)
def test_multiple_regression_big_small(self):
''' test multiple regression with a small array, and a large array
'''
for n in [50, 500000]:
x, y, x_with_intercept = make_arrays(n=n, k=10)
lstsq_fit = lstsq(x_with_intercept, y)
regressor_fit = linregress(x, y)
# check the betas are tightly correlated
corr = numpy.corrcoef(lstsq_fit[0][:-1], regressor_fit.coef_[:-1])[0, 1] ** 2
self.assertTrue(corr > 0.99999)
abs_diff = difference(lstsq_fit[0][:-1], regressor_fit.coef_[:-1])
self.assertTrue(abs_diff.max() < 5e-6)
def test_regression_correlated(self):
        ''' check multiple regression, where y-values depend on the x columns
'''
x, y, x_with_intercept = make_arrays(n=50000, k=10)
# define some effect sizes (which decline across the columns)
betas = numpy.logspace(0, -10, num=x.shape[1])
y = (x * betas).sum(axis=1)
lstsq_fit = lstsq(x_with_intercept, y)
regressor_fit = linregress(x, y)
# check differences versus the predefined betas
diff = difference(betas, regressor_fit.coef_[:-1])
self.assertTrue(diff.max() < 5e-6)
corr = numpy.corrcoef(betas, regressor_fit.coef_[:-1])[0, 1] ** 2
self.assertTrue(corr > 0.9999999)
# and check difference versus lstsq fit
corr = numpy.corrcoef(lstsq_fit[0][:-1], regressor_fit.coef_[:-1])[0, 1] ** 2
self.assertTrue(corr > 0.9999999)
```
|
{
"source": "jeremymcrae/vcfremapper",
"score": 2
}
|
#### File: vcfremapper/tests/test_remap.py
```python
import unittest
# from pyfaidx import Fasta
from liftover import get_lifter
from vcfremapper.remap import get_new_coords, remap
from vcfremapper.variant import Variant
class TestRemap(unittest.TestCase):
''' test remapping functions
'''
@classmethod
def setUpClass(cls):
cls.lift = get_lifter('hg19', 'hg38')
def test_get_new_coords(self):
''' check getting new coordinates for a variant
'''
data = get_new_coords(self.lift, 'chr1', 1000000)
self.assertEqual(data, ('chr1', 1064619, '+'))
# check a pos which swaps strand
data = get_new_coords(self.lift, 'chr1', 317720)
self.assertEqual(data, ('chr1', 501616, '-'))
# positions no longer in the new genome build raise errors
with self.assertRaises(ValueError):
get_new_coords(self.lift, 'chr1', 1)
# unprefixed chromosomes can be converted
data = get_new_coords(self.lift, '1', 1000000)
self.assertEqual(data, ('chr1', 1064619, '+'))
def test_remap(self):
''' check remapping a variant
'''
genome = None
var = Variant('1\t1000000\t.\tA\tG\t100\tPASS\tAC=100\n')
var = remap(self.lift, var, genome)
self.assertEqual(var.chrom, '1')
self.assertEqual(var.pos, 1064620)
def test_remap_missing_alt(self):
''' check remapping a variant without alts
'''
genome = None
var = Variant('1\t1000000\t.\tA\t.\t100\tPASS\tAC=100\n')
var = remap(self.lift, var, genome)
self.assertEqual(var.chrom, '1')
self.assertEqual(var.pos, 1064620)
def test_remap_not_in_build(self):
''' check a variant where the position is not in the new build
'''
genome = None
var = Variant('1\t1\t.\tA\tG\t100\tPASS\tAC=100\n')
self.assertIsNone(remap(self.lift, var, genome))
def test_remap_unknown_chrom(self):
''' check a variant where the variant maps to a non-standard chromosome
'''
genome = None
var = Variant('1\t13027995\t.\tA\tG\t100\tPASS\tAC=100\n')
self.assertIsNone(remap(self.lift, var, genome))
```
#### File: vcfremapper/tests/test_sort_vcf.py
```python
import unittest
import tempfile
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
from vcfremapper.sort_vcf import sort_vcf
class TestSortVcf(unittest.TestCase):
''' test sort_vcf function
'''
def test_sort_vcf(self):
''' check sort_vcf function
'''
lines = ['1\t100\t.\tA\tG\t100\tPASS\tAC=100\n',
'2\t150\t.\tA\tG\t100\tPASS\tAC=100\n',
'2\t150\t.\tA\tC\t100\tPASS\tAC=100\n',
'1\t200\t.\tA\tG\t100\tPASS\tAC=100\n',
'1\t180\t.\tA\tG\t100\tPASS\tAC=100\n']
input = tempfile.NamedTemporaryFile(mode='w+t')
output = tempfile.NamedTemporaryFile(mode='w+t')
input.writelines(lines)
input.flush()
header = '##fileformat=VCFv4.1\n' \
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n'
# define the byte offsets for the lines by their coordinates
coords = {
'1': {
(100, 'A', ('G',)): 0,
(200, 'A', ('G',)): 84,
(180, 'A', ('G',)): 112,
},
'2': {
(150, 'A', ('G',)): 28,
(150, 'A', ('C',)): 56,
},
}
# sort the VCF
sort_vcf(coords, input, output, header)
output.seek(0)
self.assertEqual(output.read(), header + ''.join(sorted(lines)))
```
|
{
"source": "jeremymcrae/vcfsyncer",
"score": 3
}
|
#### File: vcfsyncer/vcfsyncer/vcf.py
```python
import pysam
from vcfsyncer.variant import MultiVariantRecord
CHROMS = list(map(str, range(1, 23))) + ['X', 'Y', 'MT']
class VCFSyncer:
""" this synchronises the coordinates of multiple VCFs
I want to be able to take multiple VCFs and iterate through the unique set
of variants in all VCFs, but keep the VCFs synchronised, despite the VCFs
potentially containing different variants.
"""
def __init__(self, *args, chromosomes=None):
""" initialize class with VCFs and defined chromosomes
Args:
args: list of VCF paths, or pysam.VariantFile objects
chromosomes: list of chromosomes (sorted correctly) that we want to
consider variants from. Without this the VCFs can desynchronize
if they contain calls on non-standard chromosomes.
"""
if chromosomes is None:
chromosomes = CHROMS
# ensure VCFs sort by chrom correctly. Strip any 'chr' prefix, then
# convert to dict, chrom map to list position, and finally add another
# set of chromosomes with chr prefixes, but with same sort order.
chromosomes = [x.lstrip('chr') for x in chromosomes]
self.chroms = dict(zip(chromosomes, range(len(chromosomes))))
for k in list(self.chroms):
self.chroms['chr' + k] = self.chroms[k]
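        # e.g. with the default CHROMS this maps '1' and 'chr1' -> 0, ...,
        # 'X' and 'chrX' -> 22, 'MT' and 'chrMT' -> 24 (illustrative of the sort order)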
self.vcfs = list(args)
        if isinstance(self.vcfs[0], str):
self.vcfs = [pysam.VariantFile(x) for x in self.vcfs]
# add header field, so we can store alts within INFO dicts from each VCF
for x in self.vcfs:
x.header.add_line('##INFO=<ID=alts,Number=A,Type=String,Description="Alternate alleles">')
self.variants = [ next(x) for x in self.vcfs ]
self.leading = self._first()
self.cache = {'chrom': None, 'pos': None, 'refs': {}}
def __repr__(self):
vcfs = ', '.join(map(str, self.vcfs))
        return 'VCFSyncer({})'.format(vcfs)
def _match(self, a, b):
""" check if two variants are at the same site and share a ref allele
"""
if b is None:
return False
return a.chrom == b.chrom and a.pos == b.pos and a.ref == b.ref
def _int(self, chrom):
''' convert a chromosome to an integer for correct sorting
'''
try:
return self.chroms[chrom]
except KeyError:
# allow for non-standard contigs
return abs(hash(chrom))
def _first(self):
""" find which of the vcfs have a variant at the lowest genome position
I'm assuming the VCFs are sorted, and we want to use variants which
come first in the genome (ie are on an earlier chromosome and have a
lower nucleotide position).
Returns:
list of booleans showing if each variant is at the lowest position
"""
vars = ( x for x in self.variants if x is not None )
var = min(vars, key=lambda x: (self._int(x.chrom), x.pos))
return [ self._match(var, x) for x in self.variants ]
def __iter__(self):
return self
def __next__(self):
""" iterate through vcfs, returning lowest site variants at each step
"""
if all( x is None for x in self.vcfs ):
raise StopIteration
refs = sorted(self.cache['refs'])
if len(refs) == 0:
self._fill_cache()
refs = sorted(self.cache['refs'])
ref = refs[0]
vars = self.cache['refs'][ref]
del self.cache['refs'][ref]
return MultiVariantRecord(*vars)
def _increment(self):
""" increment the vcfs/variants ranked first (so iteration works)
"""
for i, in_lead in enumerate(self.leading):
if in_lead and self.vcfs[i] is not None:
try:
self.variants[i] = next(self.vcfs[i])
except StopIteration:
self.vcfs[i] = None
self.variants[i] = None
# avoid variants on non-standard chromosomes, otherwise we easily
# get out of sync
while self.variants[i] is not None and self.variants[i].chrom not in self.chroms:
try:
self.variants[i] = next(self.vcfs[i])
except StopIteration:
self.vcfs[i] = None
self.variants[i] = None
try:
self.leading = self._first()
except ValueError:
self.leading = []
def fetch(self, contig=None, start=None, stop=None, region=None, reopen=False, end=None, reference=None):
''' imitate pysam fetch behavior
'''
iterators = (x.fetch(contig, start, stop, region, reopen, end, reference) for x in self.vcfs)
self.variants = [ next(x) for x in iterators ]
self.leading = self._first()
for x in self:
if x.chrom != contig or stop is not None and x.pos > stop:
break
yield x
def _fill_cache(self):
""" fill cache with variants at the same site
"""
if len(self.cache['refs']) == 0:
var = [ x for x, y in zip(self.variants, self.leading) if y ][0]
self.cache['chrom'] = var.chrom
self.cache['pos'] = var.pos
while True:
vars = [ x for x, y in zip(self.variants, self.leading) if y ]
if not any( x.pos == self.cache['pos'] for x in vars):
break
for x in vars:
if x.ref not in self.cache['refs']:
self.cache['refs'][x.ref] = []
self.cache['refs'][x.ref].append(x)
self._increment()
```
|
{
"source": "jeremymcrews/swimlane_record_updater",
"score": 2
}
|
#### File: swimlane_record_updater/swimlane_records_updater/SruRecords.py
```python
from slackclient import SlackClient
from swimlane import Swimlane
from swimlane.core.search import EQ, NOT_EQ, CONTAINS, EXCLUDES, GT, GTE, LT, LTE
import json
import os
class Setup:
def __init__(self, sw_config, sw_inputs, sw_user=None):
        for k, v in sw_inputs.items():
setattr(self, k, v)
        for k, v in sw_config.items():
setattr(self, k, v)
#for k, v in sw_user.iteritems():
# setattr(self, k, v)
def mergeTwoDicts(self, x, y):
z = x.copy() # start with x's keys and values
z.update(y) # modifies z with y's keys and values & returns None
return z
class Records(Setup):
def __init__(self, config_file, sw_config, sw_inputs, proxySet=False, slackNotify=False):
Setup.__init__(self, sw_config, sw_inputs)
self.config = json.loads(config_file)
self.app = None
self.appRaw = None
self.crossApp = None
self.crossAppId = None
self.crossAppRaw = None
self.crossRecordId = None
self.proxySet = proxySet
if self.proxySet:
os.environ['HTTPS_PROXY'] = self.proxyUrl
self.recordData = None
self.recordKeys = []
self.records = None
self.recordsFieldOnly = {}
self.report = None
self.slackApiResults = {}
self.slackNotify = slackNotify
if self.slackNotify:
self.sc = SlackClient(self.slackToken)
self.swimlane = Swimlane(self.swimlaneApiHost, self.swimlaneApiUser, self.swimlaneApiKey, verify_ssl=False)
self.sw_config = sw_config
self.sw_inputs = sw_inputs
def getApp(self, crossAppId=None):
if crossAppId is not None:
self.crossAppId = crossAppId
self.app = self.swimlane.apps.get(id=self.crossAppId)
else:
self.app = self.swimlane.apps.get(id=self.ApplicationId)
def getAppRaw(self, crossAppId=None):
if crossAppId is not None:
self.crossAppId = crossAppId
self.app = self.swimlane.apps.get(id=self.crossAppId)
else:
self.app = self.swimlane.apps.get(id=self.ApplicationId)
self.appRaw = self.app._raw
def getRecord(self, crossAppId=None, crossRecordId=None):
if crossAppId is not None and crossRecordId is not None:
self.getApp(crossAppId)
self.crossRecordId = crossRecordId
self.records = self.app.records.get(id=self.crossRecordId)
else:
self.getApp()
self.records = self.app.records.get(id=self.RecordId)
def getRecordKeys(self, records):
keys = []
for r in records:
keys.append(r[0])
self.recordKeys = keys
def getReport(self, reportName, filters=None, crossAppId=None, limit=50):
if crossAppId is not None:
self.getApp(crossAppId)
else:
self.getApp()
self.report = self.app.reports.build(reportName, limit=limit)
if filters is not None:
for f in filters:
self.report.filter(f[0], f[1], f[2])
def pullFieldsFromRecords(self, crossAppId=None, crossRecordId=None, fields=None):
if crossAppId is not None and crossRecordId is not None:
self.getRecord(crossAppId, crossRecordId)
else:
self.getRecord()
self.getRecordKeys(self.records)
if fields:
oldFields = self.records
newFields = {}
for f in fields:
if f in self.recordKeys:
newFields[f] = oldFields[f]
self.recordsFieldOnly = newFields
return self.recordsFieldOnly
else:
return self.records
def buildSwOutputs(self, integrationId, includedFields, staticFields=None):
        self.pullFieldsFromRecords(fields=includedFields)
if staticFields:
self.recordData = self.mergeTwoDicts(self.mergeTwoDicts(self.mergeTwoDicts(self.sw_config, self.sw_inputs), self.recordsFieldOnly), staticFields)
else:
self.recordData = self.mergeTwoDicts(self.mergeTwoDicts(self.sw_config, self.sw_inputs), self.recordsFieldOnly)
if self.slackNotify:
self.sendSlackMessage(self.formatSlackMessage(integrationId))
        return self.recordData
def formatSlackMessage(self, integrationId):
if self.slackNotify:
return self.config['Slack'][integrationId].format(self.ApplicationId, self.RecordId)
def sendSlackMessage(self, message):
if self.slackNotify:
self.setSlackChannel()
slackChannel = self.recordData['Slack Channel']
if self.recordData['Slack TS'] is not None:
threadTs = self.recordData['Slack TS']
self.slackApiResults = self.sc.api_call("chat.postMessage", channel=slackChannel, text=message, thread_ts=threadTs)
else:
self.slackApiResults = self.sc.api_call("chat.postMessage", channel=slackChannel, text=message)
self.recordData['Slack TS'] = self.slackApiResults['message']['ts']
def setSlackChannel(self):
if self.slackNotify:
slackChannel = self.recordData["Slack Channel"]
if slackChannel is None:
self.recordData["Slack Channel"] = self.config['Slack']['primaryChannel']
```
|
{
"source": "jeremymdunne/rocketengine",
"score": 3
}
|
#### File: rocketengine/parser/parser.py
```python
import os.path
from . import raspparser
def parseEngineFile(filepath):
# look at the extension to determine which parser to use
extension = os.path.splitext(filepath)[1]
if '.eng' in extension:
return raspparser.parseRaspFile(filepath)
else:
pass
#TODO throw exceptions
```
#### File: rocketengine/parser/raspparser.py
```python
def parseRaspFile(filepath):
headerpassed = False
data = {
'motor name':None,
'motor diameter':None,
'motor length':None,
'motor delay': None,
'propellant weight': None,
'total weight': None,
'manufacturer': None,
'thrust data': {'time':[],'thrust':[]}
}
with open(filepath) as enginefile:
while True:
line = enginefile.readline()
if not line:
break
if not headerpassed:
if line[0].isalpha():
headerpassed = True
name, diam, length, delay, prop_weight, total_weight, manufacturer = parseRaspHeader(line)
data['motor name'] = name
data['motor diameter'] = diam
data['motor length'] = length
data['motor delay'] = delay
data['propellant weight'] = prop_weight
data['total weight'] = total_weight
data['manufacturer'] = manufacturer
else:
try:
time, thrust = parseRaspDataLine(line)
data['thrust data']['time'].append(time)
data['thrust data']['thrust'].append(thrust)
except:
break
enginefile.close()
return data
def parseRaspDataLine(line):
# first float is the time
# second is the thrust (N)
contents = line.split()
time = float(contents[0])
thrust = float(contents[1])
return time, thrust
def parseRaspHeader(header):
# the content of the header is as follows:
# 1. Motor Name
# 2. Motor Diameter
# 3. Motor Length
# 4. Motor Delay
# 5. Propellant Weight
# 6. Total Weight
# 7. Manufacturer
# parse by spaces
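    # Illustrative header (made-up values; a single-number delay is assumed,
    # since the delay field below is parsed with float()):
    #   "C6 18 70 5 0.0108 0.0240 Estes"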
headercontents = header.split()
motor_name = headercontents[0]
motor_diameter = float(headercontents[1])
motor_length = float(headercontents[2])
motor_delay = float(headercontents[3])
propellant_weight = float(headercontents[4])
total_weight = float(headercontents[5])
manufacturer = headercontents[6]
return motor_name, motor_diameter, motor_length, motor_delay, propellant_weight, total_weight, manufacturer
```
|
{
"source": "jeremymdunne/standardatmo",
"score": 3
}
|
#### File: standardatmo/standardatmo/standardatmo.py
```python
from os import path # for relative path import
import csv # for atmo data reading
######### CONSTANTS ##########
ATMO_ALTITUDE_INDEX = 0
ATMO_GEOPOTENTIAL_ALTITUDE_INDEX = 1
ATMO_TEMPERATURE_INDEX = 2
ATMO_PRESSURE_INDEX = 3
ATMO_DENSITY_INDEX = 4
ATMO_SPEED_OF_SOUND_INDEX = 5
ATMO_GRAVITY_INDEX = 6
ATMO_DYNAMIC_VISCOSITY_INDEX = 7
ATMO_KINEMATIC_VISCOSITY_INDEX = 8
ATMO_NUMBER_DENSITY_INDEX = 9
ATMO_PARTICLE_SPEED_INDEX = 10
ATMO_COLLISION_FREQUENCY_INDEX = 11
ATMO_MEAN_FREE_PATH_INDEX = 12
ATMO_THERMAL_CONDUCTIVITY_INDEX = 13
ATMO_MOLECULAR_WEIGHT_INDEX = 14
ATMO_PRESSURE_SCALE_HEIGHT_INDEX = 15
ATMO_INTERPOLATION_METHOD = "NDD"
ATMO_INTERPOLATION_ORDER = 10
__atmodata = {}
########### CODE ##########
def loadAtmoData():
global __atmodata
basepath = path.dirname(__file__)
filepath = path.abspath(path.join(basepath, "..", "resources", "data.csv"))
__atmodata = {
"altitude":[], "geopotential_altitude":[], "temperature": [], "pressure":[],
"density": [], "speed_of_sound":[], "gravity": [], "dynamic_viscosity":[],
"kinematic_viscosity":[],"number_density":[], "particle_speed":[],
"collision_frequency":[],"mean_free_path":[], "thermal_conductivity":[],
"molecular_weight":[],"pressure_scale_height":[]
}
with open(filepath, newline='') as atmofile:
atmoreader = csv.reader(atmofile, delimiter=',')
for row in atmoreader:
try:
linedata = []
for i in row:
linedata.append(float(i))
__atmodata["altitude"].append(linedata[ATMO_ALTITUDE_INDEX] * 1000)
__atmodata["geopotential_altitude"].append(linedata[ATMO_GEOPOTENTIAL_ALTITUDE_INDEX] * 1000)
__atmodata["temperature"].append(linedata[ATMO_TEMPERATURE_INDEX])
__atmodata["pressure"].append(linedata[ATMO_PRESSURE_INDEX])
__atmodata["density"].append(linedata[ATMO_DENSITY_INDEX])
__atmodata["speed_of_sound"].append(linedata[ATMO_SPEED_OF_SOUND_INDEX])
__atmodata["gravity"].append(linedata[ATMO_GRAVITY_INDEX])
__atmodata["dynamic_viscosity"].append(linedata[ATMO_DYNAMIC_VISCOSITY_INDEX])
__atmodata["kinematic_viscosity"].append(linedata[ATMO_KINEMATIC_VISCOSITY_INDEX])
__atmodata["number_density"].append(linedata[ATMO_NUMBER_DENSITY_INDEX])
__atmodata["particle_speed"].append(linedata[ATMO_PARTICLE_SPEED_INDEX])
__atmodata["collision_frequency"].append(linedata[ATMO_COLLISION_FREQUENCY_INDEX])
__atmodata["mean_free_path"].append(linedata[ATMO_MEAN_FREE_PATH_INDEX])
__atmodata["thermal_conductivity"].append(linedata[ATMO_THERMAL_CONDUCTIVITY_INDEX])
__atmodata["molecular_weight"].append(linedata[ATMO_MOLECULAR_WEIGHT_INDEX])
__atmodata["pressure_scale_height"].append(linedata[ATMO_PRESSURE_SCALE_HEIGHT_INDEX])
except:
pass
atmofile.close()
def getAtmoData(altitude, key = None):
if key is not None:
return atmoInterpolate(altitude, key)
else:
# return all data at that point
        data = {}
        for keys in __atmodata.keys():
            if keys != "altitude":
                data[keys] = atmoInterpolate(altitude, keys)
        return data
def newtonsDividedDifferenceInterpolation(x, x_data, y_data):
order = ATMO_INTERPOLATION_ORDER
# bias to closest side
arr = [] # rows, cols
    n = len(x_data) - 1  # find index under the data
for i in range(0,len(x_data) - 1):
if x_data[i+1] > x:
n = i
break
# correct for low data
if x < x_data[0]:
n = 0
num_points = order + 1
# bias order to closest data set
    if abs(x_data[n] - x) < abs(x_data[n+1] - x):
# offset towards rear
n_below = int(num_points/2 + 0.5)
n_above = int(num_points/2)
else:
# offset above
n_below = int(num_points/2 + 0.5)
n_above = int(num_points/2)
if n < n_below:
n_below = n
n_above = num_points - n_below
elif (len(x_data) -1 - n) < n_above:
n_above = len(x_data) - 1 - n
n_below = num_points - n_above
n_start = n - n_below
for i in range(0, num_points):
arr.append([n_start + i])
arr[i].append(y_data[n_start + i])
# print(arr)
for i in range(2,order + 1): # col
# construct the column
for n in range(0, order - i + 2): # row
# value = (arr[n][i-1] - arr[i+1][n-1]) / (x_data[arr[i][0]] - x_data[arr[i+1][0]])
value = (arr[n][i-1] - arr[n+1][i-1]) / (x_data[arr[n][0]] - x_data[arr[n+i-1][0]])
arr[n].append(value)
# construct the final
# print(arr)
value = arr[0][1]
for i in range(1, order): # col
pre = 1
for n in range(0, i): # row
pre *= (x - x_data[arr[n][0]])
# print(pre, ' ', arr[0][i+1])
value += pre * arr[0][i+1]
# print(value)
return value
def atmoInterpolate(altitude, key):
    # use a Newton's divided difference interpolation of order ATMO_INTERPOLATION_ORDER
arr = [] # rows, cols
# find the closest lower index
y_data = __atmodata[key]
x_data = __atmodata['altitude']
val = newtonsDividedDifferenceInterpolation(altitude, x_data, y_data)
return val
loadAtmoData()
if __name__ == '__main__':
# print(getAtmoData(1200))
print(atmoInterpolate(3500, 'pressure'))
# print(newtonsDividedDifferenceInterpolation(301, [300,304,305,307],[2.4771,2.4829,2.4843,2.4871]))
# newtonsDividedDifferenceInterpolation(3499, [1000,2000,3000,4000,5000,6000],[])
```
|
{
"source": "jeremymeteorologo/Jeremy_Meteorologo",
"score": 2
}
|
#### File: Jeremy_Meteorologo/tests/test.py
```python
from starry_night import skycam
from nose.tools import eq_
import numpy as np
import pandas as pd
def test_findLocalMaxPos():
img = np.zeros((480,640))
img[10,30] = 1
# find max value if we are on top of it
pos = skycam.findLocalMaxPos(img, 30,10,1)
eq_((pos.maxX,pos.maxY), (30,10), 'Right on Maximum')
# return center, if max is not in range
pos = skycam.findLocalMaxPos(img, 3,12,1)
eq_((pos.maxX,pos.maxY), (3,12), 'Max not in range')
# radius too big should not be a problem
pos = skycam.findLocalMaxPos(img, 0,0,300)
eq_((pos.maxX,pos.maxY), (30,10), 'Array overflow1')
# nans in image should not be a problem...
img = np.ones((480,640))*np.nan
img[10,30] = 1
pos = skycam.findLocalMaxPos(img, 0,0,300)
eq_((pos.maxX,pos.maxY), (30,10), 'Nan in range')
# ... even if there is no max in range
pos = skycam.findLocalMaxPos(img, 0,0,3)
eq_((pos.maxX,pos.maxY), (0,0), 'Nan outside range. x,y:{}'.format((pos.maxX,pos.maxY)))
def test_isInRange():
star1 = pd.Series({'x':2, 'y':3})
star2 = pd.Series({'x':6, 'y':6})
b = skycam.isInRange(star1, star2, rng=5, unit='pixel')
eq_(b, True, 'On Range failed')
b = skycam.isInRange(star1, star2, rng=4.999, unit='pixel')
eq_(b, False, 'Outside Range failed')
b = skycam.isInRange(star1, star2, rng=5.001, unit='pixel')
eq_(b, True, 'In Range failed')
star1 = pd.Series({'ra':0, 'dec':0})
star2 = pd.Series({'ra':0, 'dec':5/180*np.pi})
b = skycam.isInRange(star1, star2, rng=5, unit='deg')
eq_(b, True, 'On Range failed')
b = skycam.isInRange(star1, star2, rng=4.999, unit='deg')
eq_(b, False, 'Outside Range failed')
b = skycam.isInRange(star1, star2, rng=5.001, unit='deg')
eq_(b, True, 'In Range failed')
def test_getBlobsize():
image = np.ones((51,51))
b = skycam.getBlobsize(image, 0.5, limit=10)
    eq_(b, 10, 'Exceed limit failed: {}'.format(b))
b = skycam.getBlobsize(image, 0.5)
eq_(b, 51*51, 'Total blob failed')
image[25,25] = 20
image[26,24] = 20
image[27,24] = 20
b = skycam.getBlobsize(image, 2)
eq_(b, 3, 'Diagonal blob failed: {}'.format(b))
image[:,22:26] = 20
b = skycam.getBlobsize(image, 2)
eq_(b, 204, 'Blob at border failed: {}'.format(b))
image[25,24]=np.NaN
image[26,25]=-np.NaN
b = skycam.getBlobsize(image, 2)
eq_(b, 202, 'Blob at border failed: {}'.format(b))
```
|
{
"source": "jeremymeyers/spotify_images",
"score": 3
}
|
#### File: jeremymeyers/spotify_images/images.py
```python
import api
import os
import requests
from zipfile import ZipFile
def rename(name):
return name.lower().replace(' ','_').replace('/','_').replace('[','(').replace(']',')')
def zip_images(directory):
zip_this = ZipFile(directory + '.zip', 'w')
os.chdir(directory)
for root, dirs, files in os.walk(os.getcwd()):
for file in files:
zip_this.write(file)
zip_this.close()
def url_to_uri(uri, typeof):
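    # e.g. (hypothetical URL) url_to_uri('https://open.spotify.com/artist/abc123', 'artist')
    # returns 'spotify:artist:abc123'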
offset = uri.find(typeof)
return 'spotify:' + uri[offset:].replace('/',':')
def get_images(url, directory=None, verbose=False, zip_this=False):
typeof = ''
results = ''
if 'artist' in url:
typeof = 'artist'
results = api.get_artist(url)
elif 'playlist' in url:
typeof = 'playlist'
results = api.get_playlist(url)
if results == '':
print("No results found, check URL and try again.")
exit(1)
if typeof == 'artist':
name = results['items'][0]['artists'][0]['name']
results = results['items']
elif typeof == 'playlist':
name = results['name']
results = results['tracks']['items']
if verbose:
print('Name: ' + name + '\nType: ' + typeof)
if directory:
directory = directory + '/' + rename(name)
else:
directory = 'results/' + rename(name)
if not os.path.exists(directory):
os.makedirs(directory)
count = 0
pics = []
for track in results:
if typeof == 'artist':
url = track['images'][0]['url']
name = rename(track['name'])
elif typeof == 'playlist':
url = track['track']['album']['images'][0]['url']
name = rename(track['track']['album']['name'])
path = directory + '/' + name + '.jpeg'
if os.path.exists(path):
continue
pic = requests.get(url, allow_redirects=True)
if verbose:
print(path)
open(path, 'wb').write(pic.content)
count += 1
pics.append(pic)
print(str(len(pics)) + " saved to " + directory)
if zip_this:
zip_images(directory)
return directory
```
|
{
"source": "jeremymill/aws-app-mesh-controller-for-k8s",
"score": 2
}
|
#### File: test_app/backend/app.py
```python
import os
import config
import time
from flask import Flask, request
from aws_xray_sdk.core import patch_all, xray_recorder
from aws_xray_sdk.ext.flask.middleware import XRayMiddleware
app = Flask(__name__)
xray_recorder.configure(
context_missing='LOG_ERROR',
service=config.XRAY_APP_NAME,
)
patch_all()
XRayMiddleware(app, xray_recorder)
@app.route('/defaultroute')
def default():
print('----------------')
print(request.headers)
print('----------------')
time.sleep(config.TIMEOUT_VALUE)
return config.WHO_AM_I
@app.route('/timeoutroute')
def timeout():
print('----------------')
print(request.headers)
print('----------------')
time.sleep(config.TIMEOUT_VALUE)
return config.WHO_AM_I
@app.route('/tlsroute')
def tlsroute():
print('----------------')
print(request.headers)
print('----------------')
return config.WHO_AM_I
if __name__ == '__main__':
app.run(host='0.0.0.0', port=config.PORT, debug=config.DEBUG_MODE)
```
|
{
"source": "jeremymiller00/learning-map",
"score": 3
}
|
#### File: jeremymiller00/learning-map/app.py
```python
import sys
import typer
import sqlite3
from uuid import uuid4
from src.learningmap.crud import *
from configs.learningmap.config import set_config
app = typer.Typer()
@app.command()
def main(cmd: str,):
interactive = False
config = set_config()
cur, con = connect(config.get("db_path"))
if cmd == "interactive":
interactive = True
while interactive:
cmd = input("Enter a command (add, getall, query, quit): ")
if cmd == "quit":
break
else:
execute(cmd, cur, con)
else:
execute(cmd, cur, con)
def execute(cmd, cur, con):
if cmd == "add":
try:
add_item(cur, con)
except sqlite3.OperationalError:
print("Exception caught")
elif cmd == "getall":
for i in get_all(cur, con):
print(i)
elif cmd == "query":
try:
for i in query(cur, con):
print(i)
except sqlite3.OperationalError as e:
print(f"Exception raised: {e}")
else:
typer.echo(f"Entered {cmd}")
if __name__ == "__main__":
app()
```
#### File: tests/learningmap/test_crud.py
```python
import pytest
from src.learningmap.crud import *
def test_add():
assert add(3,4) == 7
```
|
{
"source": "jeremymiller00/modelingtools",
"score": 3
}
|
#### File: modelingtools/tests/test_testingtools.py
```python
from unittest import TestCase
from modelingtools.testingtools import BayesianABTesting
import matplotlib
class TestBayesianABTesting(TestCase):
def setUp(self) -> None:
self.data = {
"a_trials": 100,
"a_successes": 10,
"b_trials": 1000,
"b_successes": 120
}
self.likelihood_function = "binomial"
self.tester = BayesianABTesting(self.likelihood_function, self.data)
def test__test_binom(self):
winner, diff, prob = self.tester._test_binom(metric="metric", verbose=1, labels=["A", "B"], plot=False)
self.assertIsInstance(winner, str, "Invalid type, should be str")
self.assertIsInstance(diff, float, "Invalid numeric type")
self.assertIsInstance(prob, float, "Invalid numeric type")
self.assertIn(winner, ["A", "B"], "Invalid value for winner")
self.assertAlmostEqual(diff, 0.0129, delta=0.0001)
self.assertAlmostEqual(prob, 0.53, delta=0.01)
def test__plot_posteriors(self):
fig = self.tester._plot_posteriors(
self.data.get("a_successes"),
self.data.get("a_trials") - self.data.get("a_successes"),
self.data.get("b_successes"),
self.data.get("b_trials") - self.data.get("b_successes"),
["A", "B"]
)
self.assertIsInstance(fig, matplotlib.figure.Figure,
"Invalid object returned, matplotlib.figure.Figure required")
```
#### File: modelingtools/tests/test_trainingtools.py
```python
import unittest
import numpy as np
import statsmodels
import statsmodels.api as sm
from sklearn.linear_model import LogisticRegression
from modelingtools import trainingtools as tt
class TestTrainingTools(unittest.TestCase):
def setUp(self):
self.linreg = tt.StatsmodelsWrapper()
self.logreg = tt.StatsmodelsWrapper(is_logit=True)
self.logreg_thresh = tt.StatsmodelsWrapper(threshold=0.5, is_logit=True)
self.X_dummy = np.random.rand(100)
self.X_dummy_2d = np.random.rand(100, 20)
self.X_dummy_predict = np.random.rand(100)
self.y_dummy_continuous = np.random.rand(100)
self.y_dummy_categorical = np.random.randint(low=0, high=2, size=100)
def test_init(self):
self.assertEqual(self.linreg.is_logit, False)
self.assertEqual(self.logreg.is_logit, True)
self.assertEqual(self.logreg_thresh.threshold, 0.5)
self.assertEqual(self.logreg_thresh.is_logit, True)
def test_fit(self):
self.linreg.fit(self.X_dummy, self.y_dummy_continuous)
self.assertIsInstance(self.linreg._fit_model,
statsmodels.regression.linear_model.RegressionResultsWrapper)
self.logreg.fit(self.X_dummy, self.y_dummy_categorical)
self.assertIsInstance(self.logreg._fit_model,
statsmodels.discrete.discrete_model.BinaryResultsWrapper)
def test_predict(self):
self.linreg.fit(self.X_dummy, self.y_dummy_continuous)
linreg_predictions = self.linreg.predict(self.X_dummy_predict)
self.assertEqual(len(linreg_predictions), 100)
self.logreg.fit(self.X_dummy, self.y_dummy_categorical)
logreg_predictions = self.logreg.predict(self.X_dummy_predict)
self.assertEqual(len(logreg_predictions), 100)
def test_threshold(self):
self.logreg_thresh.fit(self.X_dummy, self.y_dummy_categorical)
logreg_predictions = self.logreg_thresh.predict(self.X_dummy_predict)
for item in logreg_predictions:
self.assertIn(item, [0, 1], "Threshold Error")
def test_loo_cv(self):
y_true, y_pred = tt.loo_cv(self.X_dummy_2d, self.y_dummy_categorical)
self.assertEqual(len(y_true), len(y_pred))
model = LogisticRegression(C=10000000)
# predictions from model without CV
model.fit(self.X_dummy_2d, self.y_dummy_categorical)
preds = model.predict_proba(self.X_dummy_2d)
self.assertNotEqual(y_pred, preds)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jeremymiller00/Success-in-Online-Education",
"score": 3
}
|
#### File: src/models/train_model_rf.py
```python
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
# from rfpimp import *
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import make_scorer, confusion_matrix, recall_score, roc_auc_score, roc_curve, recall_score, classification_report
from sklearn.model_selection import GridSearchCV, cross_val_score, cross_validate
from sklearn.ensemble import RandomForestClassifier
def standard_confusion_matrix(y_true, y_pred):
"""Make confusion matrix with format:
-----------
| TP | FP |
-----------
| FN | TN |
-----------
Parameters
----------
y_true : ndarray - 1D
y_pred : ndarray - 1D
Returns
-------
ndarray - 2D
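    Example (illustrative, not from the original module):
    y_true=[1, 0, 1], y_pred=[1, 1, 1] returns [[2, 1], [0, 0]]
    (tp=2, fp=1, fn=0, tn=0).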
"""
[[tn, fp], [fn, tp]] = confusion_matrix(y_true, y_pred)
return np.array([[tp, fp], [fn, tn]])
def print_roc_curve(y_test, probabilities, model_type):
'''
Calculates and prints a ROC curve given a set of test classes and probabilities from a trained classifier
'''
tprs, fprs, thresh = roc_curve(y_test, probabilities)
plt.figure(figsize=(12,10))
plt.plot(fprs, tprs,
label=model_type,
color='red')
plt.plot([0,1],[0,1], 'k:')
plt.legend()
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.title("ROC Curve AUC: {} Recall: {}".format(roc_auc, recall))
plt.show()
######################################################################
if __name__ == '__main__':
# change path to get appropriate cutoff (first_quarter, first_quarter, first_quarter; CHANGE PATH IN WRITE OUT!)
X_train = pd.read_csv('data/processed/first_quarter/X_train.csv')
y_train = pd.read_csv('data/processed/first_quarter/y_train.csv')
y_train = y_train['module_not_completed']
X_test = pd.read_csv('data/processed/first_quarter/X_test.csv')
y_test = pd.read_csv('data/processed/first_quarter/y_test.csv')
y_test = y_test['module_not_completed']
X_train.fillna(value = 0, inplace = True)
y_train.fillna(value = 0, inplace = True)
X_test.fillna(value = 0, inplace = True)
y_test.fillna(value = 0, inplace = True)
# estimator
rf = RandomForestClassifier()
# GridSearch parameters
rf_params = {
'n_estimators': [50, 100, 1000],
'max_depth': [5, 10, 50, 100],
'min_samples_split': [1.0, 10, 100],
'min_samples_leaf': [1, 10, 100],
'max_features': ['auto', 'sqrt', 'log2']
}
rf_clf = GridSearchCV(rf, param_grid=rf_params,
scoring='roc_auc',
n_jobs=-1,
cv=5)
rf_clf.fit(X_train, y_train)
rf_model = rf_clf.best_estimator_
# best model as determined by grid search
# rf_model = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=50, max_features='auto', max_leaf_nodes=None,min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=5, min_samples_split=5, min_weight_fraction_leaf=0.0, n_estimators=1000, n_jobs=-1, oob_score=False, random_state=None, verbose=0, warm_start=False)
# rf_model.fit(X_train, y_train)
# save model
pickle.dump(rf_model, open('models/random_forest_completion_first_quarter.p', 'wb'))
# save for flask
# pickle.dump(rf_model, open('web_app/model.p', 'wb'))
# sys.exit()
# cross validate
cv = cross_validate(rf_model, X_train, y_train, scoring = 'roc_auc', cv=5, return_train_score=1)
print(cv)
# evaluation
roc_auc_cv = (cross_val_score(rf_model, X_train, y_train, scoring = 'roc_auc', cv=5))
recall_cv = cross_val_score(rf_model, X_train, y_train, scoring = 'recall', cv=5)
precision_cv = cross_val_score(rf_model, X_train, y_train, scoring = 'precision', cv=5)
accuracy_cv = cross_val_score(rf_model, X_train, y_train, scoring = 'accuracy', cv=5)
f1_cv = cross_val_score(rf_model, X_train, y_train, scoring = 'f1_micro', cv=5)
print('Best Model: {}'.format(rf_model))
# print('Best Model parameters: {}'.format(rf_model.best_params_))
print('Roc Auc: {}'.format(roc_auc_cv))
print('Recall Score: {}'.format(recall_cv))
print('Precision Score: {}'.format(precision_cv))
print('Accuracy Score: {}'.format(accuracy_cv))
print('F1 Micro: {}'.format(f1_cv))
'''
# final model evaluation (see jupyter notebook)
predictions = rf_model.predict(X_test)
roc_auc = roc_auc_score(y_test, predictions)
probas = rf_model.predict_proba(X_test)[:, :1]
tprs, fprs, thresh = roc_curve(y_test, probas)
recall = recall_score(y_test, predictions)
conf_mat = standard_confusion_matrix(y_test, predictions)
class_report = classification_report(y_test, predictions)
print_roc_curve(y_test, probas, 'Random Forest')
print('Best Model: {}'.format(rf_model))
print('\nRoc Auc: {}'.format(roc_auc))
print('\nRecall Score: {}'.format(recall))
print('\nClassification Report:\n {}'.format(class_report))
print('\nConfusion Matrix:\n {}'.format(standard_confusion_matrix(y_test, predictions)))
# feature importances
feat_imp = importances(rf_model, X_test, y_test)
feat_imp.sort_values(by='Importance', ascending=False)[0:10]
pd.DataFrame(data={'fprs': fprs, 'tprs': tprs, 'Thresholds': thresh}).loc[300:1000:25]
'''
```
|
{
"source": "JeremyMoeglich/Python-Cmd-Screen",
"score": 3
}
|
#### File: src/list_screen/dynamic_element.py
```python
class DynamicElement:
def __init__(self, frame: int = 0) -> None:
self.frame = frame
def render(self) -> str:
raise NotImplementedError()
def increment_frame(self):
self.frame += 1
```
#### File: list_screen/DynamicElements/loading_symbol.py
```python
from dynamic_element import DynamicElement
class LoadingSymbol(DynamicElement):
def __init__(self, frame:int =0) -> None:
self.frame = frame
def render(self) -> str:
match self.frame % 4:
case 0:
return '/'
case 1:
return '-'
case 2:
return "\\"
case 3:
return '|'
return ''
def increment_frame(self):
self.frame += 1
```
|
{
"source": "jeremy-moffitt/public-cloud-info-service",
"score": 2
}
|
#### File: public-cloud-info-service/pint_server/database.py
```python
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from pint_server.models import (
AlibabaImagesModel,
AmazonImagesModel,
AmazonServersModel,
Base,
GoogleImagesModel,
GoogleServersModel,
ImageState,
MicrosoftImagesModel,
MicrosoftRegionMapModel,
MicrosoftServersModel,
OracleImagesModel,
ServerType,
VersionsModel
)
def get_environ_or_bust(key_name):
assert key_name in os.environ, 'Environment variable %s is required.' % (
key_name)
return os.environ.get(key_name)
def create_postgres_url_from_env():
db_user = get_environ_or_bust('POSTGRES_USER')
db_password = get_environ_or_bust('POSTGRES_PASSWORD')
db_name = get_environ_or_bust('POSTGRES_DB')
db_host = get_environ_or_bust('POSTGRES_HOST')
db_port = os.environ.get('POSTGRES_PORT', 5432)
db_ssl_mode = os.environ.get('POSTGRES_SSL_MODE', None)
db_root_cert = os.environ.get('POSTGRES_SSL_ROOT_CERTIFICATE', None)
ssl_mode = ''
if db_ssl_mode:
# see
# https://www.postgresql.org/docs/11/libpq-connect.html#
# LIBPQ-CONNECT-SSLMODE
ssl_mode = '?sslmode=%s' % (db_ssl_mode)
if db_root_cert:
ssl_mode += '&sslrootcert=%s' % (db_root_cert)
return ('postgresql://%(user)s:%(password)s@%(host)s:%(port)s/'
'%(db)s%(ssl)s' % {
'user': db_user,
                'password': db_password,
'db': db_name,
'host': db_host,
'port': db_port,
'ssl': ssl_mode})
def init_db():
# import all modules here that might define models so that
# they will be registered properly on the metadata. Otherwise
# you will have to import them first before calling init_db()
if os.environ.get('DATABASE_URI', None):
engine = create_engine(os.environ['DATABASE_URI'],
convert_unicode=True)
else:
engine = create_engine(
create_postgres_url_from_env(), convert_unicode=True)
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base.query = db_session.query_property()
Base.metadata.create_all(bind=engine)
return db_session
```
#### File: tests/loadtest/loadtest.py
```python
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime, timedelta
import argparse
import random
import requests
import sys
import time
'''
To run this program,
python loadtest.py --help displays the options the program takes
If you do not provide any options, there are default values for the options
python loadtest.py --num_requests 100 --duration 1 --url http://localhost:5000
Here the duration is in minutes
'''
urls_to_hit = ["/v1/providers",
"/v1/amazon/images", "/v1/amazon/servers",
"/v1/google/images", "/v1/google/servers",
"/v1/oracle/images", "/v1/oracle/servers",
"/v1/microsoft/images", "/v1/microsoft/servers",
"/v1/amazon/regions",
"/v1/amazon/us-east-2/images",
"/v1/microsoft/regions",
"/v1/providers.xml",
"/v1/alibaba/images.xml"]
time_buckets = {}
def task(base_url):
print("Executing the Task")
url = base_url + random.choice(urls_to_hit)
resp = requests.get(url, verify=False)
increment_time_bucket(resp.elapsed.total_seconds())
#print("Task Executed {}".format(threading.current_thread()))
def increment_time_bucket(seconds):
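    # Buckets are 0.5 s wide, e.g. an illustrative 1.3 s response lands in
    # index 2, which print_time_buckets reports as the 1.0 - 1.5 bin.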
index = int(seconds // 0.5)
if index in time_buckets.keys():
time_buckets[index] += 1
else:
time_buckets[index] = 1
def print_time_buckets():
print("========== response time histogram (seconds) ==========")
    for key in sorted(time_buckets.keys()):
        timeStart = key * 0.5
        timeEnd = (key + 1) * 0.5
        print(timeStart, " - ", timeEnd, " : ", time_buckets[key])
def main(argv):
#handle command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--num_requests',
required=False,
type=int,
default=100,
dest="num_requests",
help="Number of requests to hit the API")
parser.add_argument('-d', '--duration',
required=False,
type=int,
default=1,
dest="duration",
help="Duration in minutes")
parser.add_argument('-u', '--url',
required=False,
type=str,
default="http://localhost:5000",
dest="base_url",
help="base url for the service")
args = parser.parse_args()
duration = args.duration
num_requests = args.num_requests
base_url = args.base_url
start_time = datetime.now()
end_time = start_time + timedelta(minutes=duration)
executor = ThreadPoolExecutor(max_workers=100)
num_chunks = 0
min_chunks = 5
'''
Dividing the number of requests into chunks so the requests can be
spaced out evenly over the duration. In each chunk of time, fire
the allocated number of requests for that chunk into the thread pool
to execute.
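    For example (illustrative numbers): 100 requests over 2 minutes gives
    chunks = 50, so 2 requests are submitted roughly every 0.04 minutes.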
'''
chunks = num_requests/duration
if (chunks < min_chunks):
chunks = min_chunks
while (num_chunks < chunks):
        num_requests_in_chunk_counter = 0
while (num_requests_in_chunk_counter < num_requests / chunks and datetime.now() < end_time):
num_requests_in_chunk_counter = num_requests_in_chunk_counter + 1
            executor.submit(task, base_url)
while (datetime.now() < (start_time + timedelta(minutes=(duration/chunks)*num_chunks))):
time.sleep(1)
num_chunks = num_chunks + 1
print_time_buckets()
if __name__ == '__main__':
main(sys.argv[1:])
```
#### File: tests/unit/conftest.py
```python
import mock
import os
import pytest
import sqlalchemy
os.environ = mock.MagicMock()
sqlalchemy.create_engine = mock.MagicMock()
from pint_server import app
@pytest.fixture(scope='session')
def client():
flask_app = app.app
flask_app.config['TESTING'] = True
with flask_app.test_client() as client:
yield client
```
|
{
"source": "jeremymoreau/mnisiscom",
"score": 2
}
|
#### File: mnisiscom/mnisiscom/mnisiscom.py
```python
import os
import platform
import random
import shutil
import subprocess
import sys
from os.path import join
from pathlib import Path
if not platform.system() == 'Darwin':
from tkinter import Tk, filedialog
import eel
from colorama import Back, Fore, Style, deinit, init
import siscom
@eel.expose
def get_nii_file():
# Use AppleScript instead of Tk filedialog on Mac
# Tk filedialog is currently buggy on Mac OS
if platform.system() == 'Darwin':
ascript = 'tell application "SystemUIServer" to return POSIX path of (choose file with prompt "Choose a NIfTI (.nii) file" of type {"nii"})'
raw_path = subprocess.run(['/usr/bin/osascript', '-e', ascript], capture_output=True)
file_path = raw_path.stdout.decode('utf-8').rstrip()
else:
root = Tk()
root.withdraw()
root.wm_attributes('-topmost', 1)
root.update()
file_path = filedialog.askopenfilename(
title="Select file", filetypes=[("NIfTI Files", "*.nii")], parent=root)
root.update()
root.destroy()
return file_path
@eel.expose
def get_folder():
# Use AppleScript instead of Tk filedialog on Mac
# Tk filedialog is currently buggy on Mac OS
if platform.system() == 'Darwin':
ascript = 'tell application "SystemUIServer" to return POSIX path of (choose folder with prompt "Choose the output folder")'
raw_path = subprocess.run(['/usr/bin/osascript', '-e', ascript], capture_output=True)
folder_path = raw_path.stdout.decode('utf-8').rstrip()
else:
root = Tk()
root.withdraw()
root.wm_attributes('-topmost', 1)
root.update()
folder_path = filedialog.askdirectory(parent=root)
root.update()
root.destroy()
return folder_path
@eel.expose
def get_mcr_folder():
# Use AppleScript instead of Tk filedialog on Mac
# Tk filedialog is currently buggy on Mac OS
if platform.system() == 'Darwin':
ascript = 'tell application "SystemUIServer" to return POSIX path of (choose folder with prompt "Select the MCR folder")'
raw_path = subprocess.run(['/usr/bin/osascript', '-e', ascript], capture_output=True)
folder_path = raw_path.stdout.decode('utf-8').rstrip()
else:
root = Tk()
root.withdraw()
root.wm_attributes('-topmost', 1)
root.update()
folder_path = filedialog.askdirectory()
root.update()
root.destroy()
# Check if valid MCR folder is selected
if os.path.isdir(folder_path):
folder_content = os.listdir(folder_path)
if 'bin' in folder_content:
return folder_path
else:
return 'Invalid MCR folder.'
else:
return 'Invalid MCR folder.'
@eel.expose
def get_spm_bin():
# Use AppleScript instead of Tk filedialog on Mac
# Tk filedialog is currently buggy on Mac OS
if platform.system() == 'Darwin':
ascript = 'tell application "SystemUIServer" to return POSIX path of (choose file with prompt "Select SPM12 (run_spm12.sh)" of type {"sh"})'
raw_path = subprocess.run(['/usr/bin/osascript', '-e', ascript], capture_output=True)
spm12_path = raw_path.stdout.decode('utf-8').rstrip()
else:
root = Tk()
root.withdraw()
root.wm_attributes('-topmost', 1)
spm12_path = ''
if platform.system() == 'Windows':
root.update()
spm12_path = filedialog.askopenfilename(title='Select SPM12 ("spm12_win64.exe")', filetypes=[
("SPM12", "spm12_win64.exe")])
root.update()
root.destroy()
else:
# Linux
root.update()
spm12_path = filedialog.askopenfilename(
title='Select SPM12 ("run_spm12.sh")', filetypes=[("SPM12", "run_spm12.sh")])
root.update()
root.destroy()
return spm12_path
@eel.expose
def save_settings(settings_str):
home_dir = str(Path.home())
settings_dir = os.path.join(home_dir, '.mnisiscom')
if not os.path.isdir(settings_dir):
os.mkdir(settings_dir)
settings_file = os.path.join(home_dir, '.mnisiscom', 'settings.json')
with open(settings_file, 'w') as f:
f.write(settings_str)
@eel.expose
def load_settings():
home_dir = str(Path.home())
settings_file = os.path.join(home_dir, '.mnisiscom', 'settings.json')
if os.path.isfile(settings_file):
with open(settings_file, 'r') as f:
settings = f.read()
return(settings)
else:
return ''
@eel.expose
def run_siscom(param_dict):
init() # start colorama
# Get param values
out = param_dict['output_path']
t1 = param_dict['t1_mri_path']
interictal = param_dict['interictal_spect_path']
ictal = param_dict['ictal_spect_path']
spm12_path = param_dict['spm12_path']
mcr_path = param_dict['mcr_path']
skipcoreg = param_dict['skip_coreg']
mripanel = param_dict['mri_panel']
glassbrain = param_dict['glass_brain']
mripanel_t1window = tuple([float(x) for x in param_dict['mri_window']])
mripanel_spectwindow = tuple([float(x)
for x in param_dict['spect_window']])
mripanel_siscomwindow = tuple([float(x)
for x in param_dict['siscom_window']])
mripanel_thickness = int(float(param_dict['slice_thickness']))
mripanel_transparency = float(param_dict['overlay_transparency'])
siscom_threshold = float(param_dict['siscom_threshold'])
mask_threshold = float(param_dict['mask_threshold'])
# Create output directory
siscom_dir = siscom.create_output_dir(out)
# Copy original T1, interictal, and ictal volumes to siscom_dir
t1_nii = shutil.copy(
t1, join(siscom_dir, 'T1' + ''.join(Path(t1).suffixes)))
interictal_nii = shutil.copy(interictal, join(
siscom_dir, 'interictal' + ''.join(Path(interictal).suffixes)))
ictal_nii = shutil.copy(ictal, join(
siscom_dir, 'ictal' + ''.join(Path(ictal).suffixes)))
if not skipcoreg:
# Coregister i/ii to t1, then coregister ri to rii (for better alignment)
print(Fore.GREEN + 'Coregistering interictal/ictal SPECT images to T1 with SPM (~1-5 minutes)...')
print(Style.RESET_ALL)
eel.update_progress_bar('Coregistering images...', 0)
siscom.spm_coregister(
t1_nii, [interictal_nii, ictal_nii], spm12_path, mcr_path)
eel.update_progress_bar('Coregistering images...', 20)
rinterictal_nii = join(siscom_dir, 'rinterictal.nii')
rictal_nii = join(siscom_dir, 'rictal.nii')
siscom.spm_coregister(
rinterictal_nii, [rictal_nii], spm12_path, mcr_path)
rrictal_nii = join(siscom_dir, 'rrictal.nii')
else:
rinterictal_nii = interictal_nii
rrictal_nii = ictal_nii
t1_nii = t1
# Run SISCOM
eel.update_progress_bar('Computing SISCOM...', 40)
print(Fore.GREEN + 'Computing SISCOM images (~5-30s)...')
print(Style.RESET_ALL)
siscom.compute_siscom(rinterictal_nii, rrictal_nii, siscom_dir,
threshold=siscom_threshold, mask_cutoff=mask_threshold)
# Get paths of result nii files
interictal_z = join(siscom_dir, 'interictal_z.nii.gz')
ictal_z = join(siscom_dir, 'ictal_z.nii.gz')
siscom_z = join(siscom_dir, 'siscom_z.nii.gz')
mask = join(siscom_dir, 'interictal_mask.nii.gz')
# Make MRI panels
if mripanel:
eel.update_progress_bar('Plotting MRI panel results...', 50)
print(Fore.GREEN + 'Plotting MRI panel results (~30s-1 minute)...')
print(Style.RESET_ALL)
# Create list of slice orientations if 'all' option is selected
panel_slices = ['ax', 'cor', 'sag']
for panel_slice in panel_slices:
siscom.make_mri_panel(t1_nii, interictal_z, ictal_z, siscom_z, mask, siscom_dir, slice_orientation=panel_slice,
slice_thickness=mripanel_thickness, alpha=mripanel_transparency,
panel_type='all', t1_window=mripanel_t1window,
spect_window=mripanel_spectwindow, siscom_window=mripanel_siscomwindow)
# Make glass brain
if glassbrain:
eel.update_progress_bar('Plotting glass brain results...', 70)
print(Fore.GREEN + 'Plotting glass brain results (~30s-2 minutes)...')
print(Style.RESET_ALL)
siscom.make_glass_brain(
t1_nii, siscom_z, siscom_dir, spm12_path, mcr_path)
# Clean output dir
eel.update_progress_bar('Cleaning up...', 90)
print(Fore.GREEN + 'Cleaning up result files... (~30s)')
print(Style.RESET_ALL)
siscom.clean_output_dir(siscom_dir)
deinit() # stop colorama
# Show done message in GUI
eel.update_progress_bar('Done!', 100)
eel.show_done_message()
print(Fore.GREEN + 'Done!')
print(Style.RESET_ALL)
# Try opening the results folder (Check that dir exists before calling subprocess)
if os.path.isdir(siscom_dir):
if platform.system() == 'Windows':
try:
os.startfile(siscom_dir)
except OSError:
pass
elif platform.system() == 'Darwin':
subprocess.Popen(['open', siscom_dir],
stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
else:
subprocess.Popen(['xdg-open', siscom_dir],
stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
def start_gui():
"""Start the mnisiscom GUI
This function is used to start the mnisiscom GUI via the 'mnisiscom_gui' console_scripts entry point.
"""
cwd = os.path.dirname(os.path.abspath(__file__))
eel.init(join(cwd, 'gui'))
eel.start('main.html', mode='chrome', size=(1000, 700), port=0)
if __name__ == '__main__':
if getattr(sys, 'frozen', False):
# Path for PyInstaller
eel.init(join('mnisiscom_data', 'gui'))
else:
# Path if started as script
cwd = os.path.dirname(os.path.abspath(__file__))
eel.init(join(cwd, 'gui'))
eel.start('main.html', mode='chrome', size=(1000, 700), port=0)
```
|
{
"source": "jeremymturner/pytle",
"score": 3
}
|
#### File: pytle/pytle/cli.py
```python
from pytle import pytle
from pytle.argsc import argsc
import json
def handler(args, tle):
if args.args.output == "text":
for sat in args.args.sats:
print(tle.get_sat_info_text(sat))
elif args.args.output == "json":
response = []
for sat in args.args.sats:
response.append(tle.get_sat_info(sat))
print(json.dumps({'sats': response}))
def main():
import sys
args = argsc(sys.argv[1:])
tle = pytle(keps_url="http://www.amsat.org/amsat/ftp/keps/current/nasabare.txt", cache=True)
handler(args, tle)
if __name__ == '__main__':
main()
```
|
{
"source": "jeremynac/sholl",
"score": 3
}
|
#### File: sholl/sholl/util.py
```python
def dash_to_studly(s):
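    # e.g. dash_to_studly("foo-bar_baz") returns "FooBarBaz" (illustrative input)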
l = list(s)
l[0] = l[0].upper()
delims = "-_"
for i, c in enumerate(l):
if c in delims:
if (i+1) < len(l):
l[i+1] = l[i+1].upper()
out = "".join(l)
for d in delims:
out = out.replace(d, "")
return out
```
|
{
"source": "jeremyng123/cobaltstrike",
"score": 3
}
|
#### File: jeremyng123/cobaltstrike/scan.py
```python
import argparse
import requests
import urllib3
from urllib.parse import urljoin
from .lib import decrypt_beacon, decode_config, JsonEncoder
def tek_scan(host):
ua = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
if not host.startswith("http"):
host = "http://{}/".format(host)
print("Assuming that you meant {}".format(host))
print("Checking {}".format(host))
try:
r = requests.get(urljoin(host, "/aaa9"), headers={'user-agent': ua}, verify=False, timeout=5)
if r.status_code == 200:
data = r.content
if data.startswith(b"\xfc\xe8"):
beacon = decrypt_beacon(data)
if beacon:
config = decode_config(beacon)
if config:
print("Configuration of the x86 payload:")
for d in config:
if isinstance(config[d], bytearray):
print("{:30} {}".format(d, config[d].hex()))
else:
print("{:30} {}".format(d, config[d]))
print("")
else:
print("x86: Impossible to extract the configuration")
else:
print("x86: Impossible to extract beacon")
elif data.startswith(b"MZ"):
# Sometimes it returns a PE directly
config = decode_config(data)
if config:
print("Configuration of the x86 payload")
for d in config:
if isinstance(config[d], bytearray):
print("{:30} {}".format(d, config[d].hex()))
else:
print("{:30} {}".format(d, config[d]))
print("")
else:
print("x86: Impossible to extract the configuration")
else:
print("x86: Payload not found")
else:
print("x86: HTTP Status code {}".format(r.status_code))
r = requests.get(urljoin(host, "aab9"), headers={'user-agent': ua}, verify=False, timeout=5)
if r.status_code == 200:
data = r.content
if data.startswith(b"\xfc\xe8"):
beacon = decrypt_beacon(data)
if beacon:
config = decode_config(beacon)
if config:
for d in config:
print("Configuration of the x86_64 payload")
if isinstance(config[d], bytearray):
print("{:30} {}".format(d, config[d].hex()))
else:
print("{:30} {}".format(d, config[d]))
else:
print("x86_64: Impossible to extract the configuration")
else:
print("x86_64: Impossible to extract beacon")
elif data.startswith(b"MZ"):
# Sometimes it returns a PE directly
config = decode_config(data)
if config:
print("Configuration of the x86_64 payload:")
for d in config:
if isinstance(config[d], bytearray):
print("{:30} {}".format(d, config[d].hex()))
else:
print("{:30} {}".format(d, config[d]))
print("")
else:
print("x86_64: Impossible to extract the configuration")
else:
print("x86_64: Payload not found")
else:
print("x86_64: HTTP Status code {}".format(r.status_code))
except (requests.exceptions.ChunkedEncodingError, requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout, requests.exceptions.TooManyRedirects, requests.exceptions.ContentDecodingError):
print("Request failed")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Extract Cobalt Strike beacon and configuration from a server')
parser.add_argument('HOST', help='Domain or IP address of the Cobalt Strike server')
parser.add_argument('--dump', '-D', help='Extract the beacon (only if the beacon is encrypted)')
args = parser.parse_args()
ua = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
if not args.HOST.startswith("http"):
args.HOST = "http://{}/".format(args.HOST)
print("Assuming that you meant {}".format(args.HOST))
print("Checking {}".format(args.HOST))
try:
r = requests.get(urljoin(args.HOST, "/aaa9"), headers={'user-agent': ua}, verify=False, timeout=5)
if r.status_code == 200:
data = r.content
if data.startswith(b"\xfc\xe8"):
beacon = decrypt_beacon(data)
if beacon:
config = decode_config(beacon)
if config:
print("Configuration of the x86 payload:")
for d in config:
if isinstance(config[d], bytearray):
print("{:30} {}".format(d, config[d].hex()))
else:
print("{:30} {}".format(d, config[d]))
print("")
else:
print("x86: Impossible to extract the configuration")
else:
print("x86: Impossible to extract beacon")
elif data.startswith(b"MZ"):
# Sometimes it returns a PE directly
config = decode_config(data)
if config:
print("Configuration of the x86 payload")
for d in config:
if isinstance(config[d], bytearray):
print("{:30} {}".format(d, config[d].hex()))
else:
print("{:30} {}".format(d, config[d]))
print("")
else:
print("x86: Impossible to extract the configuration")
else:
print("x86: Payload not found")
else:
print("x86: HTTP Status code {}".format(r.status_code))
r = requests.get(urljoin(args.HOST, "aab9"), headers={'user-agent': ua}, verify=False, timeout=5)
if r.status_code == 200:
data = r.content
if data.startswith(b"\xfc\xe8"):
beacon = decrypt_beacon(data)
if beacon:
config = decode_config(beacon)
if config:
for d in config:
print("Configuration of the x86_64 payload")
if isinstance(config[d], bytearray):
print("{:30} {}".format(d, config[d].hex()))
else:
print("{:30} {}".format(d, config[d]))
else:
print("x86_64: Impossible to extract the configuration")
else:
print("x86_64: Impossible to extract beacon")
elif data.startswith(b"MZ"):
# Sometimes it returns a PE directly
config = decode_config(data)
if config:
print("Configuration of the x86_64 payload:")
for d in config:
if isinstance(config[d], bytearray):
print("{:30} {}".format(d, config[d].hex()))
else:
print("{:30} {}".format(d, config[d]))
print("")
else:
print("x86_64: Impossible to extract the configuration")
else:
print("x86_64: Payload not found")
else:
print("x86_64: HTTP Status code {}".format(r.status_code))
except (requests.exceptions.ChunkedEncodingError, requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout, requests.exceptions.TooManyRedirects, requests.exceptions.ContentDecodingError):
print("Request failed")
```
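The same configuration-printing loop appears four times in the script above (x86 and x86_64, shellcode and PE responses). A small helper would keep the four call sites consistent; this is a sketch that assumes `decode_config` returns a dict-like mapping of setting names to values or bytearrays, as the loops above imply.
```python
def print_config(config, arch):
    """Print one decoded beacon configuration; bytearray values are hex-encoded."""
    print("Configuration of the {} payload:".format(arch))
    for key, value in config.items():
        if isinstance(value, bytearray):
            print("{:30} {}".format(key, value.hex()))
        else:
            print("{:30} {}".format(key, value))
    print("")
```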
|
{
"source": "jeremyng123/pytm",
"score": 2
}
|
#### File: pytm/tests/test_pytmfunc.py
```python
import sys
sys.path.append("..")
import json
import os
import random
import re
import unittest
from contextlib import contextmanager
from os.path import dirname
from io import StringIO
from pytm import (TM, Action, Actor, Boundary, Dataflow, Datastore, ExternalEntity,
Lambda, Process, Server, Threat)
with open(os.path.abspath(os.path.join(dirname(__file__), '..')) + "/pytm/threatlib/threats.json", "r") as threat_file:
threats = {t["SID"]: Threat(**t) for t in json.load(threat_file)}
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
class TestTM(unittest.TestCase):
def test_seq(self):
random.seed(0)
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, 'seq.plantuml')) as x:
expected = x.read().strip()
TM.reset()
tm = TM("my test tm", description="aaa")
internet = Boundary("Internet")
server_db = Boundary("Server/DB")
user = Actor("User", inBoundary=internet)
web = Server("Web Server")
db = Datastore("SQL Database", inBoundary=server_db)
Dataflow(user, web, "User enters comments (*)", note="bbb")
Dataflow(web, db, "Insert query with comments", note="ccc")
Dataflow(db, web, "Retrieve comments")
Dataflow(web, user, "Show comments (*)")
tm.check()
with captured_output() as (out, err):
tm.seq()
output = out.getvalue().strip()
self.maxDiff = None
self.assertEqual(output, expected)
def test_seq_unused(self):
random.seed(0)
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, 'seq_unused.plantuml')) as x:
expected = x.read().strip()
TM.reset()
tm = TM("my test tm", description="aaa", ignoreUnused=True)
internet = Boundary("Internet")
server_db = Boundary("Server/DB")
user = Actor("User", inBoundary=internet)
web = Server("Web Server")
db = Datastore("SQL Database", inBoundary=server_db)
Lambda("Unused Lambda")
Dataflow(user, web, "User enters comments (*)", note="bbb")
Dataflow(web, db, "Insert query with comments", note="ccc")
Dataflow(db, web, "Retrieve comments")
Dataflow(web, user, "Show comments (*)")
tm.check()
with captured_output() as (out, err):
tm.seq()
output = out.getvalue().strip()
self.maxDiff = None
self.assertEqual(output, expected)
def test_dfd(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, 'dfd.dot')) as x:
expected = x.read().strip()
random.seed(0)
TM.reset()
tm = TM("my test tm", description="aaa")
internet = Boundary("Internet")
server_db = Boundary("Server/DB")
user = Actor("User", inBoundary=internet)
web = Server("Web Server")
db = Datastore("SQL Database", inBoundary=server_db)
Dataflow(user, web, "User enters comments (*)")
Dataflow(web, db, "Insert query with comments")
Dataflow(db, web, "Retrieve comments")
Dataflow(web, user, "Show comments (*)")
tm.check()
with captured_output() as (out, err):
tm.dfd()
output = out.getvalue().strip()
self.maxDiff = None
self.assertEqual(output, expected)
def test_dfd_duplicates_ignore(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, 'dfd.dot')) as x:
expected = x.read().strip()
random.seed(0)
TM.reset()
tm = TM("my test tm", description="aaa", onDuplicates=Action.IGNORE)
internet = Boundary("Internet")
server_db = Boundary("Server/DB")
user = Actor("User", inBoundary=internet)
web = Server("Web Server")
db = Datastore("SQL Database", inBoundary=server_db)
Dataflow(user, web, "User enters comments (*)")
Dataflow(user, web, "User views comments")
Dataflow(web, db, "Insert query with comments")
Dataflow(web, db, "Select query")
Dataflow(db, web, "Retrieve comments")
Dataflow(web, user, "Show comments (*)")
tm.check()
with captured_output() as (out, err):
tm.dfd()
output = out.getvalue().strip()
self.maxDiff = None
self.assertEqual(output, expected)
def test_dfd_duplicates_raise(self):
random.seed(0)
TM.reset()
tm = TM("my test tm", description="aaa", onDuplicates=Action.RESTRICT)
internet = Boundary("Internet")
server_db = Boundary("Server/DB")
user = Actor("User", inBoundary=internet)
web = Server("Web Server")
db = Datastore("SQL Database", inBoundary=server_db)
Dataflow(user, web, "User enters comments (*)")
Dataflow(user, web, "User views comments")
Dataflow(web, db, "Insert query with comments")
Dataflow(web, db, "Select query")
Dataflow(db, web, "Retrieve comments")
Dataflow(web, user, "Show comments (*)")
e = re.escape("Duplicate Dataflow found between Actor(User) and Server(Web Server): Dataflow(User enters comments (*)) is same as Dataflow(User views comments)")
with self.assertRaisesRegex(ValueError, e):
tm.check()
def test_resolve(self):
random.seed(0)
TM.reset()
tm = TM("my test tm", description="aaa")
internet = Boundary("Internet")
server_db = Boundary("Server/DB")
user = Actor("User", inBoundary=internet, inScope=False)
web = Server("Web Server")
db = Datastore("SQL Database", inBoundary=server_db)
req = Dataflow(user, web, "User enters comments (*)")
query = Dataflow(web, db, "Insert query with comments")
results = Dataflow(db, web, "Retrieve comments")
resp = Dataflow(web, user, "Show comments (*)")
TM._BagOfThreats = [
Threat(SID=klass, target=klass)
for klass in ["Actor", "Server", "Datastore", "Dataflow"]
]
tm.resolve()
self.maxDiff = None
self.assertListEqual([f.id for f in tm.findings], ['Server', 'Datastore', 'Dataflow', 'Dataflow', 'Dataflow', 'Dataflow'])
self.assertListEqual([f.id for f in user.findings], [])
self.assertListEqual([f.id for f in web.findings], ["Server"])
self.assertListEqual([f.id for f in db.findings], ["Datastore"])
self.assertListEqual([f.id for f in req.findings], ["Dataflow"])
self.assertListEqual([f.id for f in query.findings], ["Dataflow"])
self.assertListEqual([f.id for f in results.findings], ["Dataflow"])
self.assertListEqual([f.id for f in resp.findings], ["Dataflow"])
class Testpytm(unittest.TestCase):
# Test every threat loaded from threats.json - exercise the Threat.apply() function
def test_INP01(self):
lambda1 = Lambda('mylambda')
process1 = Process('myprocess')
lambda1.usesEnvironmentVariables = True
lambda1.sanitizesInput = False
lambda1.checksInputBounds = False
process1.usesEnvironmentVariables = True
process1.sanitizesInput = False
process1.checksInputBounds = False
threat = threats["INP01"]
self.assertTrue(threat.apply(lambda1))
self.assertTrue(threat.apply(process1))
def test_INP02(self):
process1 = Process('myprocess')
process1.checksInputBounds = False
threat = threats["INP02"]
self.assertTrue(threat.apply(process1))
def test_INP03(self):
web = Server('Web')
web.sanitizesInput = False
web.encodesOutput = False
threat = threats["INP03"]
self.assertTrue(threat.apply(web))
def test_CR01(self):
user = Actor("User")
web = Server("Web Server")
web.protocol = 'HTTP'
web.usesVPN = False
web.usesSessionTokens = True
user_to_web = Dataflow(user, web, "User enters comments (*)")
user_to_web.protocol = 'HTTP'
user_to_web.usesVPN = False
user_to_web.usesSessionTokens = True
threat = threats["CR01"]
self.assertTrue(threat.apply(web))
self.assertTrue(threat.apply(user_to_web))
def test_INP04(self):
web = Server("Web Server")
web.validatesInput = False
web.validatesHeaders = False
web.protocol = 'HTTP'
threat = threats["INP04"]
self.assertTrue(threat.apply(web))
def test_CR02(self):
user = Actor("User")
web = Server("Web Server")
web.protocol = 'HTTP'
web.sanitizesInput = False
web.validatesInput = False
web.usesSessionTokens = True
user_to_web = Dataflow(user, web, "User enters comments (*)")
user_to_web.protocol = 'HTTP'
user_to_web.sanitizesInput = False
user_to_web.validatesInput = False
user_to_web.usesSessionTokens = True
threat = threats["CR02"]
self.assertTrue(threat.apply(web))
self.assertTrue(threat.apply(user_to_web))
def test_INP05(self):
web = Server("Web Server")
web.validatesInput = False
threat = threats["INP05"]
self.assertTrue(threat.apply(web))
def test_INP06(self):
web = Server("Web Server")
web.protocol = 'SOAP'
web.sanitizesInput = False
web.validatesInput = False
threat = threats["INP06"]
self.assertTrue(threat.apply(web))
def test_SC01(self):
process1 = Process("Process1")
process1.implementsNonce = False
process1.data = 'JSON'
threat = threats["SC01"]
self.assertTrue(threat.apply(process1))
def test_LB01(self):
process1 = Process("Process1")
process1.implementsAPI = True
process1.validatesInput = False
process1.sanitizesInput = False
lambda1 = Lambda("Lambda1")
lambda1.implementsAPI = True
lambda1.validatesInput = False
lambda1.sanitizesInput = False
threat = threats["LB01"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(lambda1))
def test_AA01(self):
process1 = Process("Process1")
web = Server("Web Server")
process1.authenticatesSource = False
web.authenticatesSource = False
threat = threats["AA01"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(web))
def test_DS01(self):
web = Server("Web Server")
web.sanitizesInput = False
web.validatesInput = False
web.encodesOutput = False
threat = threats["DS01"]
self.assertTrue(threat.apply(web))
def test_DE01(self):
user = Actor("User")
web = Server("Web Server")
user_to_web = Dataflow(user, web, "User enters comments (*)")
user_to_web.protocol = 'HTTP'
user_to_web.isEncrypted = False
threat = threats["DE01"]
self.assertTrue(threat.apply(user_to_web))
def test_DE02(self):
web = Server("Web Server")
process1 = Process("Process1")
web.validatesInput = False
web.sanitizesInput = False
process1.validatesInput = False
process1.sanitizesInput = False
threat = threats["DE02"]
self.assertTrue(threat.apply(web))
self.assertTrue(threat.apply(process1))
def test_API01(self):
process1 = Process("Process1")
lambda1 = Lambda("Lambda1")
process1.implementsAPI = True
lambda1.implementsAPI = True
threat = threats["API01"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(lambda1))
def test_AC01(self):
web = Server("Web Server")
process1 = Process("Process1")
db = Datastore("DB")
web.hasAccessControl = False
web.authorizesSource = True
process1.hasAccessControl = False
process1.authorizesSource = False
db.hasAccessControl = False
db.authorizesSource = False
threat = threats["AC01"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(web))
self.assertTrue(threat.apply(db))
def test_INP07(self):
process1 = Process("Process1")
process1.usesSecureFunctions = False
threat = threats["INP07"]
self.assertTrue(threat.apply(process1))
def test_AC02(self):
db = Datastore("DB")
db.isShared = True
threat = threats["AC02"]
self.assertTrue(threat.apply(db))
def test_DO01(self):
process1 = Process("Process1")
web = Server("Web Server")
process1.handlesResourceConsumption = False
process1.isResilient = False
web.handlesResourceConsumption = True
threat = threats["DO01"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(web))
def test_HA01(self):
web = Server("Web Server")
web.validatesInput = False
web.sanitizesInput = False
threat = threats["HA01"]
self.assertTrue(threat.apply(web))
def test_AC03(self):
process1 = Process("Process1")
lambda1 = Lambda("Lambda1")
process1.usesEnvironmentVariables = True
process1.implementsAuthenticationScheme = False
process1.validatesInput = False
process1.authorizesSource = False
lambda1.usesEnvironmentVariables = True
lambda1.implementsAuthenticationScheme = False
lambda1.validatesInput = False
lambda1.authorizesSource = False
threat = threats["AC03"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(lambda1))
def test_DO02(self):
process1 = Process("Process1")
lambda1 = Lambda("Lambda1")
web = Server("Web Server")
db = Datastore("DB")
process1.handlesResourceConsumption = False
lambda1.handlesResourceConsumption = False
web.handlesResourceConsumption = False
db.handlesResourceConsumption = False
threat = threats["DO02"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(lambda1))
self.assertTrue(threat.apply(web))
self.assertTrue(threat.apply(db))
def test_DS02(self):
process1 = Process("Process1")
lambda1 = Lambda("Lambda1")
process1.environment = 'Production'
lambda1.environment = 'Production'
threat = threats["DS02"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(lambda1))
def test_INP08(self):
process1 = Process("Process1")
lambda1 = Lambda("Lambda1")
web = Server("Web Server")
process1.validatesInput = False
process1.sanitizesInput = False
lambda1.validatesInput = False
lambda1.sanitizesInput = False
web.validatesInput = False
web.sanitizesInput = False
threat = threats["INP08"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(lambda1))
self.assertTrue(threat.apply(web))
def test_INP09(self):
web = Server("Web Server")
web.validatesInput = False
threat = threats["INP09"]
self.assertTrue(threat.apply(web))
def test_INP10(self):
web = Server("Web Server")
web.validatesInput = False
threat = threats["INP10"]
self.assertTrue(threat.apply(web))
def test_INP11(self):
web = Server("Web Server")
web.validatesInput = False
web.sanitizesInput = False
threat = threats["INP11"]
self.assertTrue(threat.apply(web))
def test_INP12(self):
process1 = Process("Process1")
lambda1 = Lambda("Lambda1")
process1.checksInputBounds = False
process1.validatesInput = False
lambda1.checksInputBounds = False
lambda1.validatesInput = False
threat = threats["INP12"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(lambda1))
def test_AC04(self):
user = Actor("User")
web = Server("Web Server")
user_to_web = Dataflow(user, web, "User enters comments (*)")
user_to_web.data = 'XML'
user_to_web.authorizesSource = False
threat = threats["AC04"]
self.assertTrue(threat.apply(user_to_web))
def test_DO03(self):
user = Actor("User")
web = Server("Web Server")
user_to_web = Dataflow(user, web, "User enters comments (*)")
user_to_web.data = 'XML'
threat = threats["DO03"]
self.assertTrue(threat.apply(user_to_web))
def test_AC05(self):
process1 = Process("Process1")
web = Server("Web Server")
process1.providesIntegrity = False
process1.authorizesSource = False
web.providesIntegrity = False
web.authorizesSource = False
threat = threats["AC05"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(web))
def test_INP13(self):
process1 = Process("Process1")
lambda1 = Lambda("Lambda1")
process1.validatesInput = False
lambda1.validatesInput = False
threat = threats["INP13"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(lambda1))
def test_INP14(self):
process1 = Process("Process1")
lambda1 = Lambda("Lambda1")
web = Server("Web Server")
process1.validatesInput = False
lambda1.validatesInput = False
web.validatesInput = False
threat = threats["INP14"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(lambda1))
self.assertTrue(threat.apply(web))
def test_DE03(self):
user = Actor("User")
web = Server("Web Server")
user_to_web = Dataflow(user, web, "User enters comments (*)")
user_to_web.protocol = 'HTTP'
user_to_web.isEncrypted = False
user_to_web.usesVPN = False
threat = threats["DE03"]
self.assertTrue(threat.apply(user_to_web))
def test_CR03(self):
process1 = Process("Process1")
web = Server("Web Server")
process1.implementsAuthenticationScheme = False
web.implementsAuthenticationScheme = False
threat = threats["CR03"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(web))
def test_API02(self):
process1 = Process("Process1")
lambda1 = Lambda("Lambda1")
process1.implementsAPI = True
process1.validatesInput = False
lambda1.implementsAPI = True
lambda1.validatesInput = False
threat = threats["API02"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(lambda1))
def test_HA02(self):
EE = ExternalEntity("EE")
EE.hasPhysicalAccess = True
threat = threats["HA02"]
self.assertTrue(threat.apply(EE))
def test_DS03(self):
web = Server("Web Server")
web.isHardened = False
threat = threats["DS03"]
self.assertTrue(threat.apply(web))
def test_AC06(self):
web = Server("Web Server")
web.isHardened = False
web.hasAccessControl = False
threat = threats["AC06"]
self.assertTrue(threat.apply(web))
def test_HA03(self):
web = Server("Web Server")
web.validatesHeaders = False
web.encodesOutput = False
web.isHardened = False
threat = threats["HA03"]
self.assertTrue(threat.apply(web))
def test_SC02(self):
web = Server("Web Server")
web.validatesInput = False
web.encodesOutput = False
threat = threats["SC02"]
self.assertTrue(threat.apply(web))
def test_AC07(self):
web = Server("Web Server")
web.hasAccessControl = False
threat = threats["AC07"]
self.assertTrue(threat.apply(web))
def test_INP15(self):
web = Server("Web Server")
web.protocol = 'IMAP'
web.sanitizesInput = False
threat = threats["INP15"]
self.assertTrue(threat.apply(web))
def test_HA04(self):
EE = ExternalEntity("ee")
EE.hasPhysicalAccess = True
threat = threats["HA04"]
self.assertTrue(threat.apply(EE))
def test_SC03(self):
web = Server("Web Server")
web.validatesInput = False
web.sanitizesInput = False
web.hasAccessControl = False
threat = threats["SC03"]
self.assertTrue(threat.apply(web))
def test_INP16(self):
web = Server("Web Server")
web.validatesInput = False
threat = threats["INP16"]
self.assertTrue(threat.apply(web))
def test_AA02(self):
web = Server("Web Server")
process1 = Process("process")
web.authenticatesSource = False
process1.authenticatesSource = False
threat = threats["AA02"]
self.assertTrue(threat.apply(web))
self.assertTrue(threat.apply(process1))
def test_CR04(self):
web = Server("Web Server")
web.usesSessionTokens = True
web.implementsNonce = False
threat = threats["CR04"]
self.assertTrue(threat.apply(web))
def test_DO04(self):
user = Actor("User")
web = Server("Web Server")
user_to_web = Dataflow(user, web, "User enters comments (*)")
user_to_web.data = 'XML'
user_to_web.handlesResources = False
threat = threats["DO04"]
self.assertTrue(threat.apply(user_to_web))
def test_DS04(self):
web = Server("Web Server")
web.encodesOutput = False
web.validatesInput = False
web.sanitizesInput = False
threat = threats["DS04"]
self.assertTrue(threat.apply(web))
def test_SC04(self):
web = Server("Web Server")
web.sanitizesInput = False
web.validatesInput = False
web.encodesOutput = False
threat = threats["SC04"]
self.assertTrue(threat.apply(web))
def test_CR05(self):
web = Server("Web Server")
db = Datastore("db")
# Assign an algorithm other than RSA/AES (the value 'DES' is illustrative)
# so the threat condition applies; bare `!=` comparisons have no effect.
web.usesEncryptionAlgorithm = 'DES'
db.usesEncryptionAlgorithm = 'DES'
threat = threats["CR05"]
self.assertTrue(threat.apply(web))
self.assertTrue(threat.apply(db))
def test_AC08(self):
web = Server("Web Server")
web.hasAccessControl = False
threat = threats["AC08"]
self.assertTrue(threat.apply(web))
def test_DS05(self):
web = Server("Web Server")
web.usesCache = True
threat = threats["DS05"]
self.assertTrue(threat.apply(web))
def test_SC05(self):
web = Server("Web Server")
web.providesIntegrity = False
web.usesCodeSigning = False
threat = threats["SC05"]
self.assertTrue(threat.apply(web))
def test_INP17(self):
web = Server("Web Server")
web.validatesContentType = False
web.invokesScriptFilters = False
threat = threats["INP17"]
self.assertTrue(threat.apply(web))
def test_AA03(self):
web = Server("Web Server")
web.providesIntegrity = False
web.authenticatesSource = False
web.usesStrongSessionIdentifiers = False
threat = threats["AA03"]
self.assertTrue(threat.apply(web))
def test_AC09(self):
web = Server("Web Server")
web.hasAccessControl = False
web.authorizesSource = False
threat = threats["AC09"]
self.assertTrue(threat.apply(web))
def test_INP18(self):
web = Server("Web Server")
web.sanitizesInput = False
web.encodesOutput = False
threat = threats["INP18"]
self.assertTrue(threat.apply(web))
def test_CR06(self):
user = Actor("User")
web = Server("Web Server")
user_to_web = Dataflow(user, web, "User enters comments (*)")
user_to_web.protocol = 'HTTP'
user_to_web.usesVPN = False
user_to_web.implementsAuthenticationScheme = False
user_to_web.authorizesSource = False
threat = threats["CR06"]
self.assertTrue(threat.apply(user_to_web))
def test_AC10(self):
web = Server("Web Server")
web.usesLatestTLSversion = False
web.implementsAuthenticationScheme = False
web.authorizesSource = False
threat = threats["AC10"]
self.assertTrue(threat.apply(web))
def test_CR07(self):
user = Actor("User")
web = Server("Web Server")
user_to_web = Dataflow(user, web, "User enters comments (*)")
user_to_web.protocol = 'HTTP'
user_to_web.data = 'XML'
threat = threats["CR07"]
self.assertTrue(threat.apply(user_to_web))
def test_AA04(self):
web = Server("Web Server")
web.implementsServerSideValidation = False
web.providesIntegrity = False
web.authorizesSource = False
threat = threats["AA04"]
self.assertTrue(threat.apply(web))
def test_CR08(self):
user = Actor("User")
web = Server("Web Server")
user_to_web = Dataflow(user, web, "User enters comments (*)")
user_to_web.protocol = 'HTTP'
user_to_web.usesLatestTLSversion = False
threat = threats["CR08"]
self.assertTrue(threat.apply(user_to_web))
def test_INP19(self):
web = Server("Web Server")
web.usesXMLParser = False
web.disablesDTD = False
threat = threats["INP19"]
self.assertTrue(threat.apply(web))
def test_INP20(self):
process1 = Process("process")
process1.disablesiFrames = False
threat = threats["INP20"]
self.assertTrue(threat.apply(process1))
def test_AC11(self):
web = Server("Web Server")
web.usesStrongSessionIdentifiers = False
threat = threats["AC11"]
self.assertTrue(threat.apply(web))
def test_INP21(self):
web = Server("Web Server")
web.usesXMLParser = False
web.disablesDTD = False
threat = threats["INP21"]
self.assertTrue(threat.apply(web))
def test_INP22(self):
web = Server("Web Server")
web.usesXMLParser = False
web.disablesDTD = False
threat = threats["INP22"]
self.assertTrue(threat.apply(web))
def test_INP23(self):
process1 = Process("Process")
process1.hasAccessControl = False
process1.sanitizesInput = False
process1.validatesInput = False
threat = threats["INP23"]
self.assertTrue(threat.apply(process1))
def test_DO05(self):
web = Server("Web Server")
web.validatesInput = False
web.sanitizesInput = False
web.usesXMLParser = True
threat = threats["DO05"]
self.assertTrue(threat.apply(web))
def test_AC12(self):
process1 = Process("Process")
process1.hasAccessControl = False
process1.implementsPOLP = False
threat = threats["AC12"]
self.assertTrue(threat.apply(process1))
def test_AC13(self):
process1 = Process("Process")
process1.hasAccessControl = False
process1.implementsPOLP = False
threat = threats["AC13"]
self.assertTrue(threat.apply(process1))
def test_AC14(self):
process1 = Process("Process")
process1.implementsPOLP = False
process1.usesEnvironmentVariables = False
process1.validatesInput = False
threat = threats["AC14"]
self.assertTrue(threat.apply(process1))
def test_INP24(self):
process1 = Process("Process")
lambda1 = Lambda("lambda")
process1.checksInputBounds = False
process1.validatesInput = False
lambda1.checksInputBounds = False
lambda1.validatesInput = False
threat = threats["INP24"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(lambda1))
def test_INP25(self):
process1 = Process("Process")
lambda1 = Lambda("lambda")
process1.validatesInput = False
process1.sanitizesInput = False
lambda1.validatesInput = False
lambda1.sanitizesInput = False
threat = threats["INP25"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(lambda1))
def test_INP26(self):
process1 = Process("Process")
lambda1 = Lambda("lambda")
process1.validatesInput = False
process1.sanitizesInput = False
lambda1.validatesInput = False
lambda1.sanitizesInput = False
threat = threats["INP26"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(lambda1))
def test_INP27(self):
process1 = Process("Process")
process1.validatesInput = False
process1.sanitizesInput = False
threat = threats["INP27"]
self.assertTrue(threat.apply(process1))
def test_INP28(self):
web = Server("Web Server")
process1 = Process("Process")
web.validatesInput = False
web.sanitizesInput = False
web.encodesOutput = False
process1.validatesInput = False
process1.sanitizesInput = False
process1.encodesOutput = False
threat = threats["INP28"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(web))
def test_INP29(self):
web = Server("Web Server")
process1 = Process("Process")
web.validatesInput = False
web.sanitizesInput = False
web.encodesOutput = False
process1.validatesInput = False
process1.sanitizesInput = False
process1.encodesOutput = False
threat = threats["INP29"]
self.assertTrue(threat.apply(process1))
self.assertTrue(threat.apply(web))
def test_INP30(self):
process1 = Process("Process")
process1.validatesInput = False
process1.sanitizesInput = False
threat = threats["INP30"]
self.assertTrue(threat.apply(process1))
def test_INP31(self):
process1 = Process("Process")
process1.validatesInput = False
process1.sanitizesInput = False
process1.usesParameterizedInput = False
threat = threats["INP31"]
self.assertTrue(threat.apply(process1))
def test_INP32(self):
process1 = Process("Process")
process1.validatesInput = False
process1.sanitizesInput = False
process1.encodesOutput = False
threat = threats["INP32"]
self.assertTrue(threat.apply(process1))
def test_INP33(self):
process1 = Process("Process")
process1.validatesInput = False
process1.sanitizesInput = False
threat = threats["INP33"]
self.assertTrue(threat.apply(process1))
def test_INP34(self):
web = Server("web")
web.checksInputBounds = False
threat = threats["INP34"]
self.assertTrue(threat.apply(web))
def test_INP35(self):
process1 = Process("Process")
process1.validatesInput = False
process1.sanitizesInput = False
threat = threats["INP35"]
self.assertTrue(threat.apply(process1))
def test_DE04(self):
data = Datastore("DB")
data.validatesInput = False
data.implementsPOLP = False
threat = threats["DE04"]
self.assertTrue(threat.apply(data))
def test_AC15(self):
process1 = Process("Process")
process1.implementsPOLP = False
threat = threats["AC15"]
self.assertTrue(threat.apply(process1))
def test_INP36(self):
web = Server("web")
web.implementsStrictHTTPValidation = False
web.encodesHeaders = False
threat = threats["INP36"]
self.assertTrue(threat.apply(web))
def test_INP37(self):
web = Server("web")
web.implementsStrictHTTPValidation = False
web.encodesHeaders = False
threat = threats["INP37"]
self.assertTrue(threat.apply(web))
def test_INP38(self):
process1 = Process("Process")
process1.allowsClientSideScripting = True
process1.validatesInput = False
process1.sanitizesInput = False
threat = threats["INP38"]
self.assertTrue(threat.apply(process1))
def test_AC16(self):
web = Server("web")
web.usesStrongSessionIdentifiers = False
web.encryptsCookies = False
threat = threats["AC16"]
self.assertTrue(threat.apply(web))
def test_INP39(self):
process1 = Process("Process")
process1.allowsClientSideScripting = True
process1.validatesInput = False
process1.sanitizesInput = False
threat = threats["INP39"]
self.assertTrue(threat.apply(process1))
def test_INP40(self):
process1 = Process("Process")
process1.allowsClientSideScripting = True
process1.sanitizesInput = False
process1.validatesInput = False
threat = threats["INP40"]
self.assertTrue(threat.apply(process1))
def test_AC17(self):
web = Server("web")
web.usesStrongSessionIdentifiers = False
threat = threats["AC17"]
self.assertTrue(threat.apply(web))
def test_AC18(self):
process1 = Process("Process")
process1.usesStrongSessionIdentifiers = False
process1.encryptsCookies = False
process1.definesConnectionTimeout = False
threat = threats["AC18"]
self.assertTrue(threat.apply(process1))
def test_INP41(self):
process1 = Process("Process")
process1.validatesInput = False
process1.sanitizesInput = False
threat = threats["INP41"]
self.assertTrue(threat.apply(process1))
def test_AC19(self):
web = Server("web")
web.usesSessionTokens = True
web.implementsNonce = False
threat = threats["AC19"]
self.assertTrue(threat.apply(web))
def test_AC20(self):
process1 = Process("Process")
process1.definesConnectionTimeout = False
process1.usesMFA = False
process1.encryptsSessionData = False
threat = threats["AC20"]
self.assertTrue(threat.apply(process1))
def test_AC21(self):
process1 = Process("Process")
process1.implementsCSRFToken = False
process1.verifySessionIdentifiers = False
threat = threats["AC21"]
self.assertTrue(threat.apply(process1))
if __name__ == '__main__':
unittest.main()
```
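The fixtures above rebuild the same small model in nearly every test. The sketch below, assuming the same `pytm` package the test module imports, shows that model on its own: two boundaries, an actor, a server, a datastore, and four dataflows, validated with `tm.check()`.
```python
from pytm import TM, Actor, Boundary, Dataflow, Datastore, Server

# Minimal threat model mirroring the fixtures used throughout the tests above.
tm = TM("my test tm", description="aaa")
internet = Boundary("Internet")
server_db = Boundary("Server/DB")

user = Actor("User", inBoundary=internet)
web = Server("Web Server")
db = Datastore("SQL Database", inBoundary=server_db)

Dataflow(user, web, "User enters comments (*)")
Dataflow(web, db, "Insert query with comments")
Dataflow(db, web, "Retrieve comments")
Dataflow(web, user, "Show comments (*)")

tm.check()  # validate the model (raises on duplicates when onDuplicates=Action.RESTRICT)
tm.dfd()    # print the data flow diagram as Graphviz DOT, as test_dfd expects
```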
|
{
"source": "JeremyNixon/uncertainty-metrics-1",
"score": 2
}
|
#### File: uncertainty_metrics/tensorflow/accuracy.py
```python
import tensorflow as tf
from uncertainty_metrics.tensorflow import calibration
def _bin_probabilities(num_bins, index, dtype):
"""Computing corresponding probabilities.
Args:
num_bins: Number of bins to maintain over the interval [0, 1], and the bins
are uniformly spaced.
index: Which index to return.
dtype: Data type
Returns:
bin_probabilities: Tensor, the corresponding probabilities.
"""
return tf.cast(tf.linspace(0.0 + 1.0 / num_bins, 1.0, num_bins)[index], dtype)
class OracleCollaborativeAccuracy(calibration.ExpectedCalibrationError):
"""Oracle Collaborative Accuracy."""
def __init__(self,
fraction=0.01,
num_bins=100,
binary_threshold=0.5,
name=None,
dtype=None):
"""Constructs an expected collaborative accuracy metric.
The class probabilities are computed using the argmax by default, but a
custom threshold can be used in the binary case. This binary threshold is
applied to the second (taken to be the positive) class.
Args:
fraction: the fraction of total examples to send to moderators.
num_bins: Number of bins to maintain over the interval [0, 1].
binary_threshold: Threshold to use in the binary case.
name: Name of this metric.
dtype: Data type.
"""
super(OracleCollaborativeAccuracy, self).__init__(
num_bins=num_bins, name=name, dtype=dtype)
self.fraction = fraction
self.binary_threshold = binary_threshold
def _compute_pred_labels(self, probs):
"""Computes predicted labels, using binary_threshold in the binary case.
Args:
probs: Tensor of shape [..., k] of normalized probabilities associated
with each of k classes.
Returns:
Predicted class labels.
"""
return tf.cond(
tf.shape(probs)[-1] == 2,
lambda: tf.cast(probs[:, 1] > self.binary_threshold, tf.int64),
lambda: tf.math.argmax(probs, axis=-1))
def _compute_pred_probs(self, probs):
"""Computes predicted probabilities associated with the predicted labels."""
pred_labels = self._compute_pred_labels(probs)
indices = tf.stack(
[tf.range(tf.shape(probs)[0], dtype=tf.int64), pred_labels], axis=1)
return tf.gather_nd(probs, indices)
def update_state(self,
labels,
probabilities,
custom_binning_score=None,
**kwargs):
if self.binary_threshold != 0.5 and custom_binning_score is None:
# Bin by distance from threshold, i.e. send to the oracle in that order.
custom_binning_score = tf.abs(probabilities - self.binary_threshold)
super().update_state(
labels, probabilities, custom_binning_score, **kwargs)
def result(self):
"""Returns the expected calibration error."""
num_total_examples = tf.reduce_sum(self.counts)
num_oracle_examples = tf.cast(
tf.floor(num_total_examples * self.fraction), self.dtype)
non_empty_bin_mask = self.counts != 0
counts = tf.boolean_mask(self.counts, non_empty_bin_mask)
correct_sums = tf.boolean_mask(self.correct_sums, non_empty_bin_mask)
cum_counts = tf.cumsum(counts)
# Identify the final bin the oracle sees examples from, and the remaining
# number of predictions it can make on that bin.
final_oracle_bin = tf.cast(
tf.argmax(cum_counts > num_oracle_examples), tf.int32)
oracle_predictions_used = tf.cast(
tf.cond(final_oracle_bin > 0, lambda: cum_counts[final_oracle_bin - 1],
lambda: 0.), self.dtype)
remaining_oracle_predictions = num_oracle_examples - oracle_predictions_used
expected_correct_final_bin = (
correct_sums[final_oracle_bin] / counts[final_oracle_bin] *
(counts[final_oracle_bin] - remaining_oracle_predictions))
expected_correct_after_final_bin = tf.reduce_sum(
correct_sums[final_oracle_bin + 1:])
expected_correct = (
num_oracle_examples + expected_correct_final_bin +
expected_correct_after_final_bin)
return expected_correct / num_total_examples
```
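A usage sketch for the metric defined above. The import path follows the file header; the label and probability shapes, and the assumption that the parent `ExpectedCalibrationError` accepts integer labels with per-class probabilities, are inferred from how `update_state` and `result` are written, not confirmed by the source.
```python
import tensorflow as tf

from uncertainty_metrics.tensorflow.accuracy import OracleCollaborativeAccuracy

# Hypothetical inputs: integer labels and normalized two-class probabilities.
labels = tf.constant([0, 1, 1, 0])
probabilities = tf.constant([[0.9, 0.1],
                             [0.2, 0.8],
                             [0.6, 0.4],
                             [0.7, 0.3]])

# Send 25% of examples (lowest-confidence bins first) to the oracle.
metric = OracleCollaborativeAccuracy(fraction=0.25, num_bins=10)
metric.update_state(labels, probabilities)
print(float(metric.result()))
```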
|