metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "jgericardo/iclr-network",
"score": 3
}
|
#### File: iclr-network/scripts/iclr_scraper-html.py
```python
import csv
import re

import requests
from bs4 import BeautifulSoup
def replace_space(str_text):
return re.sub(' +', ' ', str_text.strip().replace("\n", "").replace("\t", ""))
page = requests.get("https://iclr.cc/Conferences/2019/Schedule?type=Poster")
soup_bowl = BeautifulSoup(page.content, "html.parser")
dataset = []
submissions_list = soup_bowl.find_all("div", class_="maincard")
for each_sub in submissions_list:
sub_info = []
sub_info.append("2019")
sub_info.append("Poster")
#print(each_sub)
title = each_sub.find("div", class_="maincardBody")
sub_info.append(replace_space(title.text))
note_authors = each_sub.find("div", class_="maincardFooter")
authors = note_authors.text.split(" · ")
sub_info.append(replace_space('; '.join(authors)))
dataset.append(sub_info)
#print()
with open('iclr2019-poster-papers.csv', 'w', newline='', encoding='utf-8') as csvfile:
csv.writer(csvfile).writerows(dataset)
```
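A minimal sketch of reading the CSV this script writes; the column order (year, type, title, authors) simply mirrors the order the fields are appended above, and no header row is written.
```python
import csv

# Read back the scraped poster list written above; each row is
# [year, type, title, authors] and there is no header row.
with open('iclr2019-poster-papers.csv', newline='', encoding='utf-8') as csvfile:
    for year, sub_type, title, authors in csv.reader(csvfile):
        print(f"{title} ({year}, {sub_type}): {authors}")
```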
|
{
"source": "jgericardo/pytorch-playground",
"score": 3
}
|
#### File: scripts/ptpg/utils.py
```python
from typing import Any, Tuple
import json
import random
import torch
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import (
OneHotEncoder,
LabelBinarizer,
MinMaxScaler
)
__author__ = "<NAME>"
def load_dataset_from_file(
dataset_path: str,
feature_range: Tuple[int] = None,
lbl_encoding: Tuple[int] = None,
random_seed: int = None
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Loads the dataset from a file.
Parameters
----------
dataset_path: str
File path of the dataset.
    feature_range: tuple, optional, default: None
Tuple containing the min and max value for each feature's range.
    lbl_encoding: tuple, optional, default: None
Tuple containing the encoding for positive and negative labels.
random_seed: int, optional, default: None
Seed used for dataset split reproducibility.
Returns
-------
X_train: numpy.ndarray
Training subset for features
y_train: numpy.ndarray
Training subset for target labels
X_test: numpy.ndarray
Test subset for features
y_test: numpy.ndarray
Test subset for target labels
"""
dataset = np.genfromtxt(dataset_path, delimiter=',')
# slice all rows and up until 2nd last column
data_X = dataset[:, :dataset.shape[1]-1]
# slice all rows and only last column
data_y = dataset[:, dataset.shape[1]-1].astype(int)
# split dataset
X_train, X_test, y_train, y_test = train_test_split(
data_X,
data_y,
test_size=0.2,
stratify=data_y,
random_state=random_seed
)
# rescale features to specified range
if isinstance(feature_range, tuple):
minmaxsc = MinMaxScaler(feature_range=feature_range)
X_train = minmaxsc.fit_transform(X_train)
X_test = minmaxsc.transform(X_test)
# label transformations to match NN output layer
onehotenc = OneHotEncoder(sparse=False)
onehotenc.fit(data_y.reshape(-1, 1))
y_train = onehotenc.transform(y_train.reshape(-1, 1))
y_test = onehotenc.transform(y_test.reshape(-1, 1))
# encode one-hot labels to specified positive and negative values
if isinstance(lbl_encoding, tuple):
lbl_binarizer = LabelBinarizer(
neg_label=lbl_encoding[0],
pos_label=lbl_encoding[1]
)
lbl_binarizer.fit(y_train)
y_train = lbl_binarizer.transform(y_train)
y_test = lbl_binarizer.transform(y_test)
return (X_train, y_train, X_test, y_test)
def set_global_seed(seed: int) -> None:
"""
Set all pseudorandom functions to the same seed
    Parameters
    ----------
seed: int
global seed to use for all pseudorandom generators
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def torch_data_loader(
features: np.ndarray,
labels: np.ndarray,
batch_size: int,
shuffle: bool = None,
num_workers: int = 0
) -> torch.utils.data.DataLoader:
"""
    Creates a data loader from feature and label arrays.
Parameters
---------
features: numpy.ndarray
Training input features.
labels: numpy.ndarray
Training labels.
batch_size: int
Batch size
shuffle: bool, optional, default: None
Flag indicating whether to shuffle dataset or not.
num_workers: int, optional, default: 0
Number of workers to use during sampling iteration.
Returns
-------
torch.utils.data.DataLoader: data loader
"""
features = torch.Tensor(features)
labels = torch.Tensor(labels)
dataset = torch.utils.data.TensorDataset(features, labels)
data_loader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=shuffle
)
return data_loader
def encode_labels(labels: np.ndarray, encoding: tuple) -> np.ndarray:
"""
Encode labels in another encoding.
    Parameters
    ----------
labels: numpy.ndarray
One-hot encoded labels.
encoding: tuple
Coding scheme for the labels.
Returns
-------
numpy.ndarray: Transformed labels.
"""
    lbl_bin = LabelBinarizer(neg_label=encoding[0], pos_label=encoding[1])
return lbl_bin.fit_transform(labels)
def acc_score(y_pred: np.ndarray, y_labels: np.ndarray) -> float:
"""
Evaluate performance between predictions and ground truth labels.
Parameters
----------
y_pred: numpy.ndarray
Numpy list of one-hot encoded predictions or logits.
y_labels: numpy.ndarray
Numpy list of ground truth labels.
Returns
-------
float:
Accuracy score of model's predictions.
"""
correct = 0
total = y_pred.shape[0]
for i in range(total):
predicted = np.argmax(y_pred[i])
test = np.argmax(y_labels[i])
correct = correct + (1 if predicted == test else 0)
return (float(correct)/float(total))*100.
def torch_acc_score(
y_pred: torch.Tensor,
y_true: torch.Tensor
) -> float:
"""
Evaluate performance between predictions and ground truth labels.
Parameters
----------
y_pred: torch.Tensor
Model predictions.
    y_true: torch.Tensor
Ground truth labels.
Returns
-------
float: Accuracy score of model's predictions.
"""
if not isinstance(y_pred, torch.Tensor):
y_pred = torch.from_numpy(y_pred)
if not isinstance(y_true, torch.Tensor):
y_true = torch.from_numpy(y_true)
return (y_pred.argmax(1) == y_true.argmax(1)).sum().item() / len(y_true)
def export_to_json(
results_data: dict, filename: str,
sort_keys: bool = True, indent: int = 4
) -> None:
"""
Export dictionary to a JSON file.
Parameters
----------
results_data: dict
Dictionary containing results data.
filename: str
Output file name.
sort_keys: bool, optional, default: True
Flag to sort keys in the dictionary, including inner dictionaries.
indent: int, optional, default: 4
        Number of spaces to indent per level.
"""
with open(filename, 'w') as out_file:
json.dump(results_data, out_file, sort_keys=sort_keys, indent=indent)
def load_results_from_json(filename: str) -> dict:
"""
Parses a JSON file and returns results as a dictionary.
Parameters
----------
filename: str
Input file name to load.
    Returns
    -------
    dict:
        Dictionary parsed from the JSON file.
    """
with open(filename, 'r') as in_file:
data = json.load(in_file)
return data
def make_json_readable(
data: dict, sort_keys: bool = True, indent: int = 4
) -> str:
"""
Converts a dictionary into a human readable string.
Parameters
----------
data: dict
        Dictionary to convert into a JSON-formatted string.
sort_keys: bool, optional, default: True
Flag to sort keys in the dictionary, including inner dictionaries.
indent: int, optional, default: 4
        Number of spaces to indent per level.
"""
return json.dumps(data, sort_keys=sort_keys, indent=indent)
def load_datasets(
dataset_paths: dict,
feature_range: Tuple[int, int] = None,
label_encoding: Tuple[int, int] = None,
random_seed: int = None
) -> dict:
"""
Helper function to load multiple datasets with consistent
feature ranges and label encodings.
Parameters
---------
dataset_paths: dict
A dictionary of dataset paths with custom keys.
feature_range: Tuple[int, int], optional, default=None
Feature range to use for all the dataset features.
label_encoding: Tuple[int, int], optional, default=None
Label encoding to use for all the dataset labels.
random_seed: int, optional, default=None
Fixed random seed to use for each dataset.
Returns
-------
datasets: dict
A dictionary of datasets using the same custom keys
as the passed dictionary of paths (for easier lookup).
"""
datasets = {}
for key in dataset_paths.keys():
set_global_seed(random_seed)
dataset = load_dataset_from_file(
dataset_path=dataset_paths[key],
feature_range=feature_range,
lbl_encoding=label_encoding,
random_seed=random_seed
)
datasets[key] = dataset
return datasets
def send_message(key: str, value: Any) -> None:
"""
Wrapper for print. Serializes data in JSON format.
Parameters
----------
key: str
The dictionary key.
value: any
The list or single item for the dictionary value.
"""
print(json.dumps({key: value}, indent=2, sort_keys=True))
```
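A hedged usage sketch of the helpers above. The import path assumes the package is importable as `ptpg`, the CSV path is a placeholder, and the file is expected to carry integer class labels in its last column, as `load_dataset_from_file` assumes.
```python
# Hypothetical usage; "data/example.csv" is a placeholder dataset path.
from ptpg.utils import load_dataset_from_file, set_global_seed, torch_data_loader

set_global_seed(42)
X_train, y_train, X_test, y_test = load_dataset_from_file(
    "data/example.csv",       # CSV: feature columns + integer label in the last column
    feature_range=(0, 1),     # rescale features with MinMaxScaler
    lbl_encoding=(-1, 1),     # re-encode the one-hot labels as -1/+1
    random_seed=42,
)
train_loader = torch_data_loader(X_train, y_train, batch_size=16, shuffle=True)
```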
|
{
"source": "jgericke/flask-nameko",
"score": 2
}
|
#### File: flask-nameko/tests/test_connection_pool.py
```python
import pytest
import eventlet
from datetime import timedelta
from mock import Mock, patch
from flask_nameko.connection_pool import ConnectionPool, Connection
from flask_nameko.errors import ClientUnavailableError
@pytest.fixture
def some_fixture():
pass
@pytest.fixture
def get_connection():
connection = Mock(side_effect=lambda: object())
return connection
def test_connections_recycled(get_connection):
pool = ConnectionPool(get_connection, initial_connections=0)
o = pool.get_connection()
pool.release_connection(o)
o1 = pool.get_connection()
o2 = pool.get_connection()
assert o1 == o
assert o1 != o2
def test_new_connections_used(get_connection):
pool = ConnectionPool(get_connection, initial_connections=0)
o = pool.get_connection()
o1 = pool.get_connection()
assert o1 != o
def test_max_connections_raises(get_connection):
pool = ConnectionPool(get_connection, initial_connections=0, max_connections=2)
pool.get_connection()
pool.get_connection()
with pytest.raises(ClientUnavailableError):
pool.get_connection(next_timeout=0)
def test_creates_initial_connections(get_connection):
pool = ConnectionPool(get_connection, initial_connections=2)
assert get_connection.call_count == 2
def test_connections_get_recycled(get_connection):
pool = ConnectionPool(
get_connection,
initial_connections=1,
max_connections=1,
recycle=3600
)
conn = pool.get_connection()
pool.release_connection(conn)
conn2 = pool.get_connection()
pool.release_connection(conn2)
assert conn == conn2
with patch.object(conn2, 'is_stale', return_value=True):
conn3 = pool.get_connection()
assert conn3 != conn
assert conn3 != conn2
def test_connection_is_stale_for_stale_connection():
connection = Connection(None)
eventlet.sleep(2)
assert connection.is_stale(timedelta(seconds=1))
def test_connection_is_not_stale_for_good_connection():
connection = Connection(None)
assert not connection.is_stale(timedelta(seconds=3600))
```
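A short sketch of the pool API these tests exercise; the factory below just builds plain objects, and the constructor arguments mirror the ones used in the tests.
```python
from flask_nameko.connection_pool import ConnectionPool

# Sketch based only on the calls exercised by the tests above.
pool = ConnectionPool(lambda: object(), initial_connections=2, max_connections=4, recycle=3600)
conn = pool.get_connection()
try:
    pass  # use the connection here
finally:
    pool.release_connection(conn)  # return it so it can be recycled
```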
|
{
"source": "jgerigmeyer/jquery-django-messages-ui",
"score": 3
}
|
#### File: jquery-django-messages-ui/messages_ui/middleware.py
```python
import json
from django.contrib import messages
from django.utils.six import text_type
class AjaxMessagesMiddleware(object):
"""
Middleware to handle messages for AJAX requests.
If the AJAX response is already JSON, add a "messages" key to it (or
append to an existing "messages" key) a list of messages (each
message is an object with "level", "message", and "tags" keys).
    If the AJAX response is currently HTML, turn it into JSON, store the
    HTML content under the "html" key, and add a "messages" key as well.
    If the AJAX response is neither JSON nor HTML, return it as-is (with
    no messages attached, and without iterating over messages).
If the AJAX response has a status code other than 200, or has an attribute
``no_messages`` that is ``True``, it will not be modified (and messages
will not be read).
"""
def process_response(self, request, response):
handle_response = (
request.is_ajax() and
response.status_code == 200 and
not getattr(response, 'no_messages', False)
)
if handle_response:
content_type = response.get('content-type', 'None').split(";")[0]
content = response.content.decode('utf-8')
if content_type == "application/json":
data = json.loads(content)
elif content_type == "text/html":
data = {"html": content}
else:
return response
messagelist = data.setdefault("messages", [])
for message in messages.get_messages(request):
messagelist.append({
"level": message.level,
"message": text_type(message.message),
"tags": message.tags,
})
response.content = json.dumps(data)
response["content-type"] = "application/json"
response["content-length"] = len(response.content)
return response
```
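A hedged sketch of wiring the middleware up and the payload shape it produces. The dotted path follows the package layout in the file header, and the old-style `MIDDLEWARE_CLASSES` setting matches the `process_response`-only class above (pre-Django-1.10 style).
```python
# settings.py (assumption: old-style middleware setting, matching the
# process_response-only class above)
MIDDLEWARE_CLASSES = [
    # ...
    'messages_ui.middleware.AjaxMessagesMiddleware',
]

# An HTML AJAX response is then rewritten into JSON shaped roughly like:
# {"html": "<div>...</div>",
#  "messages": [{"level": 25, "message": "Saved.", "tags": "success"}]}
```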
|
{
"source": "j-germino/sc-rna-tools-git",
"score": 2
}
|
#### File: sc-rna-tools-git/scrnatools/_configs.py
```python
import logging
import os
# sc-rna-tools package imports
from ._utils import type_check
class Config:
"""Config manager for sc-rna-tools"""
def __init__(
self,
verbosity="warning",
debug_memory=False,
debug_timing=False,
log_path="sc-rna-tools_logs",
save_logs=False,
):
self._verbosity = verbosity
self._debug_memory = debug_memory
self._debug_timing = debug_timing
self._log_path = log_path
self._save_logs = save_logs
self._loggers = [] # list of loggers created by modules
@property
def verbosity(self) -> str:
return self._verbosity
@verbosity.setter
def verbosity(self, verbosity: str):
"""Sets the verbosity level of the logger
Allowed are: 'debug', 'info', 'warning', 'error', 'critical'
"""
type_check(verbosity, "verbosity", str)
# set the level of all the loggers
for logger in self._loggers:
self._set_log_level(logger, verbosity)
self._verbosity = verbosity
@property
def debug_memory(self) -> bool:
"""Determines whether line by line memory usage is logged for debugging functions"""
return self._debug_memory
@debug_memory.setter
def debug_memory(self, debug_memory: bool):
type_check(debug_memory, "debug_memory", bool)
self._debug_memory = debug_memory
@property
def debug_timing(self) -> bool:
"""Determines whether function timing is logged for debugging"""
return self._debug_timing
@debug_timing.setter
def debug_timing(self, debug_timing: bool):
type_check(debug_timing, "debug_timing", bool)
self._debug_timing = debug_timing
@property
def log_path(self) -> str:
"""The path where log files are saved"""
return self._log_path
@log_path.setter
def log_path(self, path: str):
type_check(path, "path", str)
self._log_path = path
@property
def save_logs(self) -> bool:
"""Determines whether logging messages are saved to a log file"""
return self._save_logs
@save_logs.setter
def save_logs(self, save_logs: bool):
type_check(save_logs, "save_logs", bool)
# if logs are not currently being written to a file, but being updated to be written to a file, add a
# FileHandler to each logger
if not self.save_logs and save_logs:
# make sure log directory exists
self.check_log_path()
# create the logging format and FileHandler
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s: %(message)s", "%Y-%m-%d %H:%M:%S")
file_handler = logging.FileHandler(f"{self.log_path}/sc-rna-tools.log")
file_handler.setFormatter(formatter)
# add the FileHandler to each logger if it doesn't already have a FileHandler
for logger in self._loggers:
handlers = logger.handlers
if sum([isinstance(h, logging.FileHandler) for h in handlers]) == 0: # check if there is a FileHandler
logger.addHandler(file_handler)
# if logs are currently being written to a file, but being updated to not be written to a file, remove the
# FileHandler to each logger
if self.save_logs and not save_logs:
for logger in self._loggers:
# find the FileHandler for this logger and remove it
for h in logger.handlers:
if isinstance(h, logging.FileHandler):
logger.removeHandler(h)
self._save_logs = save_logs
def __str__(self) -> str:
return f"ScoreConfig(" \
f"verbosity: {self.verbosity}, " \
f"debug_memory: {self.debug_memory}, " \
f"debug_timing: {self.debug_timing}, " \
f"log_path: {self.log_path}, " \
f"save_logs: {self.save_logs})"
def clear_logs(self):
"""Clears all logs saved in the logs path"""
if os.path.exists(self._log_path):
for f in os.listdir(self._log_path):
# Remove all the files in the log directory
os.remove(os.path.join(self._log_path, f))
# remove the log directory
os.rmdir(self._log_path)
def check_log_path(self):
"""Checks if the log path exists, and creates it if it doesn't"""
if not os.path.exists(self._log_path):
os.makedirs(self._log_path)
def create_logger(self, name: str):
"""Called by modules to create a logger for that function
Parameters
----------
name
The name of the logger (typically the module name creating the logger)
"""
type_check(name, 'name', str)
# create a logger, StreamHandler, and the formatter for that logger
logger = logging.getLogger(name)
handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s: %(message)s", "%Y-%m-%d %H:%M:%S")
handler.setFormatter(formatter)
handlers = logger.handlers
if sum([isinstance(h, logging.StreamHandler) for h in handlers]) == 0: # check if there is a StreamHandler
logger.addHandler(handler)
# if logs are being saved to a file as well, create a FileHandler for the new logger too
if self.save_logs:
self.check_log_path()
file_handler = logging.FileHandler(f"{self.log_path}/sc-rna-tools.log")
file_handler.setFormatter(formatter)
            if sum([isinstance(h, logging.FileHandler) for h in handlers]) == 0: # check if there is a FileHandler
logger.addHandler(file_handler)
# Set the log level to the current verbosity
self._set_log_level(logger, self.verbosity)
# Add the logger to the master list
self._loggers.append(logger)
return logger
@staticmethod
def _set_log_level(logger: logging.Logger, level: str):
"""Sets the log level of a modules logger
Parameters
----------
logger
The logger to set the level of
level
String corresponding to the name of the logging level to set.
Allowed: 'info', 'debug', 'warning', 'error', 'critical'
"""
log_levels = {
"info": logging.INFO,
"debug": logging.DEBUG,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL
}
if level not in log_levels:
raise ValueError(f"'{level}' is not a valid logging level "
f"('info', 'debug', 'warning', 'error', 'critical')")
logger.setLevel(log_levels[level])
configs = Config()
```
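A brief usage sketch of the shared `configs` object defined above; the logger name is arbitrary.
```python
from scrnatools._configs import configs

configs.verbosity = "debug"   # raises the level on every registered logger
configs.save_logs = True      # also writes to <log_path>/sc-rna-tools.log
logger = configs.create_logger("scrnatools.example")  # arbitrary logger name
logger.debug("current configuration: %s", configs)
```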
#### File: scrnatools/qc/_filter_cells.py
```python
from anndata import AnnData
from typing import Tuple, Optional
# sc-rna-tools package imports
from .._configs import configs
from .._utils import debug
from ..plotting import qc_plotting
logger = configs.create_logger(__name__.split('_', 1)[1])
# -------------------------------------------------------function----------------------------------------------------- #
@debug(logger, configs)
def filter_cells(
adata: AnnData,
gene_thresholds: Tuple[int, int],
count_thresholds: Tuple[int, int],
mt_threshold: int = 10,
save_path: Optional[str] = None,
file_type: str = "png",
*args, **kwargs
) -> AnnData:
"""Filters cells based on gene number, total counts, and % mitochondrial
Parameters
----------
adata
The AnnData with the data to filter
gene_thresholds
A Tuple of thresholds for the number of genes per cell with 'gene_thresholds[0]' being the lower bound and
'gene_thresholds[1]' being the upper bound (both exclusive).
count_thresholds
A Tuple of thresholds for the number of total counts cell with 'count_thresholds[0]' being the lower bound and
'count_thresholds[1]' being the upper bound (both exclusive).
mt_threshold
The maximum percent mitochondrial reads per cell. Default 10%.
    save_path
        The path and file name prefix to save QC plots to ('_qc_plots' or '_filtered_qc_plots' and the file type
        provided with 'file_type' will be appended).
    file_type
        The file type for the figures to be saved.
    args
        Arguments to pass on to qc_plotting function calls.
    kwargs
        Keyword arguments to pass on to qc_plotting function calls.
Returns
-------
An AnnData object with cells that don't pass the thresholds filtered out
"""
qc_plotting(
adata,
counts_thresholds=count_thresholds,
genes_thresholds=gene_thresholds,
save_path=f"{save_path}_qc_plots.{file_type}",
*args, **kwargs
)
logger.info(f"Number of cells before QC filtering: {len(adata.obs)}")
filtered_adata = adata[adata.obs.pct_counts_mt < mt_threshold].copy()
filtered_adata = filtered_adata[filtered_adata.obs.total_counts < count_thresholds[1]]
filtered_adata = filtered_adata[filtered_adata.obs.total_counts > count_thresholds[0]]
filtered_adata = filtered_adata[filtered_adata.obs.n_genes_by_counts < gene_thresholds[1]]
filtered_adata = filtered_adata[filtered_adata.obs.n_genes_by_counts > gene_thresholds[0]].copy()
logger.info(f"Number of cells after QC filtering: {len(filtered_adata.obs)}")
qc_plotting(
filtered_adata,
show_thresholds=False,
save_path=f"{save_path}_filtered_qc_plots.{file_type}",
*args, **kwargs
)
return filtered_adata
```
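A hedged call sketch. The thresholds are placeholders, `adata` is an existing `AnnData`, the QC columns (`pct_counts_mt`, `total_counts`, `n_genes_by_counts`) are assumed to already be in `adata.obs`, and the function is assumed to be re-exported from `scrnatools.qc`.
```python
import scrnatools

# Placeholder thresholds; adata.obs must already contain
# 'pct_counts_mt', 'total_counts', and 'n_genes_by_counts'.
filtered = scrnatools.qc.filter_cells(
    adata,
    gene_thresholds=(200, 6000),
    count_thresholds=(500, 30000),
    mt_threshold=10,
    save_path="figures/sample1",
    file_type="png",
)
```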
#### File: scrnatools/tools/_cell_type_similarity.py
```python
from anndata import AnnData
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import StandardScaler
from typing import Optional
import pandas as pd
# sc-rna-tools package imports
from .._configs import configs
from .._utils import debug
logger = configs.create_logger(__name__.split('_', 1)[1])
# -------------------------------------------------------function----------------------------------------------------- #
@debug(logger, configs)
def cell_type_similarity(
adata: AnnData,
signatures: pd.DataFrame,
normalize: str = "cell",
) -> AnnData:
"""
Parameters
----------
adata
The AnnData object containing cells to score, with library size rescaled and log-normalized counts stored in
'adata.raw'
signatures
A DataFrame containing transcriptome signatures for each cell type to be tested. Columns are cell types and rows
are the log-normalized expression of each gene in that cell type. Can be created from cell type clusters of a
scRNAseq dataset using 'create_cell_type_signature'
normalize
How to rescale the cosine similarity scores. Default is 'cell', which calculates a z-score normalized for each
cell across all cell types in 'signatures'. Other possible values are 'cell type' which calculates a z-score
normalized for each cell type in 'signatures' across all cells and 'none' which returns the raw cosine
similarity values.
Returns
-------
    The AnnData passed in with 'adata', with the cosine similarity scores for each cell type in 'signatures' added as
    columns to 'adata.obs'
"""
# Raw attribute contains the log-normalized counts
cell_data = pd.DataFrame(adata.raw.X.toarray(), columns=adata.raw.var_names, index=adata.obs.index).T
cell_data = cell_data / cell_data.sum(axis=0) # Normalize per cell expression to sum to 1
# Join with gene x cell type signature matrix so that only shared genes are kept
all_df = cell_data.join(
signatures,
how="inner"
)
all_df = all_df / all_df.sum(axis=0) # Normalize per cell expression to sum to 1
# Calculate cosine similarity of single cells to each cell type
sim = cosine_similarity(
all_df[all_df.columns[:cell_data.shape[1]]].values.T, # Single-cell expression data
all_df[all_df.columns[cell_data.shape[1]:]].values.T # Cell type gene signature expression data
)
# Create dataframe with per cell similarity scores for each cell type
similarity = pd.DataFrame(
sim,
columns=all_df.columns[cell_data.shape[1]:],
index=all_df.columns[:cell_data.shape[1]]
)
if normalize == "cell type":
logger.info(f"Scaling cosine similarity scores to z-scores by cell type")
# Scale similarity scores
scaler = StandardScaler()
minmax_scale = scaler.fit(similarity)
x_minmax = minmax_scale.transform(similarity)
scale_by_row = pd.DataFrame(x_minmax)
elif normalize == "cell":
logger.info(f"Scaling cosine similarity scores to z-scores by cell")
# Scale similarity scores
scaler = StandardScaler()
minmax_scale = scaler.fit(similarity.T)
x_minmax = minmax_scale.transform(similarity.T).T
scale_by_row = pd.DataFrame(x_minmax)
else:
logger.info(f"Leaving similarity scores unscaled")
scale_by_row = pd.DataFrame(similarity)
# Add cosine similarity scores to adata obs
scale_by_row.index = similarity.index
if normalize != "none":
scale_by_row.columns = similarity.columns + "_cosine_similarity_" + normalize + "_z_score"
else:
scale_by_row.columns = similarity.columns + "_cosine_similarity"
adata.obs = adata.obs.join(scale_by_row)
return adata
```
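A hedged usage sketch; the signature CSV path is a placeholder (rows are genes, columns are cell types, e.g. built with `create_cell_type_signature` shown later), and the function is assumed to be re-exported from `scrnatools.tools`.
```python
import pandas as pd
import scrnatools

# Placeholder reference signatures: genes x cell types.
signatures = pd.read_csv("reference_signatures.csv", index_col=0)
adata = scrnatools.tools.cell_type_similarity(adata, signatures, normalize="cell")
# Per-cell scores land in adata.obs["<cell type>_cosine_similarity_cell_z_score"]
```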
#### File: scrnatools/tools/_cluster_de.py
```python
from anndata import AnnData
from pandas import DataFrame
from typing import Optional, Dict
from scvi.model import SCVI
# sc-rna-tools package imports
from .._configs import configs
from .._utils import debug, check_path
logger = configs.create_logger(__name__.split('_', 1)[1])
# -------------------------------------------------------function----------------------------------------------------- #
@debug(logger, configs)
def cluster_de(
adata: AnnData,
model: SCVI,
cluster_key: str,
    lfc_mean_threshold: float = 0,
    bayes_factor_threshold: float = 3,
    non_zeroes_proportion_threshold: float = 0.1,
save_path: Optional[str] = None,
) -> Dict[str, DataFrame]:
"""Calculates DE marker genes for data clusters
Parameters
----------
adata
The data to analyze
model
The scVI model for 'adata'
cluster_key
The column name of the cluster data in 'adata.obs'
lfc_mean_threshold
The minimum lfc_mean to filter de genes on (exclusive)
bayes_factor_threshold
The minimum bayes factor to filter de genes on (exclusive)
non_zeroes_proportion_threshold
The minimum proportion of cells with non-zero expression filter de genes on (exclusive)
save_path
The path to save the marker gene lists to
Returns
-------
A dictionary with keys equal to the categories of the cluster column in 'adata.obs' (i.e. cell types) linked to
DataFrames of the filtered DE marker genes for that cluster
"""
de_df = model.differential_expression(groupby=cluster_key, )
cats = adata.obs[cluster_key].cat.categories
markers = {}
    for c in cats:
cid = f"{c} vs Rest"
cell_type_df = de_df.loc[de_df.comparison == cid]
cell_type_df = cell_type_df[cell_type_df.lfc_mean > lfc_mean_threshold]
cell_type_df = cell_type_df[cell_type_df["bayes_factor"] > bayes_factor_threshold]
cell_type_df = cell_type_df[cell_type_df["non_zeros_proportion1"] > non_zeroes_proportion_threshold]
markers[c] = cell_type_df
if save_path is not None:
check_path(save_path)
for cluster, marker_genes in markers.items():
if save_path[-1] == "/":
save_path = save_path[:-1]
marker_genes.to_csv(f"{save_path}/{cluster}.csv")
return markers
```
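A hedged call sketch; `model` is assumed to be an `scvi.model.SCVI` trained on `adata`, `'leiden'` is a placeholder cluster column, and the function is assumed to be re-exported from `scrnatools.tools`.
```python
import scrnatools

# 'leiden' is a placeholder adata.obs column with cluster labels.
markers = scrnatools.tools.cluster_de(
    adata,
    model,
    cluster_key="leiden",
    lfc_mean_threshold=0.5,
    bayes_factor_threshold=3,
    save_path="de_results",
)
print(list(markers))  # one filtered DataFrame of marker genes per cluster
```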
#### File: scrnatools/tools/_create_cell_type_signature.py
```python
from anndata import AnnData
from typing import Optional
import pandas as pd
# sc-rna-tools package imports
from .._configs import configs
from .._utils import debug, check_path
logger = configs.create_logger(__name__.split('_', 1)[1])
# -------------------------------------------------------function----------------------------------------------------- #
@debug(logger, configs)
def create_cell_type_signature(
adata: AnnData,
save_path: Optional[str] = None,
cell_type_labels: str = "cell_type",
data_loc: str = "X",
) -> pd.DataFrame:
"""
Parameters
----------
adata
The AnnData object containing the cell type expression data, with cell labels as a key in 'adata.obs' and
library size corrected and log-normalized counts in adata.X, adata.raw, or a layer in adata.layers.
save_path
The path to save a csv containing the cell type gene signatures to
cell_type_labels
The column name in 'adata.obs' containing the cell type labels for each cell. Default is 'cell_type'
data_loc
The location of the library size corrected and log-normalized gene expression data in 'adata'. Default is
'X' but can also be 'raw' or a valid key from 'adata.layers'
Returns
-------
    A DataFrame containing the pseudobulked gene signatures for each cell type in adata.obs[cell_type_labels], with
    columns containing data for each cell type and rows containing the average expression for each gene within that
    cell type.
"""
if cell_type_labels not in adata.obs:
raise ValueError(f"{cell_type_labels} not a valid key in 'adata.obs'")
signatures = pd.DataFrame(index=adata.var_names)
for cell_type in adata.obs[cell_type_labels].unique():
if data_loc == "X":
signatures[cell_type] = adata[adata.obs[cell_type_labels] == cell_type].X.mean(axis=0)
elif data_loc == "raw":
signatures[cell_type] = adata[adata.obs[cell_type_labels] == cell_type].raw.X.mean(axis=0)
else:
if data_loc in adata.layers:
signatures[cell_type] = adata[adata.obs[cell_type_labels] == cell_type].layers[data_loc].mean(axis=0)
else:
raise ValueError(f"{data_loc} not 'X', 'raw', or a valid layer in 'adata.layers'")
if save_path is not None:
        check_path(save_path.rsplit("/", 1)[0])
logger.info(f"Saving signature DataFrame to {save_path}")
signatures.to_csv(save_path)
return signatures
```
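A hedged call sketch; `'cell_type'` is a placeholder `adata.obs` column, log-normalized counts are assumed to live in `adata.X`, and the function is assumed to be re-exported from `scrnatools.tools`.
```python
import scrnatools

signatures = scrnatools.tools.create_cell_type_signature(
    adata,
    cell_type_labels="cell_type",   # placeholder obs column with cluster labels
    data_loc="X",                   # log-normalized counts assumed in adata.X
    save_path="signatures/reference.csv",
)
```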
#### File: scrnatools/tools/_get_expression_matrix.py
```python
from typing import Optional
from scipy.sparse import issparse
from pandas import DataFrame
from anndata import AnnData
# sc-rna-tools package imports
from .._configs import configs
from .._utils import debug
logger = configs.create_logger(__name__.split('_', 1)[1])
# -------------------------------------------------------function----------------------------------------------------- #
@debug(logger, configs)
def get_expression_matrix(anndata: AnnData, gene_data: Optional[str] = None, ) -> DataFrame:
"""Extracts the cell x gene expression matrix from an AnnData object
Parameters
----------
anndata
The AnnData to pull the expression matrix from
gene_data
Where to get the expression data from the AnnData object. If a layer from the AnnData['layers'] is passed,
that is used, otherwise 'X' or 'raw' can be used.
Returns
-------
A Pandas DataFrame containing the expression matrix (cells x genes)
Raises
-------
ValueError
        When 'gene_data' is not 'X', 'raw', or a valid layer in 'anndata.layers'
"""
if gene_data == "X":
# Convert to a dense matrix if needed
if issparse(anndata.X):
matrix = DataFrame(anndata.X.todense(), index=anndata.obs.index, columns=anndata.var_names)
else:
matrix = DataFrame(anndata.X, index=anndata.obs.index, columns=anndata.var_names)
# get a layer from the AnnData if specified
elif gene_data == "raw":
raw_data = anndata.raw.to_adata()
# Convert to a dense matrix if needed
if issparse(raw_data.X):
matrix = DataFrame(raw_data.X.todense(), index=anndata.obs.index, columns=anndata.raw.var_names)
else:
matrix = DataFrame(raw_data.X, index=anndata.obs.index, columns=anndata.raw.var_names)
elif gene_data in anndata.layers:
if issparse(anndata.layers[gene_data]):
matrix = DataFrame(anndata.layers[gene_data].todense(), index=anndata.obs.index, columns=anndata.var_names)
else:
            matrix = DataFrame(anndata.layers[gene_data], index=anndata.obs.index, columns=anndata.var_names)
else:
raise ValueError(f"{gene_data} is not 'X', 'raw', or a valid layer name in '{anndata.layers}'")
return matrix
```
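A hedged call sketch, assuming the function is re-exported from `scrnatools.tools`.
```python
import scrnatools

# Dense cells x genes matrix from adata.X; pass "raw" or a layer name
# to pull expression from another slot.
expr = scrnatools.tools.get_expression_matrix(adata, gene_data="X")
print(expr.shape)  # (n_cells, n_genes)
```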
|
{
"source": "jgerrish/bitey",
"score": 3
}
|
#### File: bitey/cpu/addressing_mode.py
```python
from dataclasses import dataclass
import logging
from typing import ClassVar
from bitey.cpu.arch import EightBitArch
@dataclass
class AddressingMode:
"""
Addressing mode base class
"""
bytes: int
"""
The number of bytes an instruction with this addressing mode takes,
including the instruction itself.
"""
def __post_init__(self):
self.logger = logging.getLogger("bitey.cpu.addressing_mode")
def get_value(self, flags, registers, memory):
"""
Get the value at the address
Returns a tuple of the address and value for convenience
"""
return (None, None)
def get_address(self, flags, registers, memory):
"""
Return the effective address
"""
# The size to consume is bytes minus one for the opcode itself
size = self.bytes - 1
if size > 0:
registers["PC"].add(size)
return None
def get_inst_str(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
if address is not None:
return "${0:02x}".format(address)
else:
return ""
@dataclass
class AbsoluteAddressingMode(AddressingMode):
"""
Absolute addressing mode
Absolute addressing is a three-byte instruction
The address is encoded in the next two bytes after the opcode
The first byte contains the opcode
The second byte contains the low-order byte of the effective address
The effective address contains the data
The third byte contains the high-order byte of the effective address
The following absolute JMP command would jump to the NOP instruction
0x0000 0x4C JMP
0x0001 0x12
0x0002 0x34
...
0x3412 NOP
"""
adl: int = 0
"The low-order byte"
adh: int = 0
"The high-order byte"
bytes: ClassVar[int] = 3
def get_address(self, flags, registers, memory):
self.adl = memory.read(registers["PC"].get())
registers["PC"].inc()
self.adh = memory.read(registers["PC"].get())
registers["PC"].inc()
return memory.get_16bit_address(self.adl, self.adh)
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
        address = self.get_address(flags, registers, memory)
self.logger.debug("AbsoluteAddressing get_value address: {}".format(address))
if address is not None:
return "${0:04x}".format(address)
else:
return ""
@dataclass
class AccumulatorAddressingMode(AddressingMode):
"""
Accumulator Addressing Mode
The value is set to the current value of the accumulator
Instructions can test the addressing mode to set the accumulator after they
have performed their operation.
"""
bytes: ClassVar[int] = 1
def get_address(self, flags, registers, memory):
return None
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
value = registers["A"].get()
return (address, value)
def get_inst_str(self, flags, registers, memory):
self.get_address(flags, registers, memory)
return ""
@dataclass
class AbsoluteIndirectAddressingMode(AddressingMode):
"""
    Absolute Indirect addressing mode
Absolute Indirect addressing is a three-byte instruction
The address is encoded in the next two bytes after the opcode
The first byte contains the opcode
The second byte contains the low-order byte of an address
that contains the effective address
The third byte contains the high-order byte of an address that contains
the effective address
The effective address points to the actual location
The following absolute indirect JMP command would jump to the NOP instruction
0x0000 0x6C JMP
0x0001 0x12
0x0002 0x34
...
0x3412 0x15
0x3413 0x34
...
0x3415 0xEA NOP
"""
adl: int = 0
"The low-order byte"
adh: int = 0
"The high-order byte"
bytes: ClassVar[int] = 3
def get_address(self, flags, registers, memory):
self.adl = memory.read(registers["PC"].get())
registers["PC"].inc()
self.adh = memory.read(registers["PC"].get())
registers["PC"].inc()
address_to_address = memory.get_16bit_address(self.adl, self.adh)
self.adl = memory.read(address_to_address)
self.adh = memory.read(address_to_address + 1)
effective_address = memory.get_16bit_address(self.adl, self.adh)
return effective_address
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
if address is not None:
return "(${0:04x})".format(address)
else:
return ""
@dataclass
class AbsoluteIndirectPageBoundaryBugAddressingMode(AddressingMode):
"""
Absolute Indirect addressing mode with the Page Boundary Bug
Absolute Indirect addressing is a three-byte instruction
The address is encoded in the next two bytes after the opcode
The first byte contains the opcode
The second byte contains the low-order byte of an address
that contains the effective address
The third byte contains the high-order byte of an address that contains
the effective address
The effective address points to the actual location
This version of the addressing mode exhibits the JMP page boundary bug
seen on some NMOS chips.
If the base address is on the edge of a page boundary, it wraps to the
beginning of that page instead of going to the beginning of the next page.
The following absolute indirect JMP command would jump to the NOP instruction
0x0000 0x34 ; most-significant byte of address
...
0x00FE 0x6C JMP
0x00FF 0x12 ; least-significant byte of address
...
0x3412 0x15
0x3413 0x34
...
0x3415 0xEA NOP
"""
adl: int = 0
"The low-order byte"
adh: int = 0
"The high-order byte"
bytes: ClassVar[int] = 3
def get_address(self, flags, registers, memory):
pc = registers["PC"].get()
self.adl = memory.read(pc)
if (pc & 0xFF) == 0xFF:
# Memory form 0x??FF should wrap to the same page
self.adh = memory.read(pc & 0xFF00)
registers["PC"].inc()
else:
# Other memory should work the same as the normal AbsoluteIndirectAddressingMode
# TODO: Verify that the PC ends up in the correct place
# (it should still go to the next instruction)
# "technically" it doesn't matter, since this bug is exclusive to JMP
# instructions
# But for more accurate simulation and cycle-dependent stuff, it may matter
registers["PC"].inc()
self.adh = memory.read(registers["PC"].get())
registers["PC"].inc()
address_to_address = memory.get_16bit_address(self.adl, self.adh)
self.adl = memory.read(address_to_address)
self.adh = memory.read(address_to_address + 1)
effective_address = memory.get_16bit_address(self.adl, self.adh)
return effective_address
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
if address is not None:
return "(${0:04x})".format(address)
else:
return ""
@dataclass
class AbsoluteXAddressingMode(AddressingMode):
"""
Absolute,X addressing mode
Absolute addressing is a three-byte instruction
The address is encoded in the next two bytes after the opcode
The first byte contains the opcode
The second byte contains the low-order byte of the effective address
The effective address contains the data
The third byte contains the high-order byte of the effective address
The X Index is then added to this address
"""
adl: int = 0
"""
The low-order byte
This does not include the X offset
"""
adh: int = 0
"""
The high-order byte
This does not include the X offset
"""
bytes: ClassVar[int] = 3
def get_address(self, flags, registers, memory):
self.adl = memory.read(registers["PC"].get())
# TODO: Maybe wrap the flag with bounds checking too, read expected
# behavior
registers["PC"].inc()
self.adh = memory.read(registers["PC"].get())
registers["PC"].inc()
address = memory.get_16bit_address(self.adl, self.adh)
address += registers["X"].get()
# Wrap at end of memory
# address = address % 0xFFFF
address = address % 0x10000
return address
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
        address = self.get_address(flags, registers, memory)
if address is not None:
return "${0:04x},X".format(memory.get_16bit_address(self.adl, self.adh))
else:
return ""
@dataclass
class AbsoluteYAddressingMode(AddressingMode):
"""
Absolute,Y addressing mode
Absolute,Y addressing is a three-byte instruction
The address is encoded in the next two bytes after the opcode
The first byte contains the opcode
The second byte contains the low-order byte of the effective address
The effective address contains the data
The third byte contains the high-order byte of the effective address
The Y Index is then added to this address
"""
adl: int = 0
"""
The low-order byte
This does not include the X offset
"""
adh: int = 0
"""
The high-order byte
This does not include the X offset
"""
bytes: ClassVar[int] = 3
def get_address(self, flags, registers, memory):
self.adl = memory.read(registers["PC"].get())
# TODO: Maybe wrap the flag with bounds checking too, read expected
# behavior
registers["PC"].inc()
self.adh = memory.read(registers["PC"].get())
registers["PC"].inc()
address = memory.get_16bit_address(self.adl, self.adh)
address += registers["Y"].get()
# Wrap at end of memory
# address = address % 0xFFFF
address = address % 0x10000
return address
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
        address = self.get_address(flags, registers, memory)
if address is not None:
return "${0:04x},Y".format(memory.get_16bit_address(self.adl, self.adh))
else:
return ""
@dataclass
class ImmediateAddressingMode(AddressingMode):
"""
Immediate addressing mode
The value is encoded as a constant in the next byte
"""
bytes: ClassVar[int] = 2
def get_value(self, flags, registers, memory):
byte = memory.read(registers["PC"].get())
# TODO: Maybe wrap the flag with bounds checking too, read expected
# behavior
registers["PC"].inc()
return (None, byte)
def get_inst_str(self, flags, registers, memory):
(address, value) = self.get_value(flags, registers, memory)
return "#${0:02x}".format(value)
@dataclass
class ImpliedAddressingMode(AddressingMode):
"""
Implied addressing mode
The address is encoded in the instruction
"""
bytes: ClassVar[int] = 1
def get_address(self, flags, registers, memory):
return None
def get_value(self, flags, registers, memory):
return (None, None)
@dataclass
class IndexedIndirectAddressingMode(AddressingMode):
"""
Indexed Indirect addressing mode
Get an address in zero page memory from the next byte and the X Index.
The X index is added to the base address before fetching the effective
address. This is different than Indirect Indexed, where the Index
is added after fetching the address from Zero Page.
Also called Indirect X
A1 80 LDA ($80,X)
"""
bytes: ClassVar[int] = 2
def get_address(self, flags, registers, memory):
zero_page_address = registers["PC"].get()
registers["PC"].inc()
# X register size is 8-bits
zero_page_address += registers["X"].get()
adl = zero_page_address
# TODO: Test for case we go beyond the page boundary
# Wraparound is assumed
# This behavior may still be incorrect
# TODO: Use one of the native functional test suites like
# https://github.com/Klaus2m5/6502_65C02_functional_tests.git
# after implementation
adh = (zero_page_address + 1) % 0x100
address = memory.get_16bit_value(adl, adh)
return address
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
address = memory.read(registers["PC"].get())
self.get_address(flags, registers, memory)
return "(${0:02x},X)".format(address)
@dataclass
class IndirectIndexedAddressingMode(AddressingMode):
"""
Indirect Indexed addressing mode.
Get an address in zero page memory from the next byte.
The address in zero page is two bytes long. The first byte is the
low-order byte. The second is the high-order byte.
It then adds the Y Index to this address.
Also called Indirect Y
B1 80 LDA ($80),Y
"""
bytes: ClassVar[int] = 2
def get_address(self, flags, registers, memory):
zero_page_address = registers["PC"].get()
registers["PC"].inc()
adl = zero_page_address
# TODO: Test for case we go beyond the page boundary
# Wrap-around is assumed
adh = zero_page_address + 1
address = memory.get_16bit_value(adl, adh)
address += registers["Y"].get()
# TODO: Verify wrapping is the correct behavior
address = address % 0x10000
return address
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
address = memory.read(registers["PC"].get())
self.get_address(flags, registers, memory)
return "(${0:02x}),Y".format(address)
@dataclass
class IndirectXAddressingMode(AddressingMode):
"""
Another name for Indexed Indirect Addressing
TODO: Maybe consolidate these
"""
bytes: ClassVar[int] = 2
def __post_init__(self):
self.am = IndexedIndirectAddressingMode()
def get_address(self, flags, registers, memory):
return self.am.get_address(flags, registers, memory)
def get_value(self, flags, registers, memory):
return (self, self.am.get_value(flags, registers, memory))
def get_inst_str(self, flags, registers, memory):
return self.am.get_inst_str(flags, registers, memory)
@dataclass
class IndirectYAddressingMode(AddressingMode):
"""
Another name for Indirect Indexed Addressing
TODO: Maybe consolidate these
"""
bytes: ClassVar[int] = 2
def __post_init__(self):
self.am = IndirectIndexedAddressingMode()
def get_address(self, flags, registers, memory):
return self.am.get_address(flags, registers, memory)
def get_value(self, flags, registers, memory):
return self.am.get_value(flags, registers, memory)
def get_inst_str(self, flags, registers, memory):
return self.am.get_inst_str(flags, registers, memory)
@dataclass
class ZeroPageAddressingMode(AddressingMode):
"""
Zero Page addressing mode
The address in Zero Page is encoded as a constant in the next byte
"""
bytes: ClassVar[int] = 2
def get_address(self, flags, registers, memory):
address = memory.read(registers["PC"].get())
registers["PC"].inc()
# TODO: Create exception API
assert address <= 0xFF
return address
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
@dataclass
class ZeroPageXAddressingMode(AddressingMode):
"""
Zero Page X addressing mode
Compute the address by adding the value the PC points to
plus the X register value.
    Wraps if the value is greater than 0xFF.
"""
bytes: ClassVar[int] = 2
def get_address(self, flags, registers, memory):
address = memory.read(registers["PC"].get())
registers["PC"].inc()
address += registers["X"].get()
# wrap on values > 0xFF
address = address % 0x100
return address
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
address = memory.read(registers["PC"].get())
self.get_value(flags, registers, memory)
return "${0:02x},X".format(address)
@dataclass
class ZeroPageYAddressingMode(AddressingMode):
"""
Zero Page Y addressing mode
Compute the address by adding the value the PC points to
plus the Y register value.
    Wraps if the value is greater than 0xFF.
"""
bytes: ClassVar[int] = 2
def get_value(self, flags, registers, memory):
address = memory.read(registers["PC"].get())
registers["PC"].inc()
address += registers["Y"].get()
# wrap on values > 0xFF
address = address % 0x100
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
address = memory.read(registers["PC"].get())
self.get_value(flags, registers, memory)
return "${0:02x},Y".format(address)
@dataclass
class RelativeAddressingMode(AddressingMode):
"""
Relative addressing mode
The value in the next byte is added to the PC to find the effective
address.
The effective address is calculated from the PC after it has been
incremented reading the offset, not from the JMP opcode position.
This uses two's complement, and supports negative offsets.
"""
bytes: ClassVar[int] = 2
def get_address(self, flags, registers, memory):
"Get the effective address"
offset = memory.read(registers["PC"].get())
# Calculate two's complement to get negative value
offset = EightBitArch.twos_complement_to_signed_int(offset)
# TODO: update flags
registers["PC"].inc()
effective_address = registers["PC"].get() + offset
return effective_address % 0x10000
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
"Return the address as an effective address"
(address, value) = self.get_value(flags, registers, memory)
if address is not None:
return "${0:04x}".format(address)
else:
return ""
```
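A standalone sketch (not the bitey memory API) of the two conventions these modes rely on: the low and high operand bytes combine little-endian into a 16-bit address, which is what `memory.get_16bit_address(adl, adh)` is presumed to do, and the indexed modes wrap at the end of the 64 KiB address space.
```python
# Little-endian combination of adl/adh and the 16-bit wrap used above.
def combine_16bit(adl: int, adh: int) -> int:
    return (adh << 8) | adl

assert combine_16bit(0x12, 0x34) == 0x3412   # matches the absolute JMP example above
assert (0xFFFF + 0x02) % 0x10000 == 0x0001   # Absolute,X / Absolute,Y wrap-around
```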
#### File: bitey/cpu/arch.py
```python
class IntegerValueError(Exception):
"""
Represents an exception when the argument to a function is an invalid value.
For example, it may be too small or too large.
"""
class EightBitArch:
    @staticmethod
    def high_nibble(byte):
"""
Get the high nibble of a byte
"""
return (byte >> 4) & 0x0F
    @staticmethod
    def low_nibble(byte):
"""
Get the low nibble of a byte
"""
return byte & 0x0F
    @staticmethod
    def decimal_value(byte):
"""
Get the 4-bit Binary Coded Decimal (BCD) value of a byte
The decimal value of a byte is found by "concatenating" the
high nibble and low nibble, or adding them after multiplying
the high nibble by ten.
"""
return (EightBitArch.high_nibble(byte) * 10) + EightBitArch.low_nibble(byte)
    @staticmethod
    def signed_int_to_twos_complement(signed_int_value):
"""
Convert a signed integer to a single-byte two's-complement value
"""
# The range of negative values represented in two's complement stored
# in a single byte is -128 to 127 inclusive
if (signed_int_value < -128) or (signed_int_value > 127):
raise IntegerValueError
if signed_int_value < 0:
twos_complement_value = (0xFF - abs(signed_int_value)) + 1
else:
twos_complement_value = signed_int_value
return twos_complement_value
    @staticmethod
    def twos_complement_to_signed_int(twos_complement_value):
"""
Convert a single-byte negative value represented as
two's-complement to a signed integer value.
"""
if (twos_complement_value < 0x00) or (twos_complement_value > 0xFF):
raise IntegerValueError
if twos_complement_value > 0x7F:
# Calculate two's complement to get negative value
signed_int_value = -((0xFF - twos_complement_value) + 1)
else:
signed_int_value = twos_complement_value
return signed_int_value
```
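A few worked values for the conversions above, imported the same way `addressing_mode.py` uses them.
```python
from bitey.cpu.arch import EightBitArch

assert EightBitArch.high_nibble(0xAB) == 0x0A
assert EightBitArch.low_nibble(0xAB) == 0x0B
assert EightBitArch.decimal_value(0x42) == 42                  # BCD 0x42 -> 42
assert EightBitArch.signed_int_to_twos_complement(-1) == 0xFF
assert EightBitArch.twos_complement_to_signed_int(0x80) == -128
```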
#### File: cpu/flag/flag_json_decoder.py
```python
from dataclasses import dataclass
import json
from json import JSONDecoder
from typing import ClassVar
from bitey.cpu.flag.flag import Flag, Flags
from bitey.cpu.flag.carry_flag import CarryFlag
from bitey.cpu.flag.decimal_flag import DecimalFlag
from bitey.cpu.flag.negative_flag import NegativeFlag
from bitey.cpu.flag.zero_flag import ZeroFlag
@dataclass
class FlagJSONDecoder(JSONDecoder):
"""
Decode a flag definition in JSON format
"""
flag_map: ClassVar[dict[str, Flag]] = {
"C": CarryFlag,
"N": NegativeFlag,
"Z": ZeroFlag,
"D": DecimalFlag,
}
def decode(self, json_doc):
"""
Decode a flag definition in JSON format
json_doc should be a parsed JSON structure
"""
if (
("short_name" in json_doc)
and ("name" in json_doc)
and ("bit_field_pos" in json_doc)
and ("status" in json_doc)
):
status = False
if json_doc["status"] == 0:
status = False
elif json_doc["status"] == 1:
status = True
short_name = json_doc["short_name"]
if "options" in json_doc:
options = json_doc["options"]
else:
options = None
# Create a specific class if it exists
if short_name in FlagJSONDecoder.flag_map:
flag_class = FlagJSONDecoder.flag_map[short_name]
return flag_class(
short_name,
json_doc["name"],
json_doc["bit_field_pos"],
status,
options,
)
return Flag(
short_name,
json_doc["name"],
json_doc["bit_field_pos"],
status,
options,
)
else:
# Return None if the flag JSON object is missing fields or invalid
return None
class FlagsJSONDecoder(JSONDecoder):
"""
Decode a list of flag definitions in JSON format
"""
def decode(self, json_doc):
parsed_json = json.loads(json_doc)
return self.decode_parsed(parsed_json)
def decode_parsed(self, parsed_json):
flag_list = []
rjd = FlagJSONDecoder()
for flag in parsed_json:
f = rjd.decode(flag)
# Only append the flag if all fields are present and the JSON
# is valid for the flag
if f:
flag_list.append(f)
# TODO: Some things should be initialized to certain values
# Make sure setting the flags byte to zero on start is ok
return Flags(flag_list, None)
```
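A hedged example of the JSON shape the decoders above accept; the required keys are the ones checked in `FlagJSONDecoder.decode`, and the concrete values here are illustrative.
```python
from bitey.cpu.flag.flag_json_decoder import FlagsJSONDecoder

# Illustrative flag definitions; required keys are "short_name", "name",
# "bit_field_pos", and "status" ("options" is optional).
flags_json = """
[
    {"short_name": "C", "name": "Carry", "bit_field_pos": 0, "status": 0},
    {"short_name": "Z", "name": "Zero", "bit_field_pos": 1, "status": 0}
]
"""
flags = FlagsJSONDecoder().decode(flags_json)
```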
#### File: cpu/instruction/an.py
```python
from dataclasses import dataclass
from bitey.cpu.instruction.instruction import (
Instruction,
IncompleteInstruction,
)
@dataclass
class AND(Instruction):
"""
AND: AND Memory with Accumulator
Perform a bitwise and with the Accumulator and a value in memory.
Store the result in the accumulator.
Sets the zero flag if the result is zero.
Sets the negative flag if the result has a one in bit seven.
"""
def instruction_execute(self, cpu, memory, value, address=None):
"""
Execute the instruction, bit-wise anding the accumulator and memory
"""
if value is not None:
cpu.registers["A"].set(cpu.registers["A"].get() & value)
self.set_flags(cpu.flags, cpu.registers)
else:
raise IncompleteInstruction
def set_flags(self, flags, registers):
"""
Set the zero flag if the accumulator is zero as the result of the AND.
Resets the zero flag if the accumulator is not zero as the result of the AND.
Sets the negative (N) flag if bit 7 is one.
"""
flags["N"].test_register_result(registers["A"])
flags["Z"].test_register_result(registers["A"])
```
#### File: cpu/instruction/bcc.py
```python
from dataclasses import dataclass
from bitey.cpu.instruction.instruction import Instruction
@dataclass
class BCC(Instruction):
"""
BCC: Branch on Carry Clear
Branch if the Carry Flag is not True
"""
def instruction_execute(self, cpu, memory, value, address=None):
if (address is not None) and (cpu.flags["C"].status is not True):
cpu.registers["PC"].set(address)
```
#### File: cpu/instruction/cld.py
```python
from dataclasses import dataclass
from bitey.cpu.instruction.instruction import Instruction
@dataclass
class CLD(Instruction):
"CLD Clear Decimal Mode"
def execute(self, cpu, memory):
"Execute the instruction"
self.set_flags(cpu.flags, cpu.registers)
return
def set_flags(self, flags, registers):
flags["D"].clear()
return
```
#### File: cpu/instruction/dec.py
```python
from bitey.cpu.instruction.instruction import (
Instruction,
IncompleteInstruction,
)
class DE(Instruction):
"Generic register decrement instruction"
def __init__(self, name, opcode, description, options, register):
"Initialize with the register"
super().__init__(name, opcode, description, options)
self.register = register
def instruction_execute(self, cpu, memory, value, address=None):
"""
Execute the instruction, decrementing the register by one.
"""
cpu.registers[self.register].dec()
self.set_flags(cpu.flags, cpu.registers)
def set_flags(self, flags, registers):
flags["Z"].test_register_result(registers[self.register])
class DEX(DE):
"DEX: Decrement Index X by One"
def __init__(self, name, opcode, description, options):
super().__init__(name, opcode, description, options, "X")
class DEY(DE):
"DEY: Decrement Index Y by One"
def __init__(self, name, opcode, description, options):
super().__init__(name, opcode, description, options, "Y")
class DEC(Instruction):
"Decrement Memory by One"
def instruction_execute(self, cpu, memory, value, address=None):
"""
        Execute the instruction, decrementing the value in memory by one.
"""
if value is not None:
value = value - 1
if value < 0:
value = 0xFF
memory.write(address, value)
self.set_flags(cpu.flags, cpu.registers, value)
else:
raise IncompleteInstruction
def set_flags(self, flags, registers, value):
# The zero flag is set when the value is zero, not necessarily on a wrap
if value == 0x00:
flags["Z"].set()
```
#### File: cpu/instruction/instruction_json_decoder.py
```python
import json
from json import JSONDecoder
import logging
from bitey.cpu.instruction.opcode import OpcodeJSONDecoder, OpcodesJSONDecoder
from bitey.cpu.instruction.instruction import Instructions, InstructionSet
from bitey.cpu.instruction.instruction_factory import (
InstructionFactory,
InstructionClassFactory,
)
class InstructionJSONDecoder(JSONDecoder):
"""
Decode an instruction definition in JSON format.
The instance generation logic is collected in here, it should be
refactored to the other classes.
"""
def __init__(self):
self.logger = logging.getLogger(
"bitey.cpu.instruction.instruction_json_decoder.InstructionJSONDecoder"
)
def decode(self, json_doc):
parsed_json = json.loads(json_doc)
return self.decode_parsed(parsed_json)
def decode_parsed(self, parsed_json):
if ("name" in parsed_json) and ("description" in parsed_json):
name = parsed_json["name"]
description = parsed_json["description"]
if "opcode" in parsed_json:
opcode_decoder = OpcodeJSONDecoder()
opcode = opcode_decoder.decode_parsed(parsed_json["opcode"])
else:
opcode = None
if "options" in parsed_json:
options = parsed_json["options"]
else:
options = None
return InstructionFactory.build(name, opcode, description, options)
else:
# Return None if the instruction JSON object is missing fields or invalid
return None
class InstructionClassJSONDecoder(JSONDecoder):
"""
Decode an instruction class definition in JSON format.
    The instance generation logic is collected here; it should be
    refactored into the other classes.
"""
def __init__(self):
self.logger = logging.getLogger(
"bitey.cpu.instruction.instruction_json_decoder.InstructionClassJSONDecoder"
)
def decode(self, json_doc):
parsed_json = json.loads(json_doc)
return self.decode_parsed(parsed_json)
def decode_parsed(self, parsed_json):
if ("name" in parsed_json) and ("description" in parsed_json):
name = parsed_json["name"]
description = parsed_json["description"]
if "opcodes" in parsed_json:
opcodes_decoder = OpcodesJSONDecoder()
opcodes = opcodes_decoder.decode_parsed(parsed_json["opcodes"])
else:
opcodes = None
if "options" in parsed_json:
options = parsed_json["options"]
else:
options = None
icf = InstructionClassFactory.build(name, opcodes, description, options)
return icf
else:
# Return None if the instruction JSON object is missing fields or invalid
return None
class InstructionsJSONDecoder(JSONDecoder):
"""
Decode a list of register definitions in JSON format
"""
# TODO: Define this format formally
# TODO: Extend to allow multiple address modes in the instruction definitions
def decode(self, json_doc):
parsed_json = json.loads(json_doc)
return self.decode_parsed(parsed_json)
def decode_parsed(self, parsed_json):
instruction_list = []
ijd = InstructionJSONDecoder()
for instruction in parsed_json:
i = ijd.decode_parsed(instruction)
# Only append the instruction if all fields are present and the JSON
# is valid for the instruction
if i:
instruction_list.append(i)
return Instructions(instruction_list)
class InstructionSetJSONDecoder(JSONDecoder):
"""
Decode a list of instruction class definitions in JSON format
"""
# TODO: Define this format formally
def decode(self, json_doc):
parsed_json = json.loads(json_doc)
return self.decode_parsed(parsed_json)
def decode_parsed(self, parsed_json):
instruction_list = []
ijd = InstructionClassJSONDecoder()
for instruction in parsed_json:
i = ijd.decode_parsed(instruction)
# Only append the instruction if all fields are present and the JSON
# is valid for the instruction
if i:
instruction_list.append(i)
return InstructionSet(instruction_list)
```
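For reference, a sketch of the JSON shape these decoders look for, based only on the keys checked above ("name", "description", "opcode"/"opcodes", optional "options") and the "implied" addressing-mode string used in the opcode tests further below; the real chip/6502.json schema may differ.
```python
# Hypothetical instruction-class entry in the shape InstructionClassJSONDecoder
# expects; the field values are illustrative, the authoritative definitions
# live in chip/6502.json.
example_json = """
{
    "name": "TXS",
    "description": "Transfer Index X to Stack Pointer",
    "opcodes": [
        { "opcode": 154, "addressing_mode": "implied" }
    ]
}
"""

# decoder = InstructionClassJSONDecoder()
# instruction_class = decoder.decode(example_json)
```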
#### File: cpu/instruction/nop.py
```python
from dataclasses import dataclass
from bitey.cpu.instruction.instruction import Instruction
@dataclass
class NOP(Instruction):
"""
    NOP: No Operation
Doesn't do anything
"""
def instruction_execute(self, cpu, memory, value, address=None):
return
```
#### File: cpu/instruction/php.py
```python
from dataclasses import dataclass
from bitey.cpu.instruction.instruction import Instruction
@dataclass
class PHP(Instruction):
"""
PHP: Push Processor Status on Stack
Push the processor status register onto the stack.
This updates the stack pointer register to point to the next empty location.
"""
def instruction_execute(self, cpu, memory, value, address=None):
cpu.stack_push(memory, cpu.registers["P"].get())
```
#### File: cpu/instruction/rti.py
```python
from dataclasses import dataclass
from bitey.cpu.instruction.instruction import Instruction
@dataclass
class RTI(Instruction):
"""
RTI: Return from Interrupt
Return from an Interrupt
Restore the return address from the stack so we can return from the interrupt.
"""
def instruction_execute(self, cpu, memory, value=None, address=None):
"RTI: Return from Interrupt"
# The interrupt stores the flag data on top of the stack
cpu.registers["P"].set(cpu.stack_pop(memory))
# The interrupt stores the return address after the flag data
cpu.registers["PC"].set(cpu.stack_pop_address(memory))
# Set flags
self.set_flags(cpu.flags, cpu.registers)
def set_flags(self, flags, registers):
"Clear the Interrupt Disable flag"
flags["I"].clear()
```
#### File: cpu/instruction/rts.py
```python
from dataclasses import dataclass
from bitey.cpu.instruction.instruction import Instruction
@dataclass
class RTS(Instruction):
"RTS: Return from Subroutine"
def instruction_execute(self, cpu, memory, value, address=None):
"""
Execute the instruction, getting the return address from the stack
and jumping there.
"""
cpu.registers["PC"].set(cpu.stack_pop_address(memory))
```
#### File: cpu/instruction/sei.py
```python
from dataclasses import dataclass
from bitey.cpu.instruction.instruction import Instruction
@dataclass
class SEI(Instruction):
"SEI: Set Interrupt Disable"
def execute(self, cpu, memory):
"Execute the instruction"
self.set_flags(cpu.flags, cpu.registers)
return
def set_flags(self, flags, registers):
flags["I"].set()
return
```
#### File: cpu/instruction/ta.py
```python
from bitey.cpu.instruction.instruction import Instruction
class TAU(Instruction):
"Generic transfer accumulator to index instruction"
def __init__(self, name, opcode, description, options, register):
"Initialize with the register"
super().__init__(name, opcode, description, options)
self.register = register
def instruction_execute(self, cpu, memory, value, address=None):
"Execute the instruction, storing the accumulator value into the register"
self.result = cpu.registers["A"].get()
cpu.registers[self.register].set(self.result)
self.set_flags(cpu.flags, cpu.registers)
def set_flags(self, flags, registers):
"""
        Sets flags based on the result of the transfer operation
"""
flags["N"].test_result(self.result)
flags["Z"].test_result(self.result)
class TAX(TAU):
"TAX: Transfer Accumulator to Index X"
def __init__(self, name, opcode, description, options=None):
super().__init__(name, opcode, description, options, "X")
class TAY(TAU):
"TAY: Transfer Accumulator to Index Y"
def __init__(self, name, opcode, description, options=None):
super().__init__(name, opcode, description, options, "Y")
class TUA(Instruction):
"Generic transfer index to accumulator instruction"
def __init__(self, name, opcode, description, options, register):
"Initialize with the register"
super().__init__(name, opcode, description, options)
self.register = register
def instruction_execute(self, cpu, memory, value, address=None):
"Execute the instruction, storing the register value into the accumulator"
self.result = cpu.registers[self.register].get()
cpu.registers["A"].set(self.result)
self.set_flags(cpu.flags, cpu.registers)
def set_flags(self, flags, registers):
"""
Sets flags based on the result of the subtract operation
"""
flags["N"].test_result(self.result)
flags["Z"].test_result(self.result)
class TXA(TUA):
"TAX: Transfer Index X to Accumulator"
def __init__(self, name, opcode, description, options=None):
super().__init__(name, opcode, description, options, "X")
class TYA(TUA):
"TAY: Transfer Index Y to Accumulator"
def __init__(self, name, opcode, description, options=None):
super().__init__(name, opcode, description, options, "Y")
```
#### File: bitey/cpu/pin.py
```python
from dataclasses import dataclass
from enum import Enum
class State(Enum):
"State of a pin, can either be LOW or HIGH"
LOW = 1
HIGH = 2
@dataclass
class Pin:
"""
Physical pins on the microprocessor.
Most of the code in this project is higher-level, ignoring
things like clock-cycles and physical structure of the address
and data bus.
The Pin and Pins classes provide some exceptions to this general rule.
In particular, the reset pin provides a safe start to the initialization
of the processor.
"""
name: str = ""
"Name of the pin"
short_name: str = ""
"Short name of the pin"
state: State = State.LOW
"""
State of the pin, low or high
Default to low, so the processor is in reset mode.
    Some other pins may need to be brought high to be in a normal state,
such as the IRQ pin.
"""
def set_high(self):
self.state = State.HIGH
def set_low(self):
self.state = State.LOW
def get(self):
return self.state
@dataclass
class RST(Pin):
"""
The reset pin
When the reset pin is low, the processor is in an uninitialized state.
"""
class IRQ(Pin):
"""
The IRQ pin
When the IRQ line is low, an interrupt has been requested.
Multiple lines may be connected to this pin.
"""
@dataclass
class Pins:
"""
The set of pins on the CPU
"""
pins: list[Pin]
def __post_init__(self):
"Create a dictionary so we can access pins by short name"
self.pin_dict = {}
for f in self.pins:
self.pin_dict[f.short_name] = f
def __getitem__(self, i):
return self.pin_dict[i]
```
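A minimal sketch of wiring the Pin and Pins classes together and looking a pin up by its short name; the names used here are illustrative rather than the project's actual pin configuration.
```python
# Illustrative only: build a small pin set and access pins by short name.
pins = Pins([
    RST(name="Reset", short_name="RST"),
    IRQ(name="Interrupt Request", short_name="IRQ", state=State.HIGH),
])
assert pins["RST"].get() == State.LOW   # pins default to LOW (reset mode)
pins["RST"].set_high()
assert pins["RST"].get() == State.HIGH
```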
#### File: cpu/instruction/test_bitey_cpu_instruction_bit.py
```python
import pytest
import tests.computer.computer
import tests.memory.memory
# TODO Maybe refactor so these are not needed
from bitey.cpu.addressing_mode import AbsoluteAddressingMode, ZeroPageAddressingMode
from bitey.cpu.instruction.opcode import Opcode
from bitey.cpu.instruction.bit import BIT
# module scope means run once per test module
@pytest.fixture(scope="module")
def setup():
computer = tests.computer.computer.init_computer()
yield computer
def test_cpu_instruction_bit_zeropage(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x21)
# The zero page location to read the value from
computer.memory.write(0x00, 0x01)
# The value
computer.memory.write(0x01, 0x3C)
i1_opcode = Opcode(0x24, ZeroPageAddressingMode())
i1 = BIT("BIT", i1_opcode, "Test Bits in Memory with Accumulator")
tests.computer.computer.execute_explicit_instruction(
computer, i1_opcode, i1, [], [("Z", False), ("V", False), ("N", False)], []
)
assert i1.result == 0x20
def test_cpu_instruction_bit_zeropage_negative_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x61)
# The zero page location to read the value from
computer.memory.write(0x00, 0x01)
# The value
computer.memory.write(0x01, 0x9D)
i1_opcode = Opcode(0x24, ZeroPageAddressingMode())
i1 = BIT("BIT", i1_opcode, "Test Bits in Memory with Accumulator")
tests.computer.computer.execute_explicit_instruction(
computer, i1_opcode, i1, [], [("Z", False), ("V", False), ("N", True)], []
)
assert i1.result == 0x01
def test_cpu_instruction_bit_zeropage_overflow_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x9D)
# The zero page location to read the value from
computer.memory.write(0x00, 0x01)
# The value
computer.memory.write(0x01, 0x61)
i1_opcode = Opcode(0x24, ZeroPageAddressingMode())
i1 = BIT("BIT", i1_opcode, "Test Bits in Memory with Accumulator")
tests.computer.computer.execute_explicit_instruction(
computer, i1_opcode, i1, [], [("Z", False), ("V", True), ("N", False)], []
)
assert i1.result == 0x01
def test_cpu_instruction_bit_zeropage_overflow_and_negative_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x9D)
# The zero page location to read the value from
computer.memory.write(0x00, 0x01)
# The value
computer.memory.write(0x01, 0xE1)
i1_opcode = Opcode(0x24, ZeroPageAddressingMode())
i1 = BIT("BIT", i1_opcode, "Test Bits in Memory with Accumulator")
tests.computer.computer.execute_explicit_instruction(
computer, i1_opcode, i1, [], [("Z", False), ("V", True), ("N", True)], []
)
assert i1.result == 0x81
def test_cpu_instruction_bit_zeropage_zero_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x1C)
# The zero page location to read the value from
computer.memory.write(0x00, 0x01)
# The value
computer.memory.write(0x01, 0x21)
i1_opcode = Opcode(0x24, ZeroPageAddressingMode())
i1 = BIT("BIT", i1_opcode, "Test Bits in Memory with Accumulator")
tests.computer.computer.execute_explicit_instruction(
computer, i1_opcode, i1, [], [("Z", True), ("V", False), ("N", False)], []
)
assert i1.result == 0x00
def test_cpu_instruction_bit_zeropage_zero_and_negative_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x61)
# The zero page location to read the value from
computer.memory.write(0x00, 0x01)
# The value
computer.memory.write(0x01, 0x9C)
i1_opcode = Opcode(0x24, ZeroPageAddressingMode())
i1 = BIT("BIT", i1_opcode, "Test Bits in Memory with Accumulator")
tests.computer.computer.execute_explicit_instruction(
computer, i1_opcode, i1, [], [("Z", True), ("V", False), ("N", True)], []
)
assert i1.result == 0x00
def test_cpu_instruction_bit_zeropage_zero_and_overflow_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x9C)
# The zero page location to read the value from
computer.memory.write(0x00, 0x01)
# The value
computer.memory.write(0x01, 0x61)
i1_opcode = Opcode(0x24, ZeroPageAddressingMode())
i1 = BIT("BIT", i1_opcode, "Test Bits in Memory with Accumulator")
tests.computer.computer.execute_explicit_instruction(
computer, i1_opcode, i1, [], [("Z", True), ("V", True), ("N", False)], []
)
assert i1.result == 0x00
def test_cpu_instruction_bit_zeropage_zero_and_overflow_and_negative_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x1C)
# The zero page location to read the value from
computer.memory.write(0x00, 0x01)
# The value
computer.memory.write(0x01, 0xE1)
i1_opcode = Opcode(0x24, ZeroPageAddressingMode())
i1 = BIT("BIT", i1_opcode, "Test Bits in Memory with Accumulator")
tests.computer.computer.execute_explicit_instruction(
computer, i1_opcode, i1, [], [("Z", True), ("V", True), ("N", True)], []
)
assert i1.result == 0x00
def test_cpu_instruction_bit_absolute(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x21)
# The memory location to read the value from
computer.memory.write(0x00, 0x02)
computer.memory.write(0x01, 0x00)
# The value
computer.memory.write(0x02, 0x3C)
i1_opcode = Opcode(0x2C, AbsoluteAddressingMode())
i1 = BIT("BIT", i1_opcode, "Test Bits in Memory with Accumulator")
tests.computer.computer.execute_explicit_instruction(
computer, i1_opcode, i1, [], [("Z", False), ("V", False), ("N", False)], []
)
assert i1.result == 0x20
def test_cpu_instruction_bit_absolute_negative_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x61)
# The memory location to read the value from
computer.memory.write(0x00, 0x02)
computer.memory.write(0x01, 0x00)
# The value
computer.memory.write(0x02, 0x9D)
i1_opcode = Opcode(0x2C, AbsoluteAddressingMode())
i1 = BIT("BIT", i1_opcode, "Test Bits in Memory with Accumulator")
tests.computer.computer.execute_explicit_instruction(
computer, i1_opcode, i1, [], [("Z", False), ("V", False), ("N", True)], []
)
assert i1.result == 0x01
def test_cpu_instruction_bit_absolute_overflow_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x9D)
# The memory location to read the value from
computer.memory.write(0x00, 0x02)
computer.memory.write(0x01, 0x00)
# The value
computer.memory.write(0x02, 0x61)
i1_opcode = Opcode(0x2C, AbsoluteAddressingMode())
i1 = BIT("BIT", i1_opcode, "Test Bits in Memory with Accumulator")
tests.computer.computer.execute_explicit_instruction(
computer, i1_opcode, i1, [], [("Z", False), ("V", True), ("N", False)], []
)
assert i1.result == 0x01
def test_cpu_instruction_bit_absolute_overflow_and_negative_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x9D)
# The memory location to read the value from
computer.memory.write(0x00, 0x02)
computer.memory.write(0x01, 0x00)
# The value
computer.memory.write(0x02, 0xE1)
i1_opcode = Opcode(0x2C, AbsoluteAddressingMode())
i1 = BIT("BIT", i1_opcode, "Test Bits in Memory with Accumulator")
tests.computer.computer.execute_explicit_instruction(
computer, i1_opcode, i1, [], [("Z", False), ("V", True), ("N", True)], []
)
assert i1.result == 0x81
def test_cpu_instruction_bit_absolute_zero_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x1C)
# The memory location to read the value from
computer.memory.write(0x00, 0x02)
computer.memory.write(0x01, 0x00)
# The value
computer.memory.write(0x02, 0x21)
i1_opcode = Opcode(0x2C, AbsoluteAddressingMode())
i1 = BIT("BIT", i1_opcode, "Test Bits in Memory with Accumulator")
tests.computer.computer.execute_explicit_instruction(
computer, i1_opcode, i1, [], [("Z", True), ("V", False), ("N", False)], []
)
assert i1.result == 0x00
def test_cpu_instruction_bit_absolute_zero_and_negative_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x61)
# The memory location to read the value from
computer.memory.write(0x00, 0x02)
computer.memory.write(0x01, 0x00)
# The value
computer.memory.write(0x02, 0x9C)
i1_opcode = Opcode(0x2C, AbsoluteAddressingMode())
i1 = BIT("BIT", i1_opcode, "Test Bits in Memory with Accumulator")
tests.computer.computer.execute_explicit_instruction(
computer, i1_opcode, i1, [], [("Z", True), ("V", False), ("N", True)], []
)
assert i1.result == 0x00
def test_cpu_instruction_bit_absolute_zero_and_overflow_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x9C)
# The memory location to read the value from
computer.memory.write(0x00, 0x02)
computer.memory.write(0x01, 0x00)
# The value
computer.memory.write(0x02, 0x61)
i1_opcode = Opcode(0x2C, AbsoluteAddressingMode())
i1 = BIT("BIT", i1_opcode, "Test Bits in Memory with Accumulator")
tests.computer.computer.execute_explicit_instruction(
computer, i1_opcode, i1, [], [("Z", True), ("V", True), ("N", False)], []
)
assert i1.result == 0x00
def test_cpu_instruction_bit_absolute_zero_and_overflow_and_negative_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x1C)
# The memory location to read the value from
computer.memory.write(0x00, 0x02)
computer.memory.write(0x01, 0x00)
# The value
computer.memory.write(0x02, 0xE1)
i1_opcode = Opcode(0x2C, AbsoluteAddressingMode())
i1 = BIT("BIT", i1_opcode, "Test Bits in Memory with Accumulator")
tests.computer.computer.execute_explicit_instruction(
computer, i1_opcode, i1, [], [("Z", True), ("V", True), ("N", True)], []
)
assert i1.result == 0x00
```
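The flag behaviour exercised by these tests follows the usual 6502 BIT semantics: Z reflects whether A AND the memory operand is zero, while V and N are copied from bits 6 and 7 of the operand. A tiny stand-alone sketch (not the project's implementation) reproducing one of the cases above:
```python
# BIT semantics: Z from (A & M) == 0, V from bit 6 of M, N from bit 7 of M.
def bit_flags(a, m):
    result = a & m
    return {"Z": result == 0, "V": bool(m & 0x40), "N": bool(m & 0x80)}

# Matches test_cpu_instruction_bit_zeropage_overflow_and_negative_flag above.
assert bit_flags(0x9D, 0xE1) == {"Z": False, "V": True, "N": True}
```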
#### File: cpu/instruction/test_bitey_cpu_instruction_rti.py
```python
import pytest
from bitey.computer.computer import Computer
def build_computer():
computer = None
with open("chip/6502.json") as f:
chip_data = f.read()
computer = Computer.build_from_json(chip_data)
return computer
return None
# module scope means run once per test module
@pytest.fixture(scope="module")
def setup():
computer = build_computer()
yield computer
def test_build_cpu_instruction_rti(setup):
"Test the RTI instruction"
computer = setup
# BRK instructions
computer.memory.write(0x00, 0x00)
computer.memory.write(0x01, 0xEA)
# The interrupt vector pointer
computer.memory.write(0xFFFE, 0x10)
computer.memory.write(0xFFFF, 0x20)
# Write a RTI instruction
computer.memory.write(0x2010, 0x40)
computer.cpu.registers["PC"].set(0x00)
assert computer.cpu.registers["PC"].get() == 0x00
computer.cpu.get_next_instruction(computer.memory)
assert computer.cpu.registers["PC"].get() == 0x01
computer.cpu.execute_instruction(computer.memory)
    # The PC should now point to the interrupt handler at 0x2010
assert computer.cpu.registers["PC"].get() == 0x2010
# Stack should be down three (two bytes for address, one for
# process status register)
assert computer.cpu.registers["S"] == 0x01FF - 0x003
assert computer.memory.read(0x1FF) == 0x00
assert computer.memory.read(0x1FE) == 0x01
# Interrupt Disable and Break should be set
assert computer.memory.read(0x1FD) == 0b00010100
computer.cpu.registers["P"].set(0xFF)
assert computer.cpu.registers["P"].get() == 0xFF
# Execute the return from interrupt instruction
computer.cpu.step(computer.memory)
    # The PC should now be back at the return address, 0x0001
assert computer.cpu.registers["PC"].get() == 0x0001
    # The stack pointer should be restored after the return address and
    # the processor status register are popped
assert computer.cpu.registers["S"].get() == 0x01FF
assert computer.cpu.registers["P"].get() == 0b00010000
```
#### File: cpu/instruction/test_bitey_cpu_instruction_sbc.py
```python
import pytest
import re
from bitey.computer.computer import Computer
import tests.computer.computer
import tests.memory.memory
# TODO Maybe refactor so these are not needed
from bitey.cpu.addressing_mode import ImmediateAddressingMode
from bitey.cpu.instruction.opcode import Opcode
from bitey.cpu.instruction.sbc import SBC
def build_computer(chip_line=None):
computer = None
search = re.compile(".*[^a-zA-Z0-9_-].*")
if (chip_line is not None) and (search.search(chip_line) is not None):
raise Exception("Invalid chip_line, contains non-alphanumeric characters")
fn = "chip/6502.json"
if chip_line is not None:
fn = "chip/{}-6502.json".format(chip_line)
with open(fn) as f:
chip_data = f.read()
computer = Computer.build_from_json(chip_data)
return computer
return None
# module scope means run once per test module
@pytest.fixture(scope="module")
def setup():
computer = tests.computer.computer.init_computer()
yield computer
def test_cpu_instruction_sbc_binary_subtract(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x03)
# inverted carry flag
computer.cpu.flags["C"].set()
# The value
computer.memory.write(0x00, 0x02)
i1_opcode = Opcode(0xE9, ImmediateAddressingMode())
i1 = SBC("SBC", i1_opcode, "Subtract Memory from Accumulator with Borrow")
tests.computer.computer.execute_explicit_instruction(
computer,
i1_opcode,
i1,
[("A", 0x01)],
[("C", True), ("Z", False), ("V", False), ("N", False)],
[],
)
def test_cpu_instruction_sbc_binary_subtract_negative_result(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x01)
computer.cpu.flags["C"].set()
# The value
computer.memory.write(0x00, 0x02)
i1_opcode = Opcode(0xE9, ImmediateAddressingMode())
i1 = SBC("SBC", i1_opcode, "Subtract Memory from Accumulator with Borrow")
tests.computer.computer.execute_explicit_instruction(
computer,
i1_opcode,
i1,
[("A", 0xFF)],
[("C", False), ("Z", False), ("V", False), ("N", True)],
[],
)
assert i1.result == 255
def test_cpu_instruction_sbc_binary_subtract_with_borrow(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x04)
computer.cpu.flags["D"].clear()
computer.cpu.flags["C"].clear()
# The value
computer.memory.write(0x00, 0x02)
i1_opcode = Opcode(0xE9, ImmediateAddressingMode())
i1 = SBC("SBC", i1_opcode, "Subtract Memory from Accumulator with Borrow")
tests.computer.computer.execute_explicit_instruction(
computer,
i1_opcode,
i1,
[("A", 0x01)],
[("C", True), ("Z", False), ("V", False), ("N", False)],
[],
)
assert i1.result == 0x101
def test_cpu_instruction_sbc_decimal_subtract(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
# decimal 44
computer.cpu.registers["A"].set(0b01000100)
computer.cpu.flags["D"].set()
computer.cpu.flags["C"].set()
# The value, decimal 29
computer.memory.write(0x00, 0b00101001)
i1_opcode = Opcode(0xE9, ImmediateAddressingMode())
i1 = SBC("SBC", i1_opcode, "Subtract Memory from Accumulator with Borrow")
tests.computer.computer.execute_explicit_instruction(
computer,
i1_opcode,
i1,
[("A", 0b00010101)],
[("C", True), ("Z", False), ("V", False), ("N", False)],
[],
)
def test_cpu_instruction_sbc_decimal_subtract_with_previous_carry(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
# decimal 29
computer.cpu.registers["A"].set(0b00101001)
computer.cpu.flags["D"].set()
computer.cpu.flags["C"].set()
# The value, decimal 44
computer.memory.write(0x00, 0b01000100)
i1_opcode = Opcode(0xE9, ImmediateAddressingMode())
i1 = SBC("SBC", i1_opcode, "Subtract Memory from Accumulator with Borrow")
tests.computer.computer.execute_explicit_instruction(
computer,
i1_opcode,
i1,
[("A", 0x85)], # 0b00010101)],
[("C", False), ("Z", False), ("V", False), ("N", True)],
# [("C", True), ("Z", False), ("V", False), ("N", False)],
[],
)
```
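As a quick sanity check of the packed-BCD literals used in the decimal-mode tests above (each nibble holds one decimal digit), a small stand-alone sketch:
```python
# Packed BCD: each nibble is one decimal digit, so 0b01000100 is 44 and
# 0b00101001 is 29; in decimal mode 44 - 29 = 15 = 0b00010101.
def bcd_to_int(byte):
    return (byte >> 4) * 10 + (byte & 0x0F)

assert bcd_to_int(0b01000100) == 44
assert bcd_to_int(0b00101001) == 29
assert bcd_to_int(0b00010101) == 15
```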
#### File: cpu/instruction/test_bitey_cpu_instruction_ta.py
```python
import pytest
import tests.computer.computer
import tests.memory.memory
# TODO Maybe refactor so these are not needed
from bitey.cpu.addressing_mode import ImpliedAddressingMode
from bitey.cpu.instruction.instruction import IncompleteInstruction
from bitey.cpu.instruction.opcode import Opcode
from bitey.cpu.instruction.ta import TAX
from bitey.cpu.instruction.ta import TAY
from bitey.cpu.instruction.ta import TXA
from bitey.cpu.instruction.ta import TYA
# module scope means run once per test module
@pytest.fixture(scope="module")
def setup():
computer = tests.computer.computer.init_computer()
yield computer
def execute_instruction(
computer, opcode, instruction, expected_registers, expected_z_flag, expected_n_flag
):
"Execute the instruction based on an opcode"
flags = computer.cpu.flags
try:
instruction.execute(computer.cpu, computer.memory)
for register, value in expected_registers:
assert computer.cpu.registers[register].get() == value
assert flags["Z"].status is expected_z_flag
assert flags["N"].status is expected_n_flag
except IncompleteInstruction:
assert False
def test_cpu_instruction_tax(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x7A)
i1_opcode = Opcode(0xAA, ImpliedAddressingMode())
i1 = TAX("TAX", i1_opcode, "Transfer Accumulator to Index X")
execute_instruction(computer, i1_opcode, i1, [("X", 0x7A)], False, False)
def test_cpu_instruction_tax_negative_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0xBC)
i1_opcode = Opcode(0xAA, ImpliedAddressingMode())
i1 = TAX("TAX", i1_opcode, "Transfer Accumulator to Index X")
execute_instruction(computer, i1_opcode, i1, [("X", 0xBC)], False, True)
def test_cpu_instruction_tax_zero_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x00)
computer.cpu.registers["X"].set(0xBC)
i1_opcode = Opcode(0xAA, ImpliedAddressingMode())
i1 = TAX("TAX", i1_opcode, "Transfer Accumulator to Index X")
execute_instruction(computer, i1_opcode, i1, [("X", 0x00)], True, False)
def test_cpu_instruction_tay(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x7A)
i1_opcode = Opcode(0xA8, ImpliedAddressingMode())
i1 = TAY("TAY", i1_opcode, "Transfer Accumulator to Index Y")
execute_instruction(computer, i1_opcode, i1, [("Y", 0x7A)], False, False)
def test_cpu_instruction_tay_negative_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0xFA)
i1_opcode = Opcode(0xA8, ImpliedAddressingMode())
i1 = TAY("TAY", i1_opcode, "Transfer Accumulator to Index Y")
execute_instruction(computer, i1_opcode, i1, [("Y", 0xFA)], False, True)
def test_cpu_instruction_tay_zero_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["A"].set(0x00)
computer.cpu.registers["Y"].set(0xFA)
i1_opcode = Opcode(0xA8, ImpliedAddressingMode())
i1 = TAY("TAY", i1_opcode, "Transfer Accumulator to Index Y")
execute_instruction(computer, i1_opcode, i1, [("Y", 0x00)], True, False)
def test_cpu_instruction_txa(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["X"].set(0x0B)
i1_opcode = Opcode(0x8A, ImpliedAddressingMode())
i1 = TXA("TXA", i1_opcode, "Transfer Index X to Accumulator")
execute_instruction(computer, i1_opcode, i1, [("A", 0x0B)], False, False)
def test_cpu_instruction_txa_negative_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["X"].set(0x8B)
i1_opcode = Opcode(0x8A, ImpliedAddressingMode())
i1 = TXA("TXA", i1_opcode, "Transfer Index X to Accumulator")
execute_instruction(computer, i1_opcode, i1, [("A", 0x8B)], False, True)
def test_cpu_instruction_txa_zero_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["X"].set(0x00)
computer.cpu.registers["A"].set(0x8B)
i1_opcode = Opcode(0x8A, ImpliedAddressingMode())
i1 = TXA("TXA", i1_opcode, "Transfer Index X to Accumulator")
execute_instruction(computer, i1_opcode, i1, [("A", 0x00)], True, False)
def test_cpu_instruction_tya(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["Y"].set(0x0B)
i1_opcode = Opcode(0x98, ImpliedAddressingMode())
i1 = TYA("TYA", i1_opcode, "Transfer Index Y to Accumulator")
execute_instruction(computer, i1_opcode, i1, [("A", 0x0B)], False, False)
def test_cpu_instruction_tya_negative_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["Y"].set(0xBB)
i1_opcode = Opcode(0x98, ImpliedAddressingMode())
i1 = TYA("TYA", i1_opcode, "Transfer Index Y to Accumulator")
execute_instruction(computer, i1_opcode, i1, [("A", 0xBB)], False, True)
def test_cpu_instruction_tya_zero_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["Y"].set(0x00)
computer.cpu.registers["A"].set(0xBB)
i1_opcode = Opcode(0x98, ImpliedAddressingMode())
i1 = TYA("TYA", i1_opcode, "Transfer Index Y to Accumulator")
execute_instruction(computer, i1_opcode, i1, [("A", 0x00)], True, False)
```
#### File: cpu/instruction/test_bitey_cpu_instruction_tsx.py
```python
import pytest
# TODO Maybe refactor so these are not needed
from bitey.cpu.addressing_mode import ImpliedAddressingMode
import tests.computer.computer
import tests.memory.memory
from bitey.cpu.instruction.instruction import IncompleteInstruction
from bitey.cpu.instruction.opcode import Opcode
from bitey.cpu.instruction.tsx import TSX
# module scope means run once per test module
@pytest.fixture(scope="module")
def setup():
computer = tests.computer.computer.init_computer()
yield computer
def execute_instruction(
computer, opcode, expected_x_register, expected_z_flag, expected_n_flag
):
"Execute the instruction based on an opcode"
flags = computer.cpu.flags
i1 = TSX("TSX", opcode, "Transfer Stack Pointer to Index X")
try:
i1.execute(computer.cpu, computer.memory)
assert computer.cpu.registers["X"].get() == expected_x_register
assert flags["Z"].status is expected_z_flag
assert flags["N"].status is expected_n_flag
except IncompleteInstruction:
assert False
def test_cpu_instruction_tsx(setup):
computer = setup
computer.reset()
computer.cpu.registers["S"].set(0x34)
i1_opcode = Opcode(0xBA, ImpliedAddressingMode())
execute_instruction(computer, i1_opcode, 0x34, False, False)
def test_cpu_instruction_tsx_n_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["S"].set(0xB4)
i1_opcode = Opcode(0xBA, ImpliedAddressingMode())
execute_instruction(computer, i1_opcode, 0xB4, False, True)
def test_cpu_instruction_tsx_z_flag(setup):
computer = setup
computer.reset()
computer.cpu.registers["S"].set(0x00)
i1_opcode = Opcode(0xBA, ImpliedAddressingMode())
execute_instruction(computer, i1_opcode, 0x00, True, False)
def test_cpu_instruction_tsx_memory(setup):
computer = setup
computer.reset()
# The TSX instruction
computer.memory.write(0x00, 0xBA)
computer.cpu.registers["PC"].set(0x00)
computer.cpu.registers["S"].set(0x34)
computer.cpu.step(computer.memory)
# Check registers are correct
assert computer.cpu.registers["X"].get() == 0x34
# Check flags are correct
assert computer.cpu.flags["N"].status is False
assert computer.cpu.flags["Z"].status is False
```
#### File: tests/cpu/test_bitey_cpu_arch.py
```python
from bitey.cpu.arch import EightBitArch, IntegerValueError
def test_cpu_cpu_signed_int_to_twos_complement():
"Test conversion from signed int to single-byte twos complement works"
assert EightBitArch.signed_int_to_twos_complement(0) == 0x00
assert EightBitArch.signed_int_to_twos_complement(1) == 0x01
assert EightBitArch.signed_int_to_twos_complement(2) == 0x02
assert EightBitArch.signed_int_to_twos_complement(127) == 0x7F
try:
EightBitArch.signed_int_to_twos_complement(128)
assert False
except IntegerValueError:
assert True
assert EightBitArch.signed_int_to_twos_complement(-1) == 0xFF
assert EightBitArch.signed_int_to_twos_complement(-2) == 0xFE
assert EightBitArch.signed_int_to_twos_complement(-128) == 0x80
try:
EightBitArch.signed_int_to_twos_complement(-129)
assert False
except IntegerValueError:
assert True
def test_cpu_cpu_twos_complement_to_signed_int():
"Test conversion from single-byte twos complmented to signed int works"
assert EightBitArch.twos_complement_to_signed_int(0x00) == 0
assert EightBitArch.twos_complement_to_signed_int(0x01) == 1
assert EightBitArch.twos_complement_to_signed_int(0x02) == 2
assert EightBitArch.twos_complement_to_signed_int(0x7F) == 127
    try:
        EightBitArch.twos_complement_to_signed_int(-1)
        assert False
    except IntegerValueError:
        assert True
assert EightBitArch.twos_complement_to_signed_int(0xFF) == -1
assert EightBitArch.twos_complement_to_signed_int(0xFE) == -2
assert EightBitArch.twos_complement_to_signed_int(0x80) == -128
    try:
        EightBitArch.twos_complement_to_signed_int(0x100)
        assert False
    except IntegerValueError:
        assert True
```
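A stand-alone sketch of the single-byte conversions these tests exercise; this is not the project's EightBitArch implementation, and a plain ValueError stands in for its IntegerValueError.
```python
# Single-byte two's complement, matching the behaviour the tests above expect.
def signed_int_to_twos_complement(value):
    if not -128 <= value <= 127:
        raise ValueError("out of range for a signed byte")
    return value & 0xFF

def twos_complement_to_signed_int(byte):
    if not 0 <= byte <= 0xFF:
        raise ValueError("not a single byte")
    return byte - 0x100 if byte & 0x80 else byte

assert signed_int_to_twos_complement(-1) == 0xFF
assert twos_complement_to_signed_int(0x80) == -128
```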
#### File: tests/memory/memory.py
```python
def init_memory(memory, init_list):
"""
Setup memory for tests
The first argument is the Memory
The second argument is a list of 2-tuples
    Each 2-tuple contains an address and the value that should be stored there
"""
for item in init_list:
memory.write(item[0], item[1])
```
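A minimal usage sketch for the helper above; FakeMemory is a stand-in for the project's Memory class, reduced to the write() method the helper needs.
```python
# Illustrative only: seed two addresses the way the instruction tests do.
class FakeMemory:
    def __init__(self):
        self.memory = {}

    def write(self, address, value):
        self.memory[address] = value

mem = FakeMemory()
init_memory(mem, [(0x00, 0xA9), (0x01, 0x42)])
assert mem.memory[0x00] == 0xA9
assert mem.memory[0x01] == 0x42
```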
#### File: bitey/tests/test_bitey_cpu_instruction_opcode.py
```python
from bitey.cpu.addressing_mode import ImpliedAddressingMode
from bitey.cpu.instruction.opcode import (
Opcode,
OpcodeJSONDecoder,
OpcodesJSONDecoder,
)
def test_cpu_instruction_opcode_init():
opcode = Opcode(0, ImpliedAddressingMode())
assert opcode.opcode == 0
assert opcode.addressing_mode == ImpliedAddressingMode()
def test_cpu_instruction_opcode_json_decoder():
json_string = '{ "opcode": 154, "addressing_mode": "implied" }'
opcode_decoder = OpcodeJSONDecoder()
opcode = opcode_decoder.decode(json_string)
assert opcode.opcode == 154
assert opcode.addressing_mode == ImpliedAddressingMode()
def test_cpu_instruction_opcodes_json_decoder():
json_string = '[ { "opcode": 154, "addressing_mode": "implied" } ]'
opcodes_decoder = OpcodesJSONDecoder()
opcodes = opcodes_decoder.decode(json_string)
assert opcodes is not None
assert len(opcodes.opcodes) == 1
assert 154 in opcodes
assert opcodes[154].opcode == 154
assert opcodes[154].addressing_mode == ImpliedAddressingMode()
```
#### File: bitey/tests/test_bitey_cpu_instruction.py
```python
import pytest
from bitey.cpu.addressing_mode import (
AbsoluteAddressingMode,
AbsoluteXAddressingMode,
AbsoluteYAddressingMode,
AccumulatorAddressingMode,
ImmediateAddressingMode,
ImpliedAddressingMode,
IndexedIndirectAddressingMode,
IndirectIndexedAddressingMode,
RelativeAddressingMode,
ZeroPageAddressingMode,
ZeroPageXAddressingMode,
ZeroPageYAddressingMode,
)
from bitey.computer.computer import Computer
from bitey.cpu.cpu import CPU
from bitey.cpu.instruction.instruction import (
Instruction,
InstructionClass,
Instructions,
InstructionSet,
)
from bitey.cpu.instruction.opcode import Opcode, Opcodes
from bitey.cpu.instruction.cli import CLI
from bitey.cpu.instruction.ld import LDA
from bitey.cpu.instruction.sei import SEI
def test_cpu_instruction_init():
opcode = Opcode(173, AbsoluteAddressingMode())
i = Instruction("LDA", opcode, "Load Accumulator with Memory")
assert i.name == "LDA"
assert i.opcode == opcode
assert i.description == "Load Accumulator with Memory"
def test_cpu_instruction_init_no_type_checking():
"We're not doing strict type checking yet, so this should pass"
opcode = Opcode(173, AbsoluteAddressingMode())
i = Instruction("LDA", opcode, "Load Accumulator with Memory")
assert i.name == "LDA"
assert i.opcode == opcode
assert i.description == "Load Accumulator with Memory"
def test_cpu_instructions_init():
i1_opcode = Opcode(173, AbsoluteAddressingMode())
i1 = Instruction("LDA", i1_opcode, "Load Accumulator with Memory")
i2_opcode = Opcode(141, AbsoluteAddressingMode())
i2 = Instruction("STA", i2_opcode, "Store Accumulator in Memory")
instructions = Instructions([i1, i2])
assert len(instructions.instructions) == 2
lda = instructions.get_by_opcode(173)
assert lda == i1
def test_cpu_instruction_class_init():
i1_opcode_1 = Opcode(173, AbsoluteAddressingMode())
i1_opcode_2 = Opcode(165, ZeroPageAddressingMode())
i1 = Instruction("LDA", i1_opcode_1, "Load Accumulator with Memory")
opcodes = Opcodes([i1_opcode_1, i1_opcode_2])
instruction_class = InstructionClass(
"LDA", i1, opcodes, "Load Accumulator with Memory"
)
assert instruction_class.name == "LDA"
assert len(instruction_class.opcodes) == 2
assert instruction_class.description == "Load Accumulator with Memory"
def test_cpu_instruction_set_init():
i1_opcode_1 = Opcode(173, AbsoluteAddressingMode())
i1_opcode_2 = Opcode(165, ZeroPageAddressingMode())
i1 = Instruction("LDA", i1_opcode_1, "Load Accumulator with Memory")
opcodes = Opcodes([i1_opcode_1, i1_opcode_2])
instruction_class = InstructionClass(
"LDA", i1, opcodes, "Load Accumulator with Memory"
)
instruction_set = InstructionSet([instruction_class])
assert instruction_set.instructions[0].name == "LDA"
assert len(instruction_set.instructions[0].opcodes) == 2
assert instruction_set.instructions[0].description == "Load Accumulator with Memory"
def test_cpu_instruction_short_str():
opcode = Opcode(0x58, ImpliedAddressingMode())
cli = CLI("CLI", opcode, "Clear Interrupt Disable")
assert cli.short_str() == "CLI"
def test_cpu_instruction_assembly_str(setup):
computer = setup
computer.reset()
# Set the PC
computer.cpu.registers["PC"].set(0x00)
# Set the X register
computer.cpu.registers["X"].set(0x4A)
# Set the Y register
computer.cpu.registers["Y"].set(0xEC)
# Implied mode CLI instruction
computer.memory.write(0x00, 0x58)
opcode = Opcode(0x58, ImpliedAddressingMode())
cli = CLI("CLI", opcode, "Clear Interrupt Disable")
assert cli.assembly_str(computer) == "CLI"
# Immediate mode LDA
computer.memory.write(0x01, 0xA9)
opcode = Opcode(0xA9, ImmediateAddressingMode())
lda = LDA("LDA", opcode, "Load Accumulator with Memory")
computer.cpu.registers["PC"].inc()
assert lda.assembly_str(computer) == "LDA #$a9"
# ZeroPage mode LDA
computer.memory.write(0x02, 0x99)
opcode = Opcode(0xA5, ZeroPageAddressingMode())
lda = LDA("LDA", opcode, "Load Accumulator with Memory")
computer.cpu.registers["PC"].set(0x02)
assert lda.assembly_str(computer) == "LDA $99"
# Absolute addressing mode LDA
computer.memory.write(0x03, 0x5C)
computer.memory.write(0x04, 0xB4)
opcode = Opcode(0xAD, AbsoluteAddressingMode())
lda = LDA("LDA", opcode, "Load Accumulator with Memory")
computer.cpu.registers["PC"].set(0x03)
assert lda.assembly_str(computer) == "LDA $b45c"
# Accumulator addressing mode
opcode = Opcode(0x0A, AccumulatorAddressingMode())
asl = Instruction("ASL", opcode, "Shift Left One Bit (Memory or Accumulator")
assert asl.assembly_str(computer) == "ASL"
# AbsoluteX addressing mode
computer.memory.write(0x05, 0x0F)
computer.memory.write(0x06, 0xF7)
opcode = Opcode(0xBD, AbsoluteXAddressingMode())
lda = LDA("LDA", opcode, "Load Accumulator with Memory")
computer.cpu.registers["PC"].set(0x05)
assert lda.assembly_str(computer) == "LDA $f70f,X"
# AbsoluteY addressing mode
computer.memory.write(0x07, 0x13)
computer.memory.write(0x08, 0x16)
opcode = Opcode(0xB9, AbsoluteYAddressingMode())
lda = LDA("LDA", opcode, "Load Accumulator with Memory")
assert lda.assembly_str(computer) == "LDA $1613,Y"
# IndexedIndirect addressing mode
computer.memory.write(0x09, 0x30)
opcode = Opcode(0xA1, IndexedIndirectAddressingMode())
lda = LDA("LDA", opcode, "Load Accumulator with Memory")
assert lda.assembly_str(computer) == "LDA ($30,X)"
# IndirectIndexed addressing mode
computer.memory.write(0x0A, 0x4C)
opcode = Opcode(0xB1, IndirectIndexedAddressingMode())
lda = LDA("LDA", opcode, "Load Accumulator with Memory")
assert lda.assembly_str(computer) == "LDA ($4c),Y"
# ZeroPageX addressing mode
computer.memory.write(0x0B, 0xAC)
opcode = Opcode(0xB5, ZeroPageXAddressingMode())
lda = LDA("LDA", opcode, "Load Accumulator with Memory")
assert lda.assembly_str(computer) == "LDA $ac,X"
# ZeroPageY addressing mode
computer.memory.write(0x0C, 0xEC)
opcode = Opcode(0xB1, ZeroPageYAddressingMode())
lda = Instruction("LDX", opcode, "Load Index X with Memory")
assert lda.assembly_str(computer) == "LDX $ec,Y"
# Relative addressing mode
computer.memory.write(0x0D, 0xA3)
opcode = Opcode(0xB0, RelativeAddressingMode())
lda = Instruction("BCS", opcode, "Branch on Carry Set")
assert lda.assembly_str(computer) == "BCS $ffb1"
def read_flags():
with open("chip/6502.json") as f:
chip_data = f.read()
cpu = CPU.build_from_json(chip_data)
cpu.flags.data = 0
return cpu.flags
return None
def build_computer():
computer = None
with open("chip/6502.json") as f:
chip_data = f.read()
computer = Computer.build_from_json(chip_data)
return computer
return None
# module scope means run once per test module
@pytest.fixture(scope="module")
def setup():
computer = build_computer()
yield computer
def test_cpu_instruction_cli(setup):
computer = setup
computer.reset()
flags = computer.cpu.flags
assert flags["I"].status == 0
assert flags["I"].flags == flags
assert flags.data is not None
flags["I"].set()
assert flags["I"].status is True
i1_opcode = Opcode(88, ImpliedAddressingMode())
i1 = CLI("CLI", i1_opcode, "Clear Interrupt Disable")
i1.execute(computer.cpu, computer.memory)
assert flags["I"].status is False
def test_cpu_instruction_sei(setup):
computer = setup
computer.reset()
flags = computer.cpu.flags
assert flags["I"].status is False
i1_opcode = Opcode(120, ImpliedAddressingMode())
i1 = SEI("SEI", i1_opcode, "Set Interrupt Disable")
i1.execute(computer.cpu, computer.memory)
assert flags["I"].status is True
```
|
{
"source": "jgerrish/tapestry",
"score": 4
}
|
#### File: tapestry/tapestry/screen.py
```python
import curses
import re
class NewlineError(Exception):
"""
Raise an exception if a newline is in a string
This is due to an old bug:
A bug in ncurses, the backend for this Python module, can cause
SegFaults when resizing windows. This is fixed in
ncurses-6.1-20190511. If you are stuck with an earlier ncurses,
you can avoid triggering this if you do not call addstr() with a
str that has embedded newlines. Instead, call addstr() separately
for each line.
A lot of old systems won't have updated curses libraries
"""
pass
class Screen:
"""
Screen acts as a wrapper and RAII object
Pass your main TUI event loop into the initializer
When your event loop exits, the Screen will be cleaned up
"""
def __init__(self, tui_event_loop):
"""
Initialize the Screen object
Pass in your TUI event loop
When the event loop exits, the screen will be cleaned up
"""
self.nl_search = re.compile("\n")
curses.wrapper(self.run, tui_event_loop)
def run(self, stdscr, tui_event_loop):
"Run the event loop with the Screen object as the main parameter"
self.stdscr = stdscr
return tui_event_loop(self)
def refresh(self):
"Refresh the window, drawing any objects to screen"
return self.stdscr.refresh()
def num_cols(self):
"Number of columns in the screen"
return curses.COLS
def num_lines(self):
"Number of lines in the screen"
return curses.LINES
def addstr(self, *args):
"""
Add a string to the screen
This method takes one or three arguments:
addstr(str): Adds a string to the current cursor location
addstr(x, y, str): Adds a string to the location (x, y)
"""
if len(args) == 1:
return self.stdscr.addstr(args[0])
elif len(args) == 3:
return self.stdscr.addstr(args[1], args[0], args[2])
else:
raise Exception
def instr(self, *args):
"""
Get a string at position (x_location, y_location)
This expects zero, one, two or three arguments:
instr(): get the string at the current cursor location
instr(n): get a string of length n at the current cursor location
instr(x, y): get a string at location (x, y)
instr(x, y, n): get a string of length n at location (x, y)
"""
if len(args) == 0:
return self.stdscr.instr()
elif len(args) == 1:
return self.stdscr.instr(args[0])
elif len(args) == 2:
return self.stdscr.instr(args[1], args[0])
elif len(args) == 3:
return self.stdscr.instr(args[1], args[0], args[2])
else:
raise Exception
```
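A minimal sketch of driving the Screen wrapper above, mirroring the event-loop pattern used in the tests that follow; it needs a real terminal, since curses requires a TTY.
```python
# Illustrative only: draw a string, read it back, and return it.
# Screen() runs the loop under curses.wrapper and restores the terminal after.
def tui_event_loop(screen):
    screen.addstr(0, 0, "hello from tapestry")
    screen.refresh()
    return screen.instr(0, 0, 19)

# Running this takes over the terminal and returns b"hello from tapestry":
# Screen(tui_event_loop)
```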
#### File: tapestry/tests/test_screen.py
```python
from contextlib import contextmanager
import curses
import os
import sys
import tempfile
from test.support import (verbose, SaveSignals)
from tapestry.screen import Screen
class TestCurses:
"""
    A class that tests the curses library. This sets the standard
output stream to go to a temporary file, so that test output
isn't corrupted.
Based on test_curses.py in CPython
"""
@classmethod
def setUpClass(cls):
term = os.environ.get('TERM')
if verbose:
print(f'TERM={term}', file=sys.stderr, flush=True)
# testing setupterm() inside initscr/endwin
# causes terminal breakage
stdout_fd = sys.__stdout__.fileno()
curses.setupterm(fd=stdout_fd)
def setUp(self):
term = os.environ.get('TERM')
if verbose:
print(f'TERM={term}', file=sys.stderr, flush=True)
if (term is None) or (term == "None"):
return
        # TODO: re-introduce more conditional setup
self.stack = []
self.isatty = True
self.output = sys.__stdout__
stdout_fd = sys.__stdout__.fileno()
stdin_fd = sys.__stdin__.fileno()
self.stdout_fd = stdout_fd
self.stdin_fd = stdin_fd
        # initscr() unconditionally uses C stdout.
# If it is redirected to file or pipe, try to attach it
# to terminal.
# First, save a copy of the file descriptor of stdout, so it
# can be restored after finishing the test.
stdout_dup_fd = os.dup(stdout_fd)
self.stdout_dup_fd = stdout_dup_fd
self.addCustomCleanup(os.close, stdout_dup_fd)
self.addCustomCleanup(os.dup2, stdout_dup_fd, stdout_fd)
# duplicate stdin
stdin_dup_fd = os.dup(stdin_fd)
self.stdin_dup_fd = stdin_dup_fd
self.addCustomCleanup(os.close, stdin_dup_fd)
self.addCustomCleanup(os.dup2, stdin_dup_fd, stdin_fd)
# if not sys.__stdout__.isatty():
tmp = tempfile.TemporaryFile(mode='wb', buffering=0)
self.tmp = tmp
self.isatty = False
self.addCustomCleanup(tmp.close)
self.output = None
# swap out stdout for the temp file
os.dup2(tmp.fileno(), stdout_fd)
self.save_signals = SaveSignals()
self.save_signals.save()
self.addCustomCleanup(self.save_signals.restore)
if verbose and self.output is not None:
# just to make the test output a little more readable
sys.stderr.flush()
sys.stdout.flush()
print(file=self.output, flush=True)
self.stdscr = curses.initscr()
try:
curses.savetty()
self.addCustomCleanup(curses.endwin)
self.addCustomCleanup(curses.resetty)
except Exception as e:
sys.stderr.write("Caught exception: {}\n".format(e))
self.stdscr.erase()
def addCustomCleanup(self, func, *args):
try:
self.stack.append((func, *args))
except Exception as e:
sys.stderr.write("Couldn't add cleanup function: {}\n".format(e))
def cleanUp(self):
try:
for i in range(len(self.stack) - 1, -1, -1):
(func, *args) = self.stack[i]
func(*args)
except Exception as e:
sys.stderr.write("Caught exception in cleanUp: {}\n".format(e))
# This is a context manager for running curses tests
# It wraps the test in a try except finally block that automatically cleans
# up and restores stdin / stdout
@contextmanager
def curses_test(self, mocker):
if not sys.__stdout__.isatty():
mocker.patch('curses.cbreak')
mocker.patch('curses.nocbreak')
mocker.patch('curses.endwin')
term = os.environ.get('TERM')
# This is for headless environments like GitHub Actions use, we'll have
# to create some other kind of fake "framebuffer"
# curses probably provides a test for this, we'll use this for now
if (term == "unknown") or (term == "dumb") or (term == "None") \
or term is None:
mocker.patch('curses.setupterm')
# This may not be the proper pattern for context managers,
# but it works
            # Time constraints have kept this from being cleaned up further
yield False
else:
self.setUp()
try:
yield True
except Exception as e:
sys.stderr.write(
"Caught exception in curses_test: {}\n".format(e))
self.cleanUp()
raise e
finally:
self.cleanUp()
def test_create_windows(self, mocker):
"Test creating base curses windows"
with self.curses_test(mocker) as valid_term:
if valid_term:
win = curses.newwin(5, 10)
assert win.getbegyx() == (0, 0)
assert win.getparyx() == (-1, -1)
assert win.getmaxyx() == (5, 10)
def test_screen_addstr(self, mocker):
"Test adding a string to the screen"
def event_loop(screen):
screen.addstr(0, 0, "test")
assert screen.instr(0, 0, 4) == b"test"
with self.curses_test(mocker) as valid_term:
if valid_term:
Screen(event_loop)
```
|
{
"source": "jgerschler/ESL-Games",
"score": 3
}
|
#### File: ESL-Games/Camera Pistol/professions.py
```python
from pygame.locals import *
import imutils
import cv2
import pygame
import random
import sys
class PistolGame(object):
FONT_SIZE = 32# font size for words
SCORE_SIZE = 32# font size for score
GAME_TIME = 60# number of seconds to complete the game
BLACK = (0,0,0)
WHITE = (255,255,255)
RED = (255,0,0)
BLUE = (0,162,232)
def __init__(self):
pygame.init()
pygame.mixer.init()
self.sound_shot = pygame.mixer.Sound('audio\\shot.ogg')
self.sound_wrong_shot = pygame.mixer.Sound('audio\\scream.ogg')
self.sound_miss = pygame.mixer.Sound('audio\\ricochet.ogg')
self.image_shot = pygame.image.load('images\\bang.png')
## self.object_lower = (89, 230, 230)# HSV color range for object to be tracked
## self.object_upper = (108, 255, 255)
self.object_lower = (94, 126, 129)# HSV color range for object to be tracked
self.object_upper = (131, 255, 255)
self.translations = {'electrician':'electricista','gardener':'jardinero','mechanic':'mecanico','bus driver':'chofer de autobus',
'hairdresser':'estilista','construction worker':'obrero','mason':'albañil','pump attendant':'despachador',
'garbage man':'recogedor de basura','mailman':'cartero','architect':'arquitecto','cook':'cocinero',
'photographer':'fotografo','butcher':'carnicero','waiter':'mesero','waitress':'mesera','dentist':'dentista',
'baker':'panadero','cleaning lady':'limpiadora','salesman':'vendedor','actress':'actriz','artist':'artista',
'doctor':'doctor','engineer':'ingeniero','fireman':'bombero','housewife':'ama de casa','nurse':'enfermera',
'pilot':'piloto','policeman':'policia','cashier':'cajera','painter':'pintor','secretary':'secretaria',
'singer':'cantante','veterinarian':'veterinario','soldier':'soldado','carpenter':'carpintero',
'businessman':'hombre de negocios','detective':'detective','plumber':'plomero','surgeon':'cirujano',
'writer':'escritor','farmer':'campesino','spy':'espia','lawyer':'abogado','judge':'juez','journalist':'periodista',
'teacher':'maestro','delivery boy':'repartidor'}
self.professions = ['electrician', 'gardener', 'mechanic', 'bus driver', 'hairdresser', 'construction worker', 'mason',
'pump attendant', 'garbage man', 'mailman', 'architect', 'cook', 'photographer', 'butcher', 'waiter',
'waitress', 'dentist', 'baker', 'cleaning lady', 'salesman', 'actress', 'artist', 'doctor', 'engineer',
'fireman', 'housewife', 'nurse', 'pilot', 'policeman', 'cashier', 'painter', 'secretary', 'singer',
'veterinarian', 'soldier', 'carpenter', 'businessman', 'detective', 'plumber', 'surgeon', 'writer',
'farmer', 'spy', 'lawyer', 'judge', 'journalist', 'teacher', 'delivery boy']
self.finished = False
self.game_display = pygame.display.set_mode((0,0), pygame.FULLSCREEN)
pygame.display.set_caption('Pistolero Professions Game')
self.display_width, self.display_height = pygame.display.get_surface().get_size()
self.game_display.fill(PistolGame.WHITE)
pygame.display.update()
def text_objects(self, text, font):
text_surface = font.render(text, True, PistolGame.BLACK)
return text_surface, text_surface.get_rect()
# these message display functions need to be combined!
def message_display(self, text, loc_tuple, loc_int, score_flag):# loc_int: 1 center, 2 top left, 3 bottom left, 4 bottom right, 5 top right. score_flag; 0 default font size, 1 score font size
text_surf, text_rect = self.text_objects(text, pygame.font.Font(None, PistolGame.FONT_SIZE))# improve this section
if score_flag == 1:
text_surf, text_rect = self.text_objects(text, pygame.font.Font(None, PistolGame.SCORE_SIZE))
        loc_dict = {1: 'center', 2: 'topleft', 3: 'bottomleft', 4: 'bottomright', 5: 'topright'}
        setattr(text_rect, loc_dict[loc_int], loc_tuple)
self.game_display.blit(text_surf, text_rect)
return text_rect
def new_round(self):
self.selected_profession = random.choice(list(self.translations.keys()))
self.professions_list = random.sample(self.professions, 3)
while self.selected_profession in self.professions_list:
self.professions_list = random.sample(self.professions, 3)
self.professions_list.append(self.selected_profession)
random.shuffle(self.professions_list)
def end_game(self):
self.game_display.fill(PistolGame.WHITE)
self.message_display("GAME OVER", (self.display_width/2, self.display_height/2), 1, 0)
self.message_display("SCORE: {0}".format(str(self.score)), (self.display_width/2, self.display_height/3), 1, 0)
pygame.display.update()
pygame.time.delay(3000)
self.finished = True
self.camera.release()
cv2.destroyAllWindows()
pygame.quit()
sys.exit()
def run(self):
self.camera = cv2.VideoCapture(1)# change 1 to 0 if you only have one camera
self.selected_profession = random.choice(list(self.translations.keys()))
self.professions_list = random.sample(self.professions, 3)
while self.selected_profession in self.professions_list:
self.professions_list = random.sample(self.professions, 3)
self.professions_list.append(self.selected_profession)
random.shuffle(self.professions_list)
self.score = 0
int_x, int_y = 0, 0
start_ticks = pygame.time.get_ticks()
while not self.finished:
seconds = (pygame.time.get_ticks() - start_ticks)/1000
if PistolGame.GAME_TIME - seconds <= 0:
self.end_game()
(grabbed, frame) = self.camera.read()
frame = imutils.resize(frame, width=self.display_width)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, self.object_lower, self.object_upper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
if len(cnts) > 0:
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
int_x, int_y = self.display_width - int(x), self.display_height - int(y)
# if radius > 10:# left here for troubleshooting purposes
# cv2.circle(frame, (int_x, int_y), int(radius), (0, 255, 255), 2)
# cv2.imshow("Frame", frame)
self.game_display.fill(PistolGame.WHITE)
rect0 = self.message_display(self.professions_list[0], (100, 100), 2, 0)
rect1 = self.message_display(self.professions_list[1], (100, self.display_height - 100), 3, 0)
rect2 = self.message_display(self.professions_list[2], (self.display_width - 100, 100), 5, 0)
rect3 = self.message_display(self.professions_list[3], (self.display_width - 100, self.display_height - 100), 4, 0)
react_score = self.message_display("{0} {1}".format(str(self.score), int(PistolGame.GAME_TIME - seconds)),
(int(self.display_width/2), int(self.display_height - 50)), 1, 1)
base_verb = self.message_display(self.translations[self.selected_profession], (self.display_width/2, 50), 1, 1)
pygame.draw.circle(self.game_display, PistolGame.BLUE, (int(self.display_width/2), int(self.display_height/2)), 40)# change tracking circle radius as necessary
if rect0.collidepoint(int_x, int_y) or rect1.collidepoint(int_x, int_y) or rect2.collidepoint(int_x, int_y) or rect3.collidepoint(int_x, int_y):
pygame.draw.circle(self.game_display, PistolGame.RED,(int_x, int_y), 10)
else:
pygame.draw.circle(self.game_display, PistolGame.BLACK,(int_x, int_y), 10)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.finished = True
if event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
self.finished = True
if event.type == pygame.KEYUP and event.key == pygame.K_a:
# update to use dictionary here?
if (rect0.collidepoint(int_x, int_y) and self.professions_list[0] == self.selected_profession):
self.sound_shot.play()
self.score+=1
self.game_display.blit(self.image_shot, (rect0.center[0]-self.image_shot.get_width()/2, rect0.center[1]-self.image_shot.get_height()/2))
pygame.display.update()
pygame.time.delay(300)
elif (rect1.collidepoint(int_x, int_y) and self.professions_list[1] == self.selected_profession):
self.sound_shot.play()
self.score+=1
self.game_display.blit(self.image_shot, (rect1.center[0]-self.image_shot.get_width()/2, rect1.center[1]-self.image_shot.get_height()/2))
pygame.display.update()
pygame.time.delay(300)
elif (rect2.collidepoint(int_x, int_y) and self.professions_list[2] == self.selected_profession):
self.sound_shot.play()
self.score+=1
self.game_display.blit(self.image_shot, (rect2.center[0]-self.image_shot.get_width()/2, rect2.center[1]-self.image_shot.get_height()/2))
pygame.display.update()
pygame.time.delay(300)
elif (rect3.collidepoint(int_x, int_y) and self.professions_list[3] == self.selected_profession):
self.sound_shot.play()
self.score+=1
self.game_display.blit(self.image_shot, (rect3.center[0]-self.image_shot.get_width()/2, rect3.center[1]-self.image_shot.get_height()/2))
pygame.display.update()
pygame.time.delay(300)
elif (rect0.collidepoint(int_x, int_y) and self.professions_list[0] != self.selected_profession):
self.sound_wrong_shot.play()
self.score-=1
self.game_display.blit(self.image_shot, (rect0.center[0]-self.image_shot.get_width()/2, rect0.center[1]-self.image_shot.get_height()/2))
pygame.display.update()
pygame.time.delay(300)
elif (rect1.collidepoint(int_x, int_y) and self.professions_list[1] != self.selected_profession):
self.sound_wrong_shot.play()
self.score-=1
self.game_display.blit(self.image_shot, (rect1.center[0]-self.image_shot.get_width()/2, rect1.center[1]-self.image_shot.get_height()/2))
pygame.display.update()
pygame.time.delay(300)
elif (rect2.collidepoint(int_x, int_y) and self.professions_list[2] != self.selected_profession):
self.sound_wrong_shot.play()
self.score-=1
self.game_display.blit(self.image_shot, (rect2.center[0]-self.image_shot.get_width()/2, rect2.center[1]-self.image_shot.get_height()/2))
pygame.display.update()
pygame.time.delay(300)
elif (rect3.collidepoint(int_x, int_y) and self.professions_list[3] != self.selected_profession):
self.sound_wrong_shot.play()
self.score-=1
self.game_display.blit(self.image_shot, (rect3.center[0]-self.image_shot.get_width()/2, rect3.center[1]-self.image_shot.get_height()/2))
pygame.display.update()
pygame.time.delay(300)
else:
self.sound_miss.play()
self.new_round()
pygame.display.update()
self.camera.release()
cv2.destroyAllWindows()
pygame.quit()
sys.exit()
if __name__ == '__main__':
pg = PistolGame()
pg.run()
```
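The game above tracks a colored object by thresholding the camera image in HSV space (`object_lower`/`object_upper`). A minimal, standalone sketch like the one below (assuming OpenCV and NumPy are installed and a webcam is available at index 0; the bounds shown are placeholders, not the game's actual values) can help check candidate bounds before plugging them into the game.
```python
# Standalone HSV calibration sketch (not part of the game).
# The bounds below are placeholders; adjust them until the mask isolates your object.
import cv2
import numpy as np

lower = np.array([100, 150, 50])   # assumed lower HSV bound (roughly blue)
upper = np.array([140, 255, 255])  # assumed upper HSV bound

cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower, upper)
    cv2.imshow("mask", mask)  # white pixels are what the game would track
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
```
Once the mask cleanly isolates the object, the same two bounds can be reused as `object_lower` and `object_upper`.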
#### File: Classic Joystick/Irregular Verbs Past Simple/IrregularVerbs.py
```python
import pygame
import sys
import time
import math
import random
import pygame.font
from pygame.locals import *
class TextRectException(Exception):
def __init__(self, message=None):
self.message = message
def __str__(self):
return self.message
class IrregularVerbs(object):
WHITE = (255,255,255)# some colors are not currently used, but left for future modification
BLACK = (0,0,0)
GREEN = (0,128,0)
YELLOW = (255,229,51)
RED = (255,0,0)
BLUE = (0,0,255)
BROWN = (97,65,38)
PURPLE = (128,0,128)
def __init__(self):
pygame.init()
pygame.mixer.init()
self.sound_win = pygame.mixer.Sound('audio\\ping.ogg')
self.sound_loss = pygame.mixer.Sound('audio\\buzzer.ogg')
self.my_font = pygame.font.Font(None, 48)
self.my_rect = pygame.Rect((273,268,252,64))
self.my_rect_frag_1 = pygame.Rect((273,20,252,64))
self.my_rect_frag_2 = pygame.Rect((527,268,252,64))
self.my_rect_frag_3 = pygame.Rect((273,516,252,64))
self.my_rect_frag_4 = pygame.Rect((20,268,252,64))
self.display = pygame.display.set_mode((800, 600))# change to desired resolution -- you'll need to modify rect size.
pygame.display.set_caption("Irregular Verbs Game")
self.display.fill(IrregularVerbs.WHITE)
pygame.display.update()
self.finished = False
def render_textrect(self, string, font, rect, text_color, background_color, justification=0):
final_lines = []
requested_lines = string.splitlines()
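# wrap each requested line on spaces so that every rendered line fits inside rect.width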
for requested_line in requested_lines:
if font.size(requested_line)[0] > rect.width:
words = requested_line.split(' ')
for word in words:
if font.size(word)[0] >= rect.width:
raise TextRectException("The word " + word + " is too long to fit in the rect passed.")
accumulated_line = ""
for word in words:
test_line = accumulated_line + word + " "
if font.size(test_line)[0] < rect.width:
accumulated_line = test_line
else:
final_lines.append(accumulated_line)
accumulated_line = word + " "
final_lines.append(accumulated_line)
else:
final_lines.append(requested_line)
surface = pygame.Surface(rect.size)
surface.fill(background_color)
accumulated_height = 0
for line in final_lines:
if accumulated_height + font.size(line)[1] >= rect.height:
raise TextRectException('After word wrap, the text string was too tall to fit in the provided rect.')
if line != "":
tempsurface = font.render(line, 1, text_color)
if justification == 0:
surface.blit(tempsurface, (0, accumulated_height))
elif justification == 1:
surface.blit(tempsurface, ((rect.width - tempsurface.get_width()) / 2, accumulated_height))
elif justification == 2:
surface.blit(tempsurface, (rect.width - tempsurface.get_width(), accumulated_height))
else:
raise TextRectException("Invalid justification argument: " + str(justification))
accumulated_height += font.size(line)[1]
return surface
def new_user(self):
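# each entry: [base verb, [[option, tag], ...]] where the option tagged "a" is the correct past simple and "q" marks distractors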
irregular_verbs = [
["be",[["was/were","a"],["been","q"],["being","q"],["is","q"]]],
["bear",[["bore","a"],["born","q"],["bears","q"],["bearing","q"]]],
["beat",[["beat","a"],["beaten","q"],["beating","q"],["beats","q"]]],
["begin",[["began","a"],["begun","q"],["beginning","q"],["begins","q"]]],
["bite",[["bit","a"],["bitten","q"],["biting","q"],["bite","q"]]],
["blow",[["blew","a"],["blown","q"],["blows","q"],["blowing","q"]]],
["broadcast",[["broadcast","a"],["broadcasting","q"],["broadcasts","q"],["broadcaster","q"]]],
["break",[["broke","a"],["broken","q"],["breaked","q"],["breaking","q"]]],
["bring",[["brought","a"],["bringing","q"],["brings","q"],["brung","q"]]],
["build",[["built","a"],["building","q"],["builded","q"],["builds","q"]]],
["buy",[["bought","a"],["buying","q"],["buys","q"],["buy","q"]]],
["can",[["could","a"],["cans","q"],["canning","q"],["can","q"]]],
["catch",[["caught","a"],["catched","q"],["catches","q"],["catching","q"]]],
["choose",[["chose","a"],["choose","q"],["choosed","q"],["choosing","q"]]],
["come",[["came","a"],["coming","q"],["comed","q"],["comes","q"]]],
["cost",[["cost","a"],["costs","q"],["costed","q"],["costing","q"]]],
["cut",[["cut","a"],["cutted","q"],["cutter","q"],["cutting","q"]]],
["do",[["did","a"],["doing","q"],["doed","q"],["does","q"]]],
["draw",[["drew","a"],["drawn","q"],["draw","q"],["drawing","q"]]],
["drink",[["drank","a"],["drinked","q"],["drink","q"],["drunk","q"]]],
["drive",[["drove","a"],["drives","q"],["driven","q"],["driving","q"]]],
["eat",[["ate","a"],["eaten","q"],["eats","q"],["eating","q"]]],
["fall",[["fell","a"],["fallen","q"],["falls","q"],["fall","q"]]],
["feed",[["fed","a"],["feeding","q"],["feeds","q"],["felt","q"]]],
["feel",[["felt","a"],["feeling","q"],["feels","q"],["feel","q"]]],
["fight",[["fought","a"],["fighting","q"],["fights","q"],["fight","q"]]],
["find",[["found","a"],["finding","q"],["find","q"],["finds","q"]]],
["fly",[["flew","a"],["flown","q"],["flies","q"],["fly","q"]]],
["forget",[["forgot","a"],["forget","q"],["forgotten","q"],["forgets","q"]]],
["freeze",[["froze","a"],["frozen","q"],["freeze","q"],["freezes","q"]]],
["get",[["got","a"],["gotten","q"],["get","q"],["gets","q"]]],
["give",[["gave","a"],["given","q"],["give","q"],["gives","q"]]],
["go",[["went","a"],["gone","q"],["goes","q"],["going","q"]]],
["grow",[["grew","a"],["grown","q"],["grows","q"],["grow","q"]]],
["hang",[["hung","a"],["hangs","q"],["hanging","q"],["hanger","q"]]],
["have",[["had","a"],["have","q"],["has","q"],["halved","q"]]],
["hear",[["heard","a"],["hears","q"],["hearing","q"],["hear","q"]]],
["hide",[["hid","a"],["hidden","q"],["hide","q"],["hides","q"]]],
["hit",[["hit","a"],["hits","q"],["hitting","q"],["hitted","q"]]],
["hold",[["held","a"],["hold","q"],["holds","q"],["holding","q"]]],
["hurt",[["hurt","a"],["hurting","q"],["hurts","q"],["hurted","q"]]],
["keep",[["kept","a"],["keeps","q"],["keep","q"],["keeping","q"]]],
["know",[["knew","a"],["known","q"],["knows","q"],["knowing","q"]]],
["lead",[["led","a"],["leads","q"],["leading","q"],["lead","q"]]],
["leave",[["left","a"],["leaves","q"],["leave","q"],["leaving","q"]]],
["lend",[["lent","a"],["lending","q"],["lend","q"],["lends","q"]]],
["let",[["let","a"],["lets","q"],["letting","q"],["letted","q"]]],
["lose",[["lost","a"],["lose","q"],["loses","q"],["losing","q"]]],
["make",[["made","a"],["make","q"],["maked","q"],["making","q"]]],
["mean",[["meant","a"],["means","q"],["mean","q"],["meaning","q"]]],
["meet",[["met","a"],["meeting","q"],["meet","q"],["meets","q"]]],
["pay",[["paid","a"],["paying","q"],["pay","q"],["pays","q"]]],
["put",[["put","a"],["puts","q"],["putting","q"],["putted","q"]]],
["read",[["read","a"],["reading","q"],["reads","q"],["reader","q"]]],
["ride",[["rode","a"],["ridden","q"],["riding","q"],["ride","q"]]],
["ring",[["rang","a"],["rung","q"],["ring","q"],["ringer","q"]]],
["rise",[["rose","a"],["risen","q"],["rise","q"],["rises","q"]]],
["run",[["ran","a"],["run","q"],["running","q"],["runs","q"]]],
["say",[["said","a"],["says","q"],["say","q"],["saying","q"]]],
["see",[["saw","a"],["seen","q"],["see","q"],["sees","q"]]],
["sell",[["sold","a"],["sell","q"],["sells","q"],["selling","q"]]],
["send",[["sent","a"],["sends","q"],["sending","q"],["send","q"]]],
["set",[["set","a"],["setting","q"],["sets","q"],["setted","q"]]],
["shoot",[["shot","a"],["shooting","q"],["shoot","q"],["shooted","q"]]],
["shut",[["shut","a"],["shutting","q"],["shuts","q"],["shutted","q"]]],
["sing",[["sang","a"],["sung","q"],["sings","q"],["singing","q"]]],
["sit",[["sat","a"],["sits","q"],["sitting","q"],["sitted","q"]]],
["sleep",[["slept","a"],["sleeping","q"],["sleep","q"],["sleeps","q"]]],
["speak",[["spoke","a"],["spoken","q"],["speaks","q"],["speaking","q"]]],
["spend",[["spent","a"],["spend","q"],["spending","q"],["spends","q"]]],
["stand",[["stood","a"],["stands","q"],["stand","q"],["standing","q"]]],
["steal",[["stole","a"],["stolen","q"],["steal","q"],["steals","q"]]],
["stick",[["stuck","a"],["sticked","q"],["sticking","q"],["stick","q"]]],
["swim",[["swam","a"],["swum","q"],["swim","q"],["swimming","q"]]],
["take",[["took","a"],["taken","q"],["takes","q"],["take","q"]]],
["teach",[["taught","a"],["teached","q"],["teach","q"],["teaches","q"]]],
["tell",[["told","a"],["tells","q"],["telling","q"],["telled","q"]]],
["think",[["thought","a"],["thinks","q"],["think","q"],["thinked","q"]]],
["throw",[["threw","a"],["thrown","q"],["throw","q"],["throws","q"]]],
["wake",[["woke","a"],["woken","q"],["wake","q"],["waked","q"]]],
["wear",[["wore","a"],["worn","q"],["wear","q"],["wears","q"]]],
["win",[["won","a"],["wins","q"],["win","q"],["winning","q"]]],
["write",[["wrote","a"],["written","q"],["write","q"],["writes","q"]]]
]
wordlist = random.sample(irregular_verbs,1)[0]
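# the correct past simple is the first option; record it before shuffling the choices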
self.answer = wordlist[1][0][0]
random.shuffle(wordlist[1])
self.frag0 = wordlist[1][0][0]
self.frag1 = wordlist[1][1][0]
self.frag2 = wordlist[1][2][0]
self.frag3 = wordlist[1][3][0]
self.display.fill(IrregularVerbs.WHITE)
self.rendered_text_word = self.render_textrect(wordlist[0], self.my_font, self.my_rect, IrregularVerbs.PURPLE, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 2)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 0)
self.display.blit(self.rendered_text_word, self.my_rect.topleft)
self.display.blit(self.rendered_text_frag_1, self.my_rect_frag_1.topleft)
self.display.blit(self.rendered_text_frag_2, self.my_rect_frag_2.topleft)
self.display.blit(self.rendered_text_frag_3, self.my_rect_frag_3.topleft)
self.display.blit(self.rendered_text_frag_4, self.my_rect_frag_4.topleft)
pygame.display.update()
return
def deactivate_keys(self):
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYUP and event.key == pygame.K_SPACE:
return
def refresh_screen(self, fragment, player):
if fragment == self.answer:#winner!
self.display.fill(IrregularVerbs.WHITE)
if self.frag0 == fragment and player == 1:
self.rendered_text_word = self.render_textrect("Player 1 Wins!", self.my_font, self.my_rect, IrregularVerbs.GREEN, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, IrregularVerbs.GREEN, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 2)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 0)
elif self.frag1 == fragment and player == 1:
self.rendered_text_word = self.render_textrect("Player 1 Wins!", self.my_font, self.my_rect, IrregularVerbs.GREEN, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, IrregularVerbs.GREEN, IrregularVerbs.WHITE, 2)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 0)
elif self.frag2 == fragment and player == 1:
self.rendered_text_word = self.render_textrect("Player 1 Wins!", self.my_font, self.my_rect, IrregularVerbs.GREEN, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 2)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, IrregularVerbs.GREEN, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 0)
elif self.frag3 == fragment and player == 1:
self.rendered_text_word = self.render_textrect("Player 1 Wins!", self.my_font, self.my_rect, IrregularVerbs.GREEN, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 2)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, IrregularVerbs.GREEN, IrregularVerbs.WHITE, 0)
elif self.frag0 == fragment and player == 2:
self.rendered_text_word = self.render_textrect("Player 2 Wins!", self.my_font, self.my_rect, IrregularVerbs.GREEN, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, IrregularVerbs.GREEN, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 2)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 0)
elif self.frag1 == fragment and player == 2:
self.rendered_text_word = self.render_textrect("Player 2 Wins!", self.my_font, self.my_rect, IrregularVerbs.GREEN, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, IrregularVerbs.GREEN, IrregularVerbs.WHITE, 2)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 0)
elif self.frag2 == fragment and player == 2:
self.rendered_text_word = self.render_textrect("Player 2 Wins!", self.my_font, self.my_rect, IrregularVerbs.GREEN, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 2)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, IrregularVerbs.GREEN, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 0)
elif self.frag3 == fragment and player == 2:
self.rendered_text_word = self.render_textrect("Player 2 Wins!", self.my_font, self.my_rect, IrregularVerbs.GREEN, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 2)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, IrregularVerbs.GREEN, IrregularVerbs.WHITE, 0)
self.display.blit(self.rendered_text_word, self.my_rect.topleft)
self.display.blit(self.rendered_text_frag_1, self.my_rect_frag_1.topleft)
self.display.blit(self.rendered_text_frag_2, self.my_rect_frag_2.topleft)
self.display.blit(self.rendered_text_frag_3, self.my_rect_frag_3.topleft)
self.display.blit(self.rendered_text_frag_4, self.my_rect_frag_4.topleft)
pygame.display.update()
self.sound_win.play()
self.deactivate_keys()
if fragment != self.answer:#loser
self.display.fill(IrregularVerbs.WHITE)
if self.frag0 == fragment and player == 1:
self.rendered_text_word = self.render_textrect("Player 1 Loses!", self.my_font, self.my_rect, IrregularVerbs.RED, IrregularVerbs.WHITE, 1)#last 0 is to left align
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, IrregularVerbs.RED, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 2)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 0)
elif self.frag1 == fragment and player == 1:
self.rendered_text_word = self.render_textrect("Player 1 Loses!", self.my_font, self.my_rect, IrregularVerbs.RED, IrregularVerbs.WHITE, 1)#last 0 is to left align
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, IrregularVerbs.RED, IrregularVerbs.WHITE, 2)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 0)
elif self.frag2 == fragment and player == 1:
self.rendered_text_word = self.render_textrect("Player 1 Loses!", self.my_font, self.my_rect, IrregularVerbs.RED, IrregularVerbs.WHITE, 1)#last 0 is to left align
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 2)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, IrregularVerbs.RED, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 0)
elif self.frag3 == fragment and player == 1:
self.rendered_text_word = self.render_textrect("Player 1 Loses!", self.my_font, self.my_rect, IrregularVerbs.RED, IrregularVerbs.WHITE, 1)#last 0 is to left align
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 2)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, IrregularVerbs.RED, IrregularVerbs.WHITE, 0)
elif self.frag0 == fragment and player == 2:
self.rendered_text_word = self.render_textrect("Player 2 Loses!", self.my_font, self.my_rect, IrregularVerbs.RED, IrregularVerbs.WHITE, 1)#last 0 is to left align
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, IrregularVerbs.RED, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 2)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 0)
elif self.frag1 == fragment and player == 2:
self.rendered_text_word = self.render_textrect("Player 2 Loses!", self.my_font, self.my_rect, IrregularVerbs.RED, IrregularVerbs.WHITE, 1)#last 0 is to left align
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, IrregularVerbs.RED, IrregularVerbs.WHITE, 2)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 0)
elif self.frag2 == fragment and player == 2:
self.rendered_text_word = self.render_textrect("Player 2 Loses!", self.my_font, self.my_rect, IrregularVerbs.RED, IrregularVerbs.WHITE, 1)#last 0 is to left align
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 2)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, IrregularVerbs.RED, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 0)
elif self.frag3 == fragment and player == 2:
self.rendered_text_word = self.render_textrect("Player 2 Loses!", self.my_font, self.my_rect, IrregularVerbs.RED, IrregularVerbs.WHITE, 1)#last 0 is to left align
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 2)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, IrregularVerbs.BLACK, IrregularVerbs.WHITE, 1)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, IrregularVerbs.RED, IrregularVerbs.WHITE, 0)
self.display.blit(self.rendered_text_word, self.my_rect.topleft)
self.display.blit(self.rendered_text_frag_1, self.my_rect_frag_1.topleft)
self.display.blit(self.rendered_text_frag_2, self.my_rect_frag_2.topleft)
self.display.blit(self.rendered_text_frag_3, self.my_rect_frag_3.topleft)
self.display.blit(self.rendered_text_frag_4, self.my_rect_frag_4.topleft)
pygame.display.update()
self.sound_loss.play()
self.deactivate_keys()
def run(self):
while not self.finished:
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.finished = True
if event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE:
self.finished = True
if event.key == pygame.K_SPACE:
self.new_user()
if event.key == pygame.K_a:
self.refresh_screen(self.frag0, player=1)
elif event.key == pygame.K_e:
self.refresh_screen(self.frag0, player=2)
elif event.key == pygame.K_b:
self.refresh_screen(self.frag1, player=1)
elif event.key == pygame.K_f:
self.refresh_screen(self.frag1, player=2)
elif event.key == pygame.K_c:
self.refresh_screen(self.frag2, player=1)
elif event.key == pygame.K_g:
self.refresh_screen(self.frag2, player=2)
elif event.key == pygame.K_d:
self.refresh_screen(self.frag3, player=1)
elif event.key == pygame.K_h:
self.refresh_screen(self.frag3, player=2)
else:
pass
pygame.display.update()
pygame.quit()
sys.exit()
if __name__ == '__main__':
new_game = IrregularVerbs()
new_game.run()
```
#### File: Adjective Adverb/Adverb or Adjective (Deprecated)/AdverbOrAdjectiveOriginalINTRO.py
```python
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
import pygame
import random
import sys
TRACKING_COLOR = pygame.color.Color("green")
HIGHLIGHT_COLOR = pygame.color.Color("red")
BG_COLOR = pygame.color.Color("white")
GAME_TIME = 60# seconds
class BodyGameRuntime(object):
def __init__(self):
pygame.init()
pygame.mixer.init()
self.beep_sound = pygame.mixer.Sound('audio\\beep.ogg')
self.buzz_sound = pygame.mixer.Sound('audio\\buzz.ogg')
self._infoObject = pygame.display.Info()
self._screen = pygame.display.set_mode((self._infoObject.current_w >> 1,
self._infoObject.current_h >> 1),
pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)
pygame.display.set_caption("Kinect Game Framework Test")
self.finished = False
self._clock = pygame.time.Clock()
self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color |
PyKinectV2.FrameSourceTypes_Body)
self._frame_surface = pygame.Surface((self._kinect.color_frame_desc.Width,
self._kinect.color_frame_desc.Height), 0, 32)
self._bodies = None
self.score = 0
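# each sentence maps to [correct word, distractor]; the first item is the expected answer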
self.vocab_dict = {"People drive ____ these days.":["quickly", "quick"],
"She has an ____ dog.":["active", "actively"],
"He ____ opens the mail.":["carefully", "careful"],
"The man ____ greets his friends.":["cheerfully", "cheerful"],
"That is a ____ sofa!":["comfortable", "comfortably"],
"The alarm sounds ____.":["continuously", "continuous"],
"That woman is ____!":["crazy", "crazily"],
"The woman speaks ____.":["delightfully", "delightful"],
"Juan is a very ____ carpenter.":["creative", "creatively"],
"Wow! That is a ____ storm!":["destructive", "destructively"],
"The racecar drove ____ by the school.":["powerfully", "powerful"],
"Juana ____ said NO!":["firmly", "firm"],
"He ____ opened the door.":["forcefully", "forceful"],
"It was a ____ day.":["glorious", "gloriously"],
"Maria ____ observed her ex-boyfriend.":["hatefully", "hateful"],
"He had a ___ idea.":["hopeful", "hopefully"],
"It was an ____ phrase.":["insulting", "insultingly"],
"Jenny ____ ate the last cookie.":["intentionally", "intentional"],
"He likes ____ music.":["irritating", "irritatingly"],
"Careful! That is a ___ dog!":["bad", "badly"],
"The man reacted ___ to the good news.":["speedily", "speedy"],
"Susana has always been a ____ girl.":["nice", "nicely"],
"The boys plunged into the ____ water.":["deep", "deeply"],
"The girl ____ saved her cat from the fire.":["bravely", "brave"],
"The man ____ drank too much alcohol.":["foolishly", "foolish"],
"Mario is ____ and never does his homework.":["lazy", "lazily"],
"The teacher is very ____.":["rude", "rudely"],
"The girl plays soccer ____.":["perfectly", "perfect"],
"It was an ____ crash.":["accidental", "accidentally"],
"That is an ____ turtle!.":["angry", "angrily"],
"She ____ ate her beans.":["happily", "happy"],
"John spoke ____.":["seriously", "serious"],
"Firulais is a ____ dog.":["loyal", "loyally"],
"Margie yelled ____ into the night.":["blindly", "blind"],
"He ran ____ toward me.":["wildly", "wild"],
"Pedro is ____!":["innocent", "innocently"],
"The gross man winked at her ____.":["sexually", "sexual"],
"Concepcion is a ____ girlfriend.":["jealous", "jealously"],
"Luis ____ goes to the bar.":["frequently", "frequent"],
"We didn't go out because it was raining ____.":["heavily", "heavy"],
"Our team lost the game because we played ____.":["badly", "bad"],
"We waited ____.":["patiently", "patient"],
"Jimmy arrived ____.":["unexpectedly", "unexpected"],
"Mike stays fit by playing tennis ____.":["regularly", "regular"],
"The driver of the car was ____ injured.":["seriously", "serious"],
"The driver of the car had ____ injuries.":["serious", "seriously"],
"Ismael looked ____ at Eleazar.":["hungrily", "hungry"],
"She is a ____ driver.":["dangerous", "dangerously"]}
self._frame_surface.fill((255, 255, 255))
def text_objects(self, text, font):
text_surface = font.render(text, True, (0, 0, 0))
return text_surface, text_surface.get_rect()
def message_display(self, text, loc_tuple, loc_int):
# loc_int: 1 center, 2 top left, 3 bottom left, 4 bottom right, 5 top right
text_surf, text_rect = self.text_objects(text, pygame.font.Font(None, 64))
loc_dict = {1: 'center', 2: 'topleft', 3: 'bottomleft', 4: 'bottomright', 5: 'topright'}
setattr(text_rect, loc_dict[loc_int], loc_tuple)
self._frame_surface.blit(text_surf, text_rect)
return text_rect
def draw_ind_point(self, joints, jointPoints, color, highlight_color, rect0, rect1, joint0, words, sentence, correct_word):
joint0State = joints[joint0].TrackingState
if (joint0State == PyKinectV2.TrackingState_NotTracked or
joint0State == PyKinectV2.TrackingState_Inferred):
return
center = (int(jointPoints[joint0].x), int(jointPoints[joint0].y))
if (rect0.collidepoint(center) and words[0] == correct_word) or (rect1.collidepoint(center) and words[1] == correct_word):
self.score += 1
self.beep_sound.play()
pygame.time.delay(500)
self.new_round()
elif rect0.collidepoint(center) or rect1.collidepoint(center):
try:
pygame.draw.circle(self._frame_surface, highlight_color, center, 20, 0)
self.score -= 1
self.buzz_sound.play()
pygame.time.delay(500)
self.new_round()
except: # need to catch it due to possible invalid positions (with inf)
pass
else:
try:
pygame.draw.circle(self._frame_surface, color, center, 20, 0)
except:
pass
def draw_ind_intro_point(self, joints, jointPoints, color, joint0):
joint0State = joints[joint0].TrackingState
if (joint0State == PyKinectV2.TrackingState_NotTracked or
joint0State == PyKinectV2.TrackingState_Inferred):
return
center = (int(jointPoints[joint0].x), int(jointPoints[joint0].y))
try:
pygame.draw.circle(self._frame_surface, color, center, 20, 0)
except:
pass
def update_intro_screen(self, joints, jointPoints, color):
self._frame_surface.fill(BG_COLOR)# blank screen before drawing points
self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_Head)
self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_WristLeft)
# may change PyKinectV2.JointType_WristRight to PyKinectV2.JointType_ElbowRight
self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_WristRight)
def update_screen(self, joints, jointPoints, color, highlight_color, words, sentence, correct_word, seconds):
self._frame_surface.fill(BG_COLOR)# blank screen before drawing points
self.message_display(sentence, (300, 900), 2)
rect0 = self.message_display(words[0], (400, 300), 1)
rect1 = self.message_display(words[1], (self._frame_surface.get_width() - 400, 300), 1)
self.message_display(str(self.score), (self._frame_surface.get_width() / 2, 800), 1)
self.message_display(str(seconds), (self._frame_surface.get_width() - 300, 800), 1)
self.draw_ind_point(joints, jointPoints, color, highlight_color, rect0,
rect1, PyKinectV2.JointType_Head, words, sentence, correct_word)
self.draw_ind_point(joints, jointPoints, color, highlight_color, rect0,
rect1, PyKinectV2.JointType_WristRight, words, sentence, correct_word)
# may change PyKinectV2.JointType_WristRight to PyKinectV2.JointType_ElbowRight
self.draw_ind_point(joints, jointPoints, color, highlight_color, rect0,
rect1, PyKinectV2.JointType_WristLeft, words, sentence, correct_word)
def end_game(self):
self._frame_surface.fill(BG_COLOR)
self.message_display("Score: {}".format(self.score), (self._frame_surface.get_width() / 2, self._frame_surface.get_height() / 2), 1)
h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width()
target_height = int(h_to_w * self._screen.get_width())
surface_to_draw = pygame.transform.scale(self._frame_surface,
(self._screen.get_width(), target_height))
self._screen.blit(surface_to_draw, (0,0))
surface_to_draw = None
pygame.display.update()
pygame.time.delay(3000)
self._kinect.close()
pygame.quit()
sys.exit()
def new_round(self):
sentence = random.sample(list(self.vocab_dict), 1)[0]
words = self.vocab_dict[sentence][:]
correct_word = words[0]
random.shuffle(words)
pygame.time.delay(500)
while not self.finished:
seconds = int(GAME_TIME - (pygame.time.get_ticks() - self.start_ticks)/1000)
if seconds <= 0:
self.end_game()
if self._kinect.has_new_body_frame():
self._bodies = self._kinect.get_last_body_frame()
if self._bodies is not None:
for i in range(0, self._kinect.max_body_count):
body = self._bodies.bodies[i]
if not body.is_tracked:
continue
joints = body.joints
joint_points = self._kinect.body_joints_to_color_space(joints)
self.update_screen(joints, joint_points, TRACKING_COLOR, HIGHLIGHT_COLOR, words, sentence, correct_word, seconds)
h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width()
target_height = int(h_to_w * self._screen.get_width())
surface_to_draw = pygame.transform.scale(self._frame_surface,
(self._screen.get_width(), target_height))
self._screen.blit(surface_to_draw, (0,0))
surface_to_draw = None
pygame.display.update()
self._clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.finished = True
if event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
self.finished = True
self.end_game()
def run(self):
self.score = 0
while not self.finished:
if self._kinect.has_new_body_frame():
self._bodies = self._kinect.get_last_body_frame()
if self._bodies is not None:
for i in range(0, self._kinect.max_body_count):
body = self._bodies.bodies[i]
if not body.is_tracked:
continue
joints = body.joints
joint_points = self._kinect.body_joints_to_color_space(joints)
self.update_intro_screen(joints, joint_points, TRACKING_COLOR)
h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width()
target_height = int(h_to_w * self._screen.get_width())
surface_to_draw = pygame.transform.scale(self._frame_surface,
(self._screen.get_width(), target_height))
self._screen.blit(surface_to_draw, (0,0))
surface_to_draw = None
pygame.display.update()
self._clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.finished = True
if event.type == pygame.KEYUP and event.key == pygame.K_SPACE:
self.start_ticks = pygame.time.get_ticks()
self.new_round()
if event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
self.finished = True
self._kinect.close()
pygame.quit()
sys.exit()
if __name__ == "__main__":
game = BodyGameRuntime()
game.run()
```
#### File: Kinect/Sentence Builder/SentenceBuilder.py
```python
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
from math import ceil
import pygame
import random
import sys
TRACKING_COLOR = pygame.color.Color("green")
HIGHLIGHT_COLOR = pygame.color.Color("red")
BG_COLOR = pygame.color.Color("white")
GAME_TIME = 60# seconds
class BodyGameRuntime(object):
def __init__(self):
pygame.init()
pygame.mixer.init()
self.beep_sound = pygame.mixer.Sound('audio\\beep.ogg')
self.buzz_sound = pygame.mixer.Sound('audio\\buzz.ogg')
self.click_sound = pygame.mixer.Sound('audio\\click.ogg')
self._infoObject = pygame.display.Info()
self._screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN, 32)
pygame.display.set_caption("Sentence Builder Game")
self.finished = False
self._clock = pygame.time.Clock()
self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color |
PyKinectV2.FrameSourceTypes_Body)
self._frame_surface = pygame.Surface((self._kinect.color_frame_desc.Width,
self._kinect.color_frame_desc.Height), 0, 32)
self._bodies = None
self.score = 0
self.sentence_list = [
"It is not acceptable to eat with your mouth open",
"It is acceptable to use a napkin",
"You shouldn't talk with food in your mouth",
"You shouldn't use bad words at the dinner table",
"This is a test sentence for the game",
"These sentences don't have any periods",
"Giraffes are herbivores, and don't eat elephants",
"Elvia came to visit the teacher in his office",
"My favorite fruits are rambutans and chirimoyas",
"The cat likes to eat the dog's food",
"Sometimes the dog gets angry and barks at the cat",
"The 19-year-old is not an exception.",
"She is like many other young women in Nuh, where she lives, in northern India.",
"Only about 30 percent of the women there can read and write.",
"That is about half the national average.",
"<NAME> left school because she had to do housework and help her mother with younger brothers and sisters, she said.",
"And there was no middle school in her community.",
"At the same time, boys living in Papika, in Haryana state, walk to the nearest high school, about four kilometers away.",
"Yet girls are not permitted to leave the small village.",
"In Papika, many of the women work in fields or care for farm animals.",
"Young girls get water, while children play.",
"Men often sit outside their homes in the sun after a cold winter.",
"This saying comes to us from <NAME>.",
"In addition to being a writer, Franklin was a printer, political thinker, politician, scientist, inventor and diplomat.",
"He was also one of the Founding Fathers of the United States.",
"So, he was a busy man.",
"But Franklin still found time to write and offer his advice to others.",
"If he were alive today, he could probably make a good living as a life coach.",
"Now, Franklin lived during the 1700s, before the metric system took effect in Europe.",
"The word ounce means something really small – just two-one-hundredths of a kilogram to be exact.",
"So, his expression meant that, when dealing with a problem, spending a small amount of time and effort early on is a good investment.",
"It can save you more trouble in the end.",
"Education officials and industry experts are debating the future of online learning.",
"And demand for such programs is increasing.",
"Classes meet online through video conferencing.",
"In this way, students are able to communicate with each other and their professors even when they are far away from school.",
"Online learning also permits older students, who work full-time and support families, to work on their education in their free time."
]
self._frame_surface.fill((255, 255, 255))
def text_objects(self, text, font):
text_surface = font.render(text, True, (0, 0, 0))
return text_surface, text_surface.get_rect()
def message_display(self, text, loc_tuple, loc_int):
# loc_int: 1 center, 2 top left, 3 bottom left, 4 bottom right, 5 top right
text_surf, text_rect = self.text_objects(text, pygame.font.Font(None, 36))
loc_dict = {1: 'center', 2: 'topleft', 3: 'bottomleft', 4: 'bottomright', 5: 'topright'}
setattr(text_rect, loc_dict[loc_int], loc_tuple)
self._frame_surface.blit(text_surf, text_rect)
return text_rect
def fragment_sentence(self, sentence):
sentence_list = sentence.split()
sentence_word_count = len(sentence_list)
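# ceil division gives the chunk size that splits the sentence into at most three fragments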
max_frag_size = ceil(sentence_word_count/3)
frag_list = []
i = 0
while i * max_frag_size <= sentence_word_count:
frag_list.append(sentence_list[i*max_frag_size:(i + 1)*max_frag_size])
i += 1
frag_list = [' '.join(words) for words in frag_list][0:3]
return frag_list
def draw_ind_point(self, joints, jointPoints, color, highlight_color,
rect0, rect1, rect2, joint0, frag_list):
joint0State = joints[joint0].TrackingState
if (joint0State == PyKinectV2.TrackingState_NotTracked or
joint0State == PyKinectV2.TrackingState_Inferred):
return
center = (int(jointPoints[joint0].x), int(jointPoints[joint0].y))
if rect0.collidepoint(center):
self.built_frag = self.built_frag + " " + frag_list[0]
self.click_sound.play()
frag_list[0] = ""
elif rect1.collidepoint(center):
self.built_frag = self.built_frag + " " + frag_list[1]
self.click_sound.play()
frag_list[1] = ""
elif rect2.collidepoint(center):
self.built_frag = self.built_frag + " " + frag_list[2]
self.click_sound.play()
frag_list[2] = ""
if frag_list[0] == "" and frag_list[1] == "" and frag_list[2] == "":
self.built_frag = self.built_frag[1:]
if self.built_frag == self.sentence:
self.score += 1
self.beep_sound.play()
self.end_round(frag_list)
else:
self.score -= 1
self.buzz_sound.play()
self.end_round(frag_list)
else:
try:
pygame.draw.circle(self._frame_surface, color, center, 40, 0)
except:
pass
def draw_ind_intro_point(self, joints, jointPoints, color, joint0):
joint0State = joints[joint0].TrackingState
if (joint0State == PyKinectV2.TrackingState_NotTracked or
joint0State == PyKinectV2.TrackingState_Inferred):
return
center = (int(jointPoints[joint0].x), int(jointPoints[joint0].y))
try:
pygame.draw.circle(self._frame_surface, color, center, 40, 0)
except:
pass
def update_intro_screen(self, joints, jointPoints, color):
self._frame_surface.fill(BG_COLOR)# blank screen before drawing points
pygame.draw.rect(self._frame_surface, HIGHLIGHT_COLOR, (400, 300, 50, 50), 0)
pygame.draw.rect(self._frame_surface, HIGHLIGHT_COLOR, (self._frame_surface.get_width() / 2, 200, 50, 50), 0)
pygame.draw.rect(self._frame_surface, HIGHLIGHT_COLOR, (self._frame_surface.get_width() - 400, 300, 50, 50), 0)
# draw rects here as examples
self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_Head)
self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_WristLeft)
# may change PyKinectV2.JointType_WristRight to PyKinectV2.JointType_ElbowRight
self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_WristRight)
def update_screen(self, joints, jointPoints, color, highlight_color, frag_list, seconds):
self._frame_surface.fill(BG_COLOR)# blank screen before drawing points
self.message_display(self.built_frag, (300, 750), 2)
rect0 = self.message_display(frag_list[0], (400, 300), 1)
rect1 = self.message_display(frag_list[1], (self._frame_surface.get_width()/2, 200), 1)
rect2 = self.message_display(frag_list[2], (self._frame_surface.get_width() - 400, 300), 1)
self.message_display(str(self.score), (self._frame_surface.get_width() / 2, 800), 1)
self.message_display(str(seconds), (self._frame_surface.get_width() - 300, 800), 1)
self.draw_ind_point(joints, jointPoints, color, highlight_color, rect0,
rect1, rect2, PyKinectV2.JointType_Head, frag_list)
self.draw_ind_point(joints, jointPoints, color, highlight_color, rect0,
rect1, rect2, PyKinectV2.JointType_WristRight, frag_list)
# may change PyKinectV2.JointType_WristRight to PyKinectV2.JointType_ElbowRight
self.draw_ind_point(joints, jointPoints, color, highlight_color, rect0,
rect1, rect2, PyKinectV2.JointType_WristLeft, frag_list)
def end_round(self, frag_list):
self._frame_surface.fill(BG_COLOR)
self.message_display(self.built_frag, (300, 750), 2)
rect0 = self.message_display(frag_list[0], (300, 300), 1)
rect1 = self.message_display(frag_list[1], (self._frame_surface.get_width() / 2, 100), 1)
rect2 = self.message_display(frag_list[2], (self._frame_surface.get_width() - 300, 300), 1)
self.message_display(str(self.score), (self._frame_surface.get_width() / 2, 800), 1)
h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width()
target_height = int(h_to_w * self._screen.get_width())
surface_to_draw = pygame.transform.scale(self._frame_surface,
(self._screen.get_width(), target_height))
self._screen.blit(surface_to_draw, (0,0))
surface_to_draw = None
pygame.display.update()
pygame.time.delay(500)
self.new_round()
def end_game(self):
self._frame_surface.fill(BG_COLOR)
self.message_display("Score: {}".format(self.score), (self._frame_surface.get_width() / 2, self._frame_surface.get_height() / 2), 1)
h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width()
target_height = int(h_to_w * self._screen.get_width())
surface_to_draw = pygame.transform.scale(self._frame_surface,
(self._screen.get_width(), target_height))
self._screen.blit(surface_to_draw, (0,0))
surface_to_draw = None
pygame.display.update()
pygame.time.delay(3000)
self._kinect.close()
pygame.quit()
sys.exit()
def new_round(self):
self.sentence = random.sample(self.sentence_list, 1)[0]
self.built_frag = ""
frag_list = self.fragment_sentence(self.sentence)
random.shuffle(frag_list)
pygame.time.delay(500)
while not self.finished:
seconds = int(GAME_TIME - (pygame.time.get_ticks() - self.start_ticks) / 1000)
if seconds <= 0:
self.end_game()
if self._kinect.has_new_body_frame():
self._bodies = self._kinect.get_last_body_frame()
if self._bodies is not None:
for i in range(0, self._kinect.max_body_count):
body = self._bodies.bodies[i]
if not body.is_tracked:
continue
joints = body.joints
joint_points = self._kinect.body_joints_to_color_space(joints)
self.update_screen(joints, joint_points, TRACKING_COLOR,
HIGHLIGHT_COLOR, frag_list, seconds)# check here
h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width()
target_height = int(h_to_w * self._screen.get_width())
surface_to_draw = pygame.transform.scale(self._frame_surface,
(self._screen.get_width(), target_height))
self._screen.blit(surface_to_draw, (0,0))
surface_to_draw = None
pygame.display.update()
self._clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.finished = True
if event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
self.finished = True
self.end_game()
def run(self):
self.score = 0
while not self.finished:
if self._kinect.has_new_body_frame():
self._bodies = self._kinect.get_last_body_frame()
if self._bodies is not None:
for i in range(0, self._kinect.max_body_count):
body = self._bodies.bodies[i]
if not body.is_tracked:
continue
joints = body.joints
joint_points = self._kinect.body_joints_to_color_space(joints)
self.update_intro_screen(joints, joint_points, TRACKING_COLOR)
h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width()
target_height = int(h_to_w * self._screen.get_width())
surface_to_draw = pygame.transform.scale(self._frame_surface,
(self._screen.get_width(), target_height))
self._screen.blit(surface_to_draw, (0,0))
surface_to_draw = None
pygame.display.update()
self._clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.finished = True
if event.type == pygame.KEYUP and event.key == pygame.K_SPACE:
self.start_ticks = pygame.time.get_ticks()
self.new_round()
if event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
self.finished = True
self._kinect.close()
pygame.quit()
sys.exit()
if __name__ == "__main__":
game = BodyGameRuntime()
game.run()
```
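`fragment_sentence` above splits each target sentence into at most three word chunks of roughly equal length. Below is a minimal standalone sketch of the same splitting idea (simplified: it always returns three chunks, the last possibly empty for very short sentences; the helper name is just for illustration).
```python
# Standalone sketch of the sentence-splitting idea used by fragment_sentence.
from math import ceil

def split_into_three(sentence):
    words = sentence.split()
    size = ceil(len(words) / 3)  # chunk size that yields at most three fragments
    return [' '.join(words[i * size:(i + 1) * size]) for i in range(3)]

print(split_into_three("It is acceptable to use a napkin"))
# ['It is acceptable', 'to use a', 'napkin']
```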
#### File: Multiple Pushbutton/6-Team Parts Of Speech Game/PartsOfSpeechTeamGame.py
```python
import pygame
import sys
from pygame.locals import *
from random import randint
class PartsOfSpeechTeamGame(object):
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
def __init__(self):
pygame.init()
pygame.mixer.init()
self.sound_wrong = pygame.mixer.Sound('audio\\wrong.ogg')
self.sound_right = pygame.mixer.Sound('audio\\right.ogg')
self.sound_win = pygame.mixer.Sound('audio\\win.ogg')
self.DISPLAYSURF = pygame.display.set_mode((0,0), pygame.FULLSCREEN)
self.xRes, self.yRes = pygame.display.get_surface().get_size()
pygame.display.set_caption('Parts of Speech Game')
self.DISPLAYSURF.fill(PartsOfSpeechTeamGame.WHITE)
self.font = pygame.font.SysFont(None, 72)
self.team_font = pygame.font.SysFont(None, 32)
self.team_1_name = 'Ants'
self.team_2_name = 'Pharmacists'
self.team_3_name = 'Apples'
self.team_4_name = 'Monkeys'
self.team_5_name = 'Red Stars'
self.team_6_name = 'Wildcats'
# starting scores (add a handicap here if needed); a head start reduces how many answers that team needs to win, and the effect depends on screen resolution.
self.team_1_score = 0
self.team_2_score = 0
self.team_3_score = 0
self.team_4_score = 0
self.team_5_score = 0
self.team_6_score = 0
self.answers_to_win = 20
self.winning_score = (self.yRes-40)/2#self.yRes minus 40px team name rectangle
self.points_per_question = self.winning_score/self.answers_to_win
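# a team wins when its bar reaches the halfway line; each correct answer grows the bar by winning_score / answers_to_win pixels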
self.active_word = ''
self.active_word_class = ''
self.vocab_tuples = (('agree','verb'),('allow','verb'),('appear','verb'),('ask','verb'),('be','verb'),('become','verb'),
('begin','verb'),('believe','verb'),('belong','verb'),('bring','verb'),('build','verb'),('carry','verb'),
('choose','verb'),('come','verb'),('connect','verb'),('consider','verb'),('continue','verb'),
('contribute','verb'),('manipulate','verb'),('create','verb'),('decide','verb'),('deliver','verb'),
('destroy','verb'),('develop','verb'),('discover','verb'),('discuss','verb'),('eat','verb'),('encourage','verb'),
('explain','verb'),('follow','verb'),('get','verb'),('give','verb'),('go','verb'),('happen','verb'),
('have','verb'),('hear','verb'),('imagine','verb'),('include','verb'),('involve','verb'),('know','verb'),
('learn','verb'),('let','verb'),('lose','verb'),('make','verb'),('obtain','verb'),('open','verb'),('pay','verb'),
('read','verb'),('realize','verb'),('receive','verb'),('remember','verb'),('say','verb'),('see','verb'),
('seem','verb'),('sell','verb'),('send','verb'),('serve','verb'),('sit','verb'),('speak','verb'),('spend','verb'),
('suffer','verb'),('suggest','verb'),('take','verb'),('teach','verb'),('tell','verb'),('think','verb'),
('try','verb'),('understand','verb'),('want','verb'),('write','verb'),
('red','adj'),('green','adj'),('purple','adj'),('yellow','adj'),('brown','adj'),('different','adj'),
('important','adj'),('new','adj'),('old','adj'),('young','adj'),('fat','adj'),('skinny','adj'),
('pretty','adj'),('ugly','adj'),('beautiful','adj'),('nice','adj'),('fantastic','adj'),('long','adj'),('short','adj'),
('tall','adj'),('big','adj'),('small','adj'),('political','adj'),('best','adj'),('worst','adj'),('happiest','adj'),
('saddest','adj'),('flirtatious','adj'),('stinky','adj'),('smelly','adj'),('squishy','adj'),('greasy','adj'),
('hot','adj'),('cold','adj'),('warm','adj'),('environmental','adj'),('financial','adj'),('scientific','adj'),
('medical','adj'),('smart','adj'),('dumb','adj'),('hairy','adj'),('smooth','adj'),('rough','adj'),('lonely','adj'),
('natural','adj'),('wrong','adj'),('incorrect','adj'),('correct','adj'),('afraid','adj'),('alive','adj'),('bad','adj'),
('good','adj'),('annoying','adj'),('irritating','adj'),('brave','adj'),('broken','adj'),('cheap','adj'),
('expensive','adj'),('dangerous','adj'),('empty','adj'),('full','adj'),('dry','adj'),('wet','adj'),('exciting','adj'),
('boring','adj'),('great','adj'),
('year','noun'),('people','noun'),('way','noun'),('day','noun'),('man','noun'),('thing','noun'),('woman','noun'),
('life','noun'),('child','noun'),('world','noun'),('family','noun'),('student','noun'),('country','noun'),
('problem','noun'),('week','noun'),('company','noun'),('system','noun'),('government','noun'),('night','noun'),
('house','noun'),('car','noun'),('book','noun'),('elephant','noun'),('cat','noun'),('dog','noun'),('hippopotamus','noun'),
('magazine','noun'),('eye','noun'),('leg','noun'),('brain','noun'),('job','noun'),('business','noun'),
('teacher','noun'),('mother','noun'),('father','noun'),('boy','noun'),('girl','noun'),('triangle','noun'),
('rectangle','noun'),('history','noun'),('war','noun'),('art','noun'),('science','noun'),('nursing','noun'),
('chemistry','noun'),('biotechnology','noun'),('money','noun'),('person','noun'),('health','noun'),('door','noun'),
('window','noun'),('ceiling','noun'),('roof','noun'),('office','noun'),('computer','noun'),('Xbox','noun'),
('stethoscope','noun'),('injection','noun'),('tree','noun'),('boat','noun'),('river','noun'),('lake','noun'),
('sky','noun'),('mango','noun'))
self.key_bindings_dict = {pygame.K_a:self.team_1_score_update,
pygame.K_e:self.team_2_score_update,
pygame.K_i:self.team_3_score_update,
pygame.K_m:self.team_4_score_update,
pygame.K_q:self.team_5_score_update,
pygame.K_u:self.team_6_score_update,
pygame.K_b:self.team_1_score_update,
pygame.K_f:self.team_2_score_update,
pygame.K_j:self.team_3_score_update,
pygame.K_n:self.team_4_score_update,
pygame.K_r:self.team_5_score_update,
pygame.K_v:self.team_6_score_update,
pygame.K_c:self.team_1_score_update,
pygame.K_g:self.team_2_score_update,
pygame.K_k:self.team_3_score_update,
pygame.K_o:self.team_4_score_update,
pygame.K_s:self.team_5_score_update,
pygame.K_w:self.team_6_score_update}
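# several keys are bound to the same handler per team, presumably so more than one physical pushbutton can be wired to each team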
def refresh_display(self):
team1Label = self.team_font.render(self.team_1_name, True, PartsOfSpeechTeamGame.BLACK)
team2Label = self.team_font.render(self.team_2_name, True, PartsOfSpeechTeamGame.BLACK)
team3Label = self.team_font.render(self.team_3_name, True, PartsOfSpeechTeamGame.BLACK)
team4Label = self.team_font.render(self.team_4_name, True, PartsOfSpeechTeamGame.BLACK)
team5Label = self.team_font.render(self.team_5_name, True, PartsOfSpeechTeamGame.BLACK)
team6Label = self.team_font.render(self.team_6_name, True, PartsOfSpeechTeamGame.BLACK)
team1LabelPos = team1Label.get_rect()
team2LabelPos = team2Label.get_rect()
team3LabelPos = team3Label.get_rect()
team4LabelPos = team4Label.get_rect()
team5LabelPos = team5Label.get_rect()
team6LabelPos = team6Label.get_rect()
team1LabelPos.centerx = (3*self.xRes/16)/2
team2LabelPos.centerx = ((2*self.xRes/16)+(self.xRes/10)+(2*self.xRes/16)+(self.xRes/10)+(self.xRes/16))/2
team3LabelPos.centerx = ((3*self.xRes/16)+(2*self.xRes/10)+(3*self.xRes/16)+(2*self.xRes/10)+(self.xRes/16))/2
team4LabelPos.centerx = ((4*self.xRes/16)+(3*self.xRes/10)+(4*self.xRes/16)+(3*self.xRes/10)+(self.xRes/16))/2
team5LabelPos.centerx = ((5*self.xRes/16)+(4*self.xRes/10)+(5*self.xRes/16)+(4*self.xRes/10)+(self.xRes/16))/2
team6LabelPos.centerx = ((6*self.xRes/16)+(5*self.xRes/10)+(6*self.xRes/16)+(5*self.xRes/10)+(self.xRes/16))/2
team1LabelPos.centery = (2*self.yRes-40)/2
team2LabelPos.centery = (2*self.yRes-40)/2
team3LabelPos.centery = (2*self.yRes-40)/2
team4LabelPos.centery = (2*self.yRes-40)/2
team5LabelPos.centery = (2*self.yRes-40)/2
team6LabelPos.centery = (2*self.yRes-40)/2
self.DISPLAYSURF.blit(team1Label,team1LabelPos)
self.DISPLAYSURF.blit(team2Label,team2LabelPos)
self.DISPLAYSURF.blit(team3Label,team3LabelPos)
self.DISPLAYSURF.blit(team4Label,team4LabelPos)
self.DISPLAYSURF.blit(team5Label,team5LabelPos)
self.DISPLAYSURF.blit(team6Label,team6LabelPos)
team1Rect = pygame.Rect(self.xRes/16,self.yRes-self.team_1_score-40,self.xRes/16,self.team_1_score)
team2Rect = pygame.Rect((2*self.xRes/16)+(self.xRes/10),self.yRes-self.team_2_score-40,self.xRes/16,self.team_2_score)
team3Rect = pygame.Rect((3*self.xRes/16)+(2*self.xRes/10),self.yRes-self.team_3_score-40,self.xRes/16,self.team_3_score)
team4Rect = pygame.Rect((4*self.xRes/16)+(3*self.xRes/10),self.yRes-self.team_4_score-40,self.xRes/16,self.team_4_score)
team5Rect = pygame.Rect((5*self.xRes/16)+(4*self.xRes/10),self.yRes-self.team_5_score-40,self.xRes/16,self.team_5_score)
team6Rect = pygame.Rect((6*self.xRes/16)+(5*self.xRes/10),self.yRes-self.team_6_score-40,self.xRes/16,self.team_6_score)
pygame.draw.rect(self.DISPLAYSURF, PartsOfSpeechTeamGame.RED, team1Rect)
pygame.draw.rect(self.DISPLAYSURF, PartsOfSpeechTeamGame.RED, team2Rect)
pygame.draw.rect(self.DISPLAYSURF, PartsOfSpeechTeamGame.RED, team3Rect)
pygame.draw.rect(self.DISPLAYSURF, PartsOfSpeechTeamGame.RED, team4Rect)
pygame.draw.rect(self.DISPLAYSURF, PartsOfSpeechTeamGame.RED, team5Rect)
pygame.draw.rect(self.DISPLAYSURF, PartsOfSpeechTeamGame.RED, team6Rect)
pygame.draw.line(self.DISPLAYSURF, PartsOfSpeechTeamGame.BLUE, (0, (self.yRes-40)/2), (self.xRes, (self.yRes-40)/2), 4)
pygame.display.update()
return
def game_over(self, team):
self.DISPLAYSURF.fill(PartsOfSpeechTeamGame.WHITE)
self.sound_win.play()
text = self.font.render(team + ' wins!', True, PartsOfSpeechTeamGame.RED)
textpos = text.get_rect()
textpos.centerx = self.DISPLAYSURF.get_rect().centerx
textpos.y = self.yRes/4
self.DISPLAYSURF.blit(text,textpos)
self.refresh_display()
self.team_1_score = 0
self.team_2_score = 0
self.team_3_score = 0
self.team_4_score = 0
self.team_5_score = 0
self.team_6_score = 0
randomWordInt = randint(0,len(self.vocab_tuples)-1)
self.active_word = self.vocab_tuples[randomWordInt][0]
self.active_word_class = self.vocab_tuples[randomWordInt][1]
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYUP and event.key == pygame.K_SPACE:
return
def deactivate_keys(self):
self.DISPLAYSURF.fill(PartsOfSpeechTeamGame.WHITE)
text = self.font.render(self.active_word, True, PartsOfSpeechTeamGame.BLACK)
textpos = text.get_rect()
textpos.centerx = self.DISPLAYSURF.get_rect().centerx
textpos.y = self.yRes/4
self.DISPLAYSURF.blit(text,textpos)
self.refresh_display()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYUP and event.key == pygame.K_SPACE:
return
def new_word(self):
randomWordInt = randint(0,len(self.vocab_tuples)-1)
self.active_word = self.vocab_tuples[randomWordInt][0]
self.active_word_class = self.vocab_tuples[randomWordInt][1]
self.DISPLAYSURF.fill(PartsOfSpeechTeamGame.WHITE)
text = self.font.render(self.active_word, True, PartsOfSpeechTeamGame.BLACK)
textpos = text.get_rect()
textpos.centerx = self.DISPLAYSURF.get_rect().centerx
textpos.y = self.yRes/4
self.DISPLAYSURF.blit(text,textpos)
self.refresh_display()
def team_1_score_update(self, score):
self.team_1_score += score
if self.team_1_score < 0:
self.team_1_score = 0
if self.team_1_score >= self.winning_score:
self.game_over(self.team_1_name)
def team_2_score_update(self, score):
self.team_2_score += score
if self.team_2_score < 0:
self.team_2_score = 0
if self.team_2_score >= self.winning_score:
self.game_over(self.team_2_name)
def team_3_score_update(self, score):
self.team_3_score += score
if self.team_3_score < 0:
self.team_3_score = 0
if self.team_3_score >= self.winning_score:
self.game_over(self.team_3_name)
def team_4_score_update(self, score):
self.team_4_score += score
if self.team_4_score < 0:
self.team_4_score = 0
if self.team_4_score >= self.winning_score:
self.game_over(self.team_4_name)
def team_5_score_update(self, score):
self.team_5_score += score
if self.team_5_score < 0:
self.team_5_score = 0
if self.team_5_score >= self.winning_score:
self.game_over(self.team_5_name)
def team_6_score_update(self, score):
self.team_6_score += score
if self.team_6_score < 0:
self.team_6_score = 0
if self.team_6_score >= self.winning_score:
self.game_over(self.team_6_name)
def run(self):
self.refresh_display()
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE:
pygame.quit()
sys.exit()
if event.key == pygame.K_SPACE:
self.new_word()
if event.key in (pygame.K_a,pygame.K_e,pygame.K_i,pygame.K_m,pygame.K_q,pygame.K_u):
if self.active_word_class == 'adj':
self.sound_right.play()
self.key_bindings_dict[event.key](self.points_per_question)
self.deactivate_keys()#alter as needed to affect classroom speed, use delay if needed
else:
self.sound_wrong.play()
self.key_bindings_dict[event.key](-self.points_per_question)
self.deactivate_keys()
if event.key in (pygame.K_b,pygame.K_f,pygame.K_j,pygame.K_n,pygame.K_r,pygame.K_v):
if self.active_word_class == 'noun':
self.sound_right.play()
self.key_bindings_dict[event.key](self.points_per_question)
self.deactivate_keys()
else:
self.sound_wrong.play()
self.key_bindings_dict[event.key](-self.points_per_question)
self.deactivate_keys()
if event.key in (pygame.K_c,pygame.K_g,pygame.K_k,pygame.K_o,pygame.K_s,pygame.K_w):
if self.active_word_class == 'verb':
self.sound_right.play()
self.key_bindings_dict[event.key](self.points_per_question)
self.deactivate_keys()
else:
self.sound_wrong.play()
self.key_bindings_dict[event.key](-self.points_per_question)
self.deactivate_keys()
pygame.display.update()
if __name__ == '__main__':
new_instance = PartsOfSpeechTeamGame()
new_instance.run()
```
#### File: Multiple Pushbutton/Parts of Speech/PartsOfSpeechPointsScore.py
```python
import pygame
import time
import math
import sqlite3
import random
import pygame.font
from pygame.locals import *
import sys
class TextRectException(Exception):
def __init__(self, message = None):
self.message = message
def __str__(self):
return self.message
class PartsOfSpeech(object):
WHITE = (255,255,255)# some colors are not currently used, but left for future modification
BLACK = (0,0,0)
GREEN = (0,128,0)
YELLOW = (255,229,51)
RED = (255,0,0)
BLUE = (0,0,255)
BROWN = (97,65,38)
PURPLE = (128,0,128)
def __init__(self):
pygame.init()
pygame.mixer.init()
self.score = 0
self.start_time = 60
try:
self.conn = sqlite3.connect('student.db')# connect to database
except:
print("Database not found!")
self.c = self.conn.cursor()
self.sound_win = pygame.mixer.Sound('audio\\ping.ogg')
self.sound_loss = pygame.mixer.Sound('audio\\buzzer.ogg')
self.my_font = pygame.font.Font(None, 48)
self.my_rect = pygame.Rect((20, 100, 760, 240))
self.my_rect_user = pygame.Rect((20, 20, 760, 80))
self.my_rect_frag_1 = pygame.Rect((20, 340, 760, 65))
self.my_rect_frag_2 = pygame.Rect((20, 405, 760, 65))
self.my_rect_frag_3 = pygame.Rect((20, 470, 760, 65))
self.my_rect_frag_4 = pygame.Rect((20, 535, 760, 65))
self.display = pygame.display.set_mode((800, 600))# change to desired resolution -- you'll need to modify rect size.
pygame.display.set_caption("Parts of Speech Game")
self.display.fill(PartsOfSpeech.WHITE)
pygame.display.update()
self.finished = False
def render_textrect(self, string, font, rect, text_color, background_color, justification=0):
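        # Word-wraps 'string' to fit the width of 'rect' and renders it onto a new
        # surface filled with background_color. justification: 0 = left, 1 = centered,
        # 2 = right. Raises TextRectException if a single word is wider than the rect
        # or the wrapped text is taller than the rect.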
final_lines = []
requested_lines = string.splitlines()
for requested_line in requested_lines:
if font.size(requested_line)[0] > rect.width:
words = requested_line.split(' ')
for word in words:
if font.size(word)[0] >= rect.width:
raise TextRectException("The word " + word + " is too long to fit in the rect passed.")
accumulated_line = ""
for word in words:
test_line = accumulated_line + word + " "
if font.size(test_line)[0] < rect.width:
accumulated_line = test_line
else:
final_lines.append(accumulated_line)
accumulated_line = word + " "
final_lines.append(accumulated_line)
else:
final_lines.append(requested_line)
surface = pygame.Surface(rect.size)
surface.fill(background_color)
accumulated_height = 0
for line in final_lines:
if accumulated_height + font.size(line)[1] >= rect.height:
raise TextRectException("After word wrap, the text string was too tall to fit in the provided rect.")
if line != "":
tempsurface = font.render(line, 1, text_color)
if justification == 0:
surface.blit(tempsurface, (0, accumulated_height))
elif justification == 1:
surface.blit(tempsurface, ((rect.width - tempsurface.get_width()) / 2, accumulated_height))
elif justification == 2:
surface.blit(tempsurface, (rect.width - tempsurface.get_width(), accumulated_height))
else:
raise TextRectException("Invalid justification argument: " + str(justification))
accumulated_height += font.size(line)[1]
return surface
def special_render_textrect(self, string, font, rect, text_color, special_color, background_color, justification=0):
final_lines = []
requested_lines = string.splitlines()
for requested_line in requested_lines:
if font.size(requested_line)[0] > rect.width:
words = requested_line.split(' ')
for word in words:
if font.size(word)[0] >= rect.width:
raise TextRectException("The word " + word + " is too long to fit in the rect passed.")
accumulated_line = ""
for word in words:
test_line = accumulated_line + word + " "
if font.size(test_line)[0] < rect.width:
accumulated_line = test_line
else:
final_lines.append(accumulated_line)
accumulated_line = word + " "
final_lines.append(accumulated_line)
else:
final_lines.append(requested_line)
surface = pygame.Surface(rect.size)
surface.fill(background_color)
accumulated_height = 0
for line in final_lines:
if accumulated_height + font.size(line)[1] >= rect.height:
raise TextRectException("After word wrap, the text string was too tall to fit in the provided rect.")
if line != "":
if self.sentence_pos_word in line:
tempsurface = font.render(line[0:line.index(self.sentence_pos_word)], 1, text_color)
tempsurface1 = font.render(self.sentence_pos_word, 1, special_color)
tempsurface2 = font.render(line[line.index(self.sentence_pos_word)+len(self.sentence_pos_word):-1], 1, text_color)
surface.blit(tempsurface, (0, accumulated_height))
surface.blit(tempsurface1, (tempsurface.get_width(), accumulated_height))
surface.blit(tempsurface2, (tempsurface.get_width()+tempsurface1.get_width(), accumulated_height))
else:
tempsurface = font.render(line, 1, text_color)
if justification == 0:
surface.blit(tempsurface, (0, accumulated_height))
elif justification == 1:
surface.blit(tempsurface, ((rect.width - tempsurface.get_width()) / 2, accumulated_height))
elif justification == 2:
surface.blit(tempsurface, (rect.width - tempsurface.get_width(), accumulated_height))
else:
raise TextRectException("Invalid justification argument: " + str(justification))
accumulated_height += font.size(line)[1]
return surface
def new_user(self):
self.pos_list = ["noun","verb","adjective","adverb","conjunction","preposition","proper noun","interjection","possessive pronoun","pronoun","article","possessive noun"]
sentence_list = [["The English rock band <NAME> released the album Dark Side of the Moon in 1973.",[["band","noun"],["album","noun"],["released","verb"],["Dark","adjective"]]],
["In 1969, NASA astronauts on the Apollo 10 space mission heard what sounded like outer-space music.",[["outer-space","adjective"],["sounded","verb"],["on","pre"],["mission","noun"]]],
["Nearly 50 years later, those mysterious noises on Apollo 10 have become a trending topic on social media.",[["topic","noun"],["noises","noun"],["mysterious","adjective"],["trending","adjective"]]],
["In May of 1969, the Apollo 10 crew approached the far side of the moon.",[["crew","noun"],["May","proper noun"],["approached","verb"],["far","adjective"]]],
["The three astronauts, <NAME>, <NAME>, and <NAME>, would have no contact with the Earth.",[["astronauts","noun"],["<NAME>","proper noun"],["<NAME>","proper noun"],["three","adjective"]]],
["But just as they approached the far side of the moon, the crew heard strange music.",[["moon","noun"],["strange","adjective"],["music","noun"],["they","pronoun"]]],
["That was the conversation between astronauts <NAME> and <NAME> after hearing the strange sounds.",[["conversation","noun"],["hearing","verb"],["after","con"],["and","con"]]],
["The story of the unusual space noises will appear on a television series called NASA's Unexplained Files.",[["television","noun"],["unusual","adjective"],["story","noun"],["appear","verb"]]],
["A preview of the episode appeared on YouTube earlier this week.",[["week","noun"],["appeared","verb"],["YouTube","proper noun"],["on","preposition"]]],
["Some news reports and social media posts wrongly said that the Apollo 10 audio files were recently made public.",[["files","noun"],["and","conjunction"],["said","verb"],["recently","adverb"]]],
["There has been praise and condemnation for Hollywood star Sean Penn this week.",[["praise","noun"],["Sean Penn","proper noun"],["and","conjunction"],["been","verb"]]],
["Sean Penn has had controversial moments throughout his public life.",[["life","noun"],["has","verb"],["his","possessive pronoun"],["throughout","preposition"]]],
["This week, the public learned that Penn met with the leader of a brutal Mexican crime group.",[["group","noun"],["learned","verb"],["brutal","adjective"],["with","preposition"]]],
["The actor interviewed <NAME> in October 2015.",[["actor","noun"],["interviewed","verb"],["October","proper noun"],["<NAME>","proper noun"]]],
["Guzman had escaped from a Mexican federal prison in July.",[["prison","noun"],["federal","adjective"],["in","preposition"],["escaped","verb"]]],
["It was his second escape.",[["escape","noun"],["second","adjective"],["It","pronoun"],["his","possessive pronoun"]]],
["He was on the lam from 2001 to 2014 after the first prison break.",[["break","noun"],["He","pronoun"],["first","adjective"],["prison","adjective"]]],
["He was serving a 20-year sentence for murder and drug trafficking.",[["murder","noun"],["20-year","adjective"],["drug","adjective"],["serving","verb"]]],
["Rolling Stone magazine published Penn's interview online January 11.",[["magazine","noun"],["published","verb"],["January","proper noun"],["interview","noun"]]],
["Mexican federal police recaptured El Chapo a day earlier.",[["day","noun"],["recaptured","verb"],["federal","adjective"],["day","noun"]]],
["In his article, Penn explained why he wanted to meet the crime leader.",[["article","noun"],["his","possessive pronoun"],["explained","verb"],["wanted","verb"]]],
["Penn is a longtime political activist.",[["activist","noun"],["Penn","proper noun"],["longtime","adjective"],["political","activist"]]],
["He has involved himself in international affairs at high levels.",[["levels","noun"],["international","adjective"],["high","adjective"],["involved","verb"]]],
["In 2007, he met and befriended <NAME>, who was then the socialist president of Venezuela.",[["president","noun"],["Venezuela","proper noun"],["befriended","verb"],["socialist","adjective"]]],
["The US government was not a fan of Chavez, who allied himself with Cuban leaders Fidel and <NAME>.",[["fan","noun"],["Cuban","adjective"],["allied","verb"],["with","preposition"]]],
["Penn also has met with <NAME>.",[["<NAME>","proper noun"],["met","verb"],["Penn","proper noun"],["has","verb"]]],
["He got involved in the Falklands dispute between Britain and Argentina.",[["dispute","noun"],["the","article"],["involved","verb"],["Argentina","proper noun"]]],
["The actor met with Argentinian President <NAME> in 2012, after Britain made military moves toward Argentina.",[["actor","noun"],["Argentinian","proper noun"],["after","preposition"],["toward","preposition"]]],
["Penn sided with Argentina in that dispute.",[["dispute","noun"],["sided","verb"],["Argentina","proper noun"],["Penn","proper noun"]]],
["Penn has written about his experiences for the news media before.",[["experiences","noun"],["his","possessive pronoun"],["about","preposition"],["news","adjective"]]],
["He visited Iraq in 2004, and Iran the following year.",[["year","noun"],["visited","verb"],["Iran","proper noun"],["He","pronoun"]]],
["He wrote about those experiences for the San Francisco Chronicle.",[["experiences","noun"],["about","preposition"],["San Francisco Chronicle","proper noun"],["for","preposition"]]],
["Penn had been to Iraq earlier to protest international military strikes against the country.",[["strikes","noun"],["Iraq","proper noun"],["to","preposition"],["against","preposition"]]],
["The star also helped in times of natural disaster.",[["star","noun"],["disaster","noun"],["in","preposition"],["natural","adjective"]]],
["He started a foundation to help victims of the earthquake in Haiti.",[["earthquake","noun"],["started","verb"],["help","verb"],["Haiti","proper noun"]]],
["He also went to New Orleans shortly after <NAME> and reportedly rescued some survivors.",[["survivors","noun"],["after","preposition"],["New Orleans","proper noun"],["rescued","verb"]]],
["Wow! In 2012, <NAME> went to Bolivia to visit a US businessman jailed there.",[["businessman","noun"],["Wow!","interjection"],["Bolivia","proper noun"],["visit","verb"]]],
["A Russian spacecraft traveling to the International Space Station Thursday had to make an emergency landing when a rocket engine failed to fire.",[["Russian","adjective"],["emergency","adjective"],["Thursday","proper noun"],["make","verb"]]],
["About two minutes after launching, the three-stage Soyuz booster rocket suffered an unspecified failure of its second stage.",[["failure","noun"],["three-stage","adjective"],["two","adjective"],["rocket","noun"]]],
["The Russian space program has suffered several failures in recent years.",[["years","noun"],["suffered","verb"],["program","noun"],["several","adjective"]]],
["The cosmonauts safely escaped in that accident as well.",[["cosmonauts","noun"],["safely","adverb"],["accident","noun"],["the","article"]]],
["Relations between the U.S. and Russia have been very tense in recent years.",[["Relations","noun"],["very","adverb"],["Russia","proper noun"],["tense","adjective"]]],
["However, the two sides have continued their cooperation in space.",[["space","noun"],["two","adjective"],["sides","noun"],["continued","verb"]]],
["Weather officials believe the storm will pass over North and South Carolina before heading out to the Atlantic Ocean in coming days.",[["before","preposition"],["the","article"],["South Carolina","proper noun"],["heading","verb"]]],
["Wood and metal from destroyed buildings covered streets and moved through flooded neighborhoods.",[["neighborhoods","noun"],["flooded","adjective"],["metal","noun"],["covered","verb"]]],
["<NAME> is the governor of Florida.",[["governor","noun"],["the","article"],["<NAME>","proper noun"],["is","verb"]]],
["Ouch! I just broke my leg.",[["Ouch!","interjection"],["broke","verb"],["my","possessive adjective"],["I","pronoun"]]],
["Holy Moly! I love mole.",[["mole","noun"],["Holy Moly!","interjection"],["I","pronoun"],["love","verb"]]],
["Don't touch Ivan's green pochocuiles.",[["pochocuiles","noun"],["Ivan's","possessive noun"],["green","adjective"],["touch","verb"]]],
["Concha and Briseida quickly run to El Chontal.",[["quickly","adverb"],["and","conjunction"],["El Chontal","proper noun"],["run","verb"]]],
["Diego hungrily swallows the gross taco.",[["taco","noun"],["the","article"],["gross","adjective"],["hungrily","adverb"]]],
["Florizet loudly said 'Oh my God!'.",[["said","verb"],["Oh my God!","interjection"],["Florizet","proper noun"],["loudly","adverb"]]],
["Jennifer hurriedly danced with La Chenta at the Video Bar.",[["hurriedly","adverb"],["the","article"],["Jennifer","proper noun"],["danced","verb"]]],
["Silvia likes to read romantic anime.",[["romantic","adjective"],["anime","noun"],["Silvia","proper noun"],["read","verb"]]],
["Yoaly and Jesus secretly pretend not to see the teacher.",[["not","adverb"],["secretly","adverb"],["and","conjunction"],["pretend","verb"]]],
["More than 98 million of the world's adolescent girls do not go to school.",[["world's","possessive noun"],["adolescent","adjective"],["not","adverb"],["go","verb"]]],
["<NAME> had been jailed for months and never charged with a crime.",[["months","noun"],["jailed","verb"],["<NAME>","proper noun"],["with","preposition"]]]]
self.c.execute('select * from users order by random() limit 1;')
userdata = self.c.fetchone()
self.username = str(userdata[1])
self.sentence = sentence_list[random.randint(0,len(sentence_list)-1)]
self.plain_sentence = self.sentence[0]
sentence_pos = self.sentence[1][random.randint(0,len(self.sentence[1])-1)]
self.sentence_pos_word = sentence_pos[0]
self.sentence_pos_wordIND = sentence_pos[1]
answerlist = random.sample(self.pos_list,3)
while sentence_pos[1] in answerlist:
answerlist = random.sample(self.pos_list,3)
answerlist.append(sentence_pos[1])
random.shuffle(answerlist)
self.frag0 = answerlist[0]
self.frag1 = answerlist[1]
self.frag2 = answerlist[2]
self.frag3 = answerlist[3]
self.display.fill(PartsOfSpeech.WHITE)
self.rendered_text = self.special_render_textrect(self.plain_sentence, self.my_font, self.my_rect, PartsOfSpeech.BLACK, PartsOfSpeech.PURPLE, PartsOfSpeech.WHITE, 1)
self.rendered_text_user = self.render_textrect("SCORE: {0}, TIME: {1}".format(self.score, round(self.start_time - self.seconds, 0)), self.my_font, self.my_rect_user, PartsOfSpeech.BROWN, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, PartsOfSpeech.RED, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, PartsOfSpeech.YELLOW, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, PartsOfSpeech.GREEN, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, PartsOfSpeech.BLUE, PartsOfSpeech.WHITE, 0)
self.display.blit(self.rendered_text, self.my_rect.topleft)
self.display.blit(self.rendered_text_user, self.my_rect_user.topleft)
self.display.blit(self.rendered_text_frag_1, self.my_rect_frag_1.topleft)
self.display.blit(self.rendered_text_frag_2, self.my_rect_frag_2.topleft)
self.display.blit(self.rendered_text_frag_3, self.my_rect_frag_3.topleft)
self.display.blit(self.rendered_text_frag_4, self.my_rect_frag_4.topleft)
pygame.display.update()
return
def refresh_screen(self, fragment):
if fragment == self.sentence_pos_wordIND:#winner!
self.score += 1
self.display.fill(PartsOfSpeech.WHITE)
rendered_text = self.special_render_textrect(self.plain_sentence, self.my_font, self.my_rect, PartsOfSpeech.BLACK, PartsOfSpeech.PURPLE, PartsOfSpeech.WHITE, 1)#need to figure out how to bold or color the word we want
rendered_text_user = self.render_textrect("SCORE: {0}, TIME: {1}".format(self.score, round(self.start_time - self.seconds, 0)), self.my_font, self.my_rect_user, PartsOfSpeech.BROWN, PartsOfSpeech.WHITE, 0)#last 0 is to left align
if self.frag0 == fragment:
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, PartsOfSpeech.GREEN, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
elif self.frag1 == fragment:
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, PartsOfSpeech.GREEN, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
elif self.frag2 == fragment:
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, PartsOfSpeech.GREEN, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
elif self.frag3 == fragment:
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, PartsOfSpeech.GREEN, PartsOfSpeech.WHITE, 0)
else:
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.display.blit(self.rendered_text, self.my_rect.topleft)
self.display.blit(self.rendered_text_user, self.my_rect_user.topleft)
self.display.blit(self.rendered_text_frag_1, self.my_rect_frag_1.topleft)
self.display.blit(self.rendered_text_frag_2, self.my_rect_frag_2.topleft)
self.display.blit(self.rendered_text_frag_3, self.my_rect_frag_3.topleft)
self.display.blit(self.rendered_text_frag_4, self.my_rect_frag_4.topleft)
pygame.display.update()
self.sound_win.play()
return
if fragment != self.sentence_pos_wordIND:#loser
self.score -= 1
self.display.fill(PartsOfSpeech.WHITE)
self.rendered_text = self.special_render_textrect(self.plain_sentence, self.my_font, self.my_rect, PartsOfSpeech.BLACK, PartsOfSpeech.PURPLE, PartsOfSpeech.WHITE, 1)
self.rendered_text_user = self.render_textrect("SCORE: {0}, TIME: {1}".format(self.score, round(self.start_time - self.seconds, 0)), self.my_font, self.my_rect_user, PartsOfSpeech.BROWN, PartsOfSpeech.WHITE, 0)
if self.frag0 == fragment:
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, PartsOfSpeech.RED, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
elif self.frag1 == fragment:
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, PartsOfSpeech.RED, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
elif self.frag2 == fragment:
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, PartsOfSpeech.RED, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
elif self.frag3 == fragment:
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, PartsOfSpeech.RED, PartsOfSpeech.WHITE, 0)
else:
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, PartsOfSpeech.BLACK, PartsOfSpeech.WHITE, 0)
self.display.blit(self.rendered_text, self.my_rect.topleft)
self.display.blit(self.rendered_text_user, self.my_rect_user.topleft)
self.display.blit(self.rendered_text_frag_1, self.my_rect_frag_1.topleft)
self.display.blit(self.rendered_text_frag_2, self.my_rect_frag_2.topleft)
self.display.blit(self.rendered_text_frag_3, self.my_rect_frag_3.topleft)
self.display.blit(self.rendered_text_frag_4, self.my_rect_frag_4.topleft)
pygame.display.update()
self.sound_loss.play()
return
self.display.fill(PartsOfSpeech.WHITE)
self.rendered_text = self.special_render_textrect(self.plain_sentence, self.my_font, self.my_rect, PartsOfSpeech.BLACK, PartsOfSpeech.PURPLE, PartsOfSpeech.WHITE, 1)
self.rendered_text_user = self.render_textrect("SCORE: {0}, TIME: {1}".format(self.score, round(self.start_time - self.seconds, 0)), self.my_font, self.my_rect_user, PartsOfSpeech.BROWN, PartsOfSpeech.WHITE, 0)#last 0 is to left align
self.rendered_text_frag_1 = self.render_textrect(self.frag0, self.my_font, self.my_rect_frag_1, PartsOfSpeech.RED, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_2 = self.render_textrect(self.frag1, self.my_font, self.my_rect_frag_2, PartsOfSpeech.YELLOW, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_3 = self.render_textrect(self.frag2, self.my_font, self.my_rect_frag_3, PartsOfSpeech.GREEN, PartsOfSpeech.WHITE, 0)
self.rendered_text_frag_4 = self.render_textrect(self.frag3, self.my_font, self.my_rect_frag_4, PartsOfSpeech.BLUE, PartsOfSpeech.WHITE, 0)
self.display.blit(self.rendered_text, self.my_rect.topleft)
self.display.blit(self.rendered_text_user, self.my_rect_user.topleft)
self.display.blit(self.rendered_text_frag_1, self.my_rect_frag_1.topleft)
self.display.blit(self.rendered_text_frag_2, self.my_rect_frag_2.topleft)
self.display.blit(self.rendered_text_frag_3, self.my_rect_frag_3.topleft)
self.display.blit(self.rendered_text_frag_4, self.my_rect_frag_4.topleft)
pygame.display.update()
return
def run(self):
self.start_ticks = pygame.time.get_ticks()# reference point for the 60-second countdown
while not self.finished:
self.seconds = (pygame.time.get_ticks() - self.start_ticks) / 1000
if 60 - self.seconds <= 0:
print(self.score)
self.finished = True
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.conn.close()
self.finished = True
if event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE:
pygame.quit()
sys.exit()
if event.key == pygame.K_SPACE:
self.new_user()
if event.key in (pygame.K_a,pygame.K_e,pygame.K_i,pygame.K_m,pygame.K_q,pygame.K_u):
if self.frag0 != "":
self.refresh_screen(self.frag0)
else:
pass
if event.key in (pygame.K_b,pygame.K_f,pygame.K_j,pygame.K_n,pygame.K_r,pygame.K_v):
if self.frag1 != "":
self.refresh_screen(self.frag1)
else:
pass
if event.key in (pygame.K_c,pygame.K_g,pygame.K_k,pygame.K_o,pygame.K_s,pygame.K_w):
if self.frag2 != "":
self.refresh_screen(self.frag2)
else:
pass
if event.key in (pygame.K_d,pygame.K_h,pygame.K_l,pygame.K_p,pygame.K_t,pygame.K_x):
if self.frag3 != "":
self.refresh_screen(self.frag3)
else:
pass
pygame.display.update()
pygame.quit()
sys.exit()
if __name__ == '__main__':
new_game = PartsOfSpeech()
new_game.run()
```
#### File: No Peripheral/Alphabet Speaking Game/Alphabet.py
```python
import pygame
import random
import sys
import time
WHITE = (255,255,255)
BLACK = (0,0,0)
GREEN = (57, 255, 20)
YELLOW = (230,230,0)
RED = (255,0,0)
BLUE = (0,0,255)
BROWN = (97,65,38)
soundwinfile = "audio\\ping.ogg"
finished = False
def new_game():
clock = pygame.time.Clock()
previous_time = 0
time_remaining = 60
score = 0
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
points = 0
finished = False
letter = letter_font.render(alphabet[random.randint(0, 25)], 1, RED)
letter_rect = letter.get_rect()
ticks = pygame.time.get_ticks()
while not finished:
if pygame.time.get_ticks() - previous_time >= 1000:
previous_time = pygame.time.get_ticks()
time_remaining -= 1
if time_remaining == 0:
display.fill(WHITE)
score_text = display_font.render("SCORE: " + str(score), 1, BROWN)
display.blit(score_text, (display_width / 2, display_height / 2))
pygame.display.update()
time.sleep(5)
finished = True
time_text = display_font.render(str(time_remaining), 1, BLUE)
score_text = display_font.render(str(score), 1, RED)
display.fill(WHITE)
display.blit(time_text, rect_time)
display.blit(score_text, rect_score)
letter_rect.center = (display_width / 2, display_height / 2)
display.blit(letter, letter_rect)
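        # Countdown bar: each letter gives a 5-second window. The bar shrinks as time
        # passes and is drawn green, then yellow after 3 s, then red after 4 s. If the
        # window expires, a point is deducted and a new letter is chosen; pressing the
        # A key in time awards points proportional to the time remaining (see below).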
bartime = pygame.time.get_ticks() - ticks
if bartime < 3000:
pygame.draw.rect(display, GREEN, [30, display_height - 80, abs(1 - (bartime / 5000)) * (display_width - 60), 40])
elif bartime < 4000:
pygame.draw.rect(display, YELLOW, [30, display_height - 80, abs(1 - (bartime / 5000)) * (display_width - 60), 40])
else:
pygame.draw.rect(display, RED, [30, display_height - 80, abs(1 - (bartime / 5000)) * (display_width - 60), 40])
pygame.draw.rect(display, BLACK, [30, display_height - 80, display_width - 60, 40], 5)
if bartime >= 5000:
ticks = pygame.time.get_ticks()
score -= 1
letter = letter_font.render(alphabet[random.randint(0, 25)], 1, RED)
letter_rect = letter.get_rect()
for event in pygame.event.get():
if event.type == pygame.QUIT:
finished = True
if event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE:
pygame.quit()
sys.exit()
if event.key == pygame.K_SPACE:
new_game()
if event.key == pygame.K_a:
score += int((5000 - bartime) / 10)
soundwin.play()
ticks = pygame.time.get_ticks()
letter = letter_font.render(alphabet[random.randint(0, 25)], 1, RED)
letter_rect = letter.get_rect()
clock.tick(30)
pygame.display.update()
pygame.quit()
sys.exit()
pygame.init()
pygame.mixer.init()
soundwin = pygame.mixer.Sound(soundwinfile)
display = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
display_width = display.get_width()
display_height = display.get_height()
display_font = pygame.font.Font(None, 48)
letter_font = pygame.font.Font(None, 800)
rect_score = pygame.Rect((display_width - 120, 50, 120, 50))
rect_time = pygame.Rect((display_width - 120, 0, 50, 50))
display.fill(WHITE)
pygame.display.update()
while not finished:
for event in pygame.event.get():
if event.type == pygame.QUIT:
finished = True
if event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE:
pygame.quit()
sys.exit()
if event.key == pygame.K_SPACE:
new_game()
pygame.quit()
sys.exit()
```
|
{
"source": "jgerschler/OpenWeatherMap-Wrappers",
"score": 4
}
|
#### File: jgerschler/OpenWeatherMap-Wrappers/CurrentWeather.py
```python
import json, time
from datetime import datetime
from urllib2 import Request, urlopen, HTTPError# update to use requests module?
class CurrentWeather(object):
"""A class for getting the current US weather using the OpenWeatherMap API."""
def __init__(self, zipcode, api_key):
self.zipcode = zipcode
self.api_key = api_key
def connect(self):# may need to add more error-checking; API results seem to deviate from documentation
"""connects to OpenWeatherMap server and pull weather data."""
request = Request('http://api.openweathermap.org/data/2.5/weather?zip={0},us&APPID={1}'.format(self.zipcode, self.api_key))
try:
data = urlopen(request).read()
except HTTPError:
print("No current weather data available.")
return 0
self.decoded_dict = json.loads(data)
if self.decoded_dict.get('message') == 'not found':
return "Data unavailable"
self.data_collection_time = self.decoded_dict.get('dt', 'unknown')
self.cloud_cover = self.decoded_dict.get('clouds', 'unknown').get('all', 'unknown')
self.city_name = self.decoded_dict.get('name', 'unknown')
self.longitude = self.decoded_dict.get('coord', 'unknown').get('lon', 'unknown')
self.latitude = self.decoded_dict.get('coord', 'unknown').get('lat', 'unknown')
self.country = self.decoded_dict.get('sys', 'unknown').get('country', 'unknown')
self.sunset_time = self.decoded_dict.get('sys', 'unknown').get('sunset', 'unknown')
self.sunrise_time = self.decoded_dict.get('sys', 'unknown').get('sunrise', 'unknown')
self.weather_cond_id = self.decoded_dict.get('weather', 'unknown')[0].get('id', 'unknown')# multiple weather conditions can be included here.
self.weather_group = self.decoded_dict.get('weather', 'unknown')[0].get('main', 'unknown')# currently, we'll just get the primary.
self.weather_description = self.decoded_dict.get('weather', 'unknown')[0].get('description', 'unknown')
try:
self.rain_3h = self.decoded_dict.get('rain', 'unknown').get('3h', 'unknown')
except:
self.rain_3h = 0
self.pressure = self.decoded_dict.get('main', 'unknown').get('pressure', 'unknown')
self.temp_min = self.decoded_dict.get('main', 'unknown').get('temp_min', 'unknown')
self.temp_max = self.decoded_dict.get('main', 'unknown').get('temp_max', 'unknown')
self.temp = self.decoded_dict.get('main', 'unknown').get('temp', 'unknown')
self.humidity = self.decoded_dict.get('main', 'unknown').get('humidity', 'unknown')
self.city_id = self.decoded_dict.get('id', 'unknown')
self.wind_speed = self.decoded_dict.get('wind', 'unknown').get('speed', 'unknown')
self.wind_gust = self.decoded_dict.get('wind', 'unknown').get('gust', 'unknown')
self.wind_direction = self.decoded_dict.get('wind', 'unknown').get('deg', 'unknown')
return 1
def record_data(self, interval, filename, max_data_points):
"""periodically connect to API and write results to CSV file."""
datafile = open(filename, 'w')
datafile.write("zipcode,data_collection_time,cloud_cover,weather_group,weather_description,pressure,temp,humidity,wind_speed,wind_direction\n")
for i in range(max_data_points):
self.connect()
datafile.write(str(self.zipcode)+","+str(self.data_collection_time)+","+str(self.cloud_cover)+","+str(self.weather_group)+","+str(self.weather_description)+","+str(self.pressure)+","+
str(self.temp)+","+str(self.humidity)+","+str(self.wind_speed)+","+str(self.wind_direction)+"\n")
print("Data point {0} recorded").format(str(i+1))
if i < (max_data_points-1):
time.sleep(interval*60)
datafile.close()
def parse(self, data):# for directly parsing data string
"""parses data directly from a string."""
self.decoded_dict = json.loads(data)
self.data_collection_time = self.decoded_dict.get('dt', 'unknown')
self.cloud_cover = self.decoded_dict.get('clouds', 'unknown').get('all', 'unknown')
self.city_name = self.decoded_dict.get('name', 'unknown')
self.longitude = self.decoded_dict.get('coord', 'unknown').get('lon', 'unknown')
self.latitude = self.decoded_dict.get('coord', 'unknown').get('lat', 'unknown')
self.country = self.decoded_dict.get('sys', 'unknown').get('country', 'unknown')
self.sunset_time = self.decoded_dict.get('sys', 'unknown').get('sunset', 'unknown')
self.sunrise_time = self.decoded_dict.get('sys', 'unknown').get('sunrise', 'unknown')
self.weather_cond_id = self.decoded_dict.get('weather', 'unknown')[0].get('id', 'unknown')
self.weather_group = self.decoded_dict.get('weather', 'unknown')[0].get('main', 'unknown')
self.weather_description = self.decoded_dict.get('weather', 'unknown')[0].get('description', 'unknown')
try:
self.rain_3h = self.decoded_dict.get('rain', 'unknown').get('3h', 'unknown')
except:
self.rain_3h = None
self.pressure = self.decoded_dict.get('main', 'unknown').get('pressure', 'unknown')
self.temp_min = self.decoded_dict.get('main', 'unknown').get('temp_min', 'unknown')
self.temp_max = self.decoded_dict.get('main', 'unknown').get('temp_max', 'unknown')
self.temp = self.decoded_dict.get('main', 'unknown').get('temp', 'unknown')
self.humidity = self.decoded_dict.get('main', 'unknown').get('humidity', 'unknown')
self.city_id = self.decoded_dict.get('id', 'unknown')
self.wind_speed = self.decoded_dict.get('wind', 'unknown').get('speed', 'unknown')
self.wind_gust = self.decoded_dict.get('wind', 'unknown').get('gust', 'unknown')
self.wind_direction = self.decoded_dict.get('wind', 'unknown').get('deg', 'unknown')
def get_data_collection_time(self):
"""returns data collection time as tuple (UTC UNIX time, UTC time)"""
return (self.data_collection_time, datetime.fromtimestamp(int(self.data_collection_time)).strftime('%Y-%m-%d %H:%M:%S'))
def get_cloud_cover(self):
"""returns cloud cover as tuple (value, unit)"""
return (self.cloud_cover, "%")
def get_location(self):
"""returns city and country as string"""
return "{0}, {1}".format(self.city_name, self.country)
def get_lat_lon(self):
"""returns latitude and longitude as tuple (latitude, longitude)"""
return (self.latitude, self.longitude)
def get_sunrise(self):
"""returns sunrise time as tuple (UTC UNIX time, UTC time)"""
return (self.sunrise_time, datetime.fromtimestamp(int(self.sunrise_time)).strftime('%Y-%m-%d %H:%M:%S'))
def get_sunset(self):
"""returns sunrise time as tuple (UTC UNIX time, UTC time)"""
return (self.sunset_time, datetime.fromtimestamp(int(self.sunset_time)).strftime('%Y-%m-%d %H:%M:%S'))
def get_weather(self):
"""returns a tuple: (weather group, weather description)"""
return (self.weather_group, self.weather_description)
def get_rain_3h(self):
"""returns the quantity of rain that has fallen in the last 3 hours as tuple (value, unit)"""
return (self.rain_3h, "mm")
def get_pressure(self):
"""returns the current barometric pressure as tuple (value, unit)"""
return (self.pressure, "hPa")
def get_temp(self):
"""returns the current temperature as tuple (value, unit)"""
return (self.temp, "K")
def get_humidity(self):
"""returns the current humidity as tuple (value, unit)"""
return (self.humidity, "%")
def get_wind_speed(self):
"""returns the current wind speed as tuple (value, unit)"""
return (self.wind_speed, "meters/second")
def get_wind_direction(self):
"""returns the current wind direction as tuple (direction in degrees, cardinal direction)"""
def cardinal_direction(degrees):# OWM API should technically supply cardinal values, but in case it doesn't, we'll calculate them here.
if (degrees <= 360.0 and degrees > 337.5) or (degrees <= 22.5 and degrees > 0.0):
return 'N'
elif degrees <= 67.5 and degrees > 22.5:
return 'NE'
elif degrees <= 112.5 and degrees > 67.5:
return 'E'
elif degrees <= 157.5 and degrees > 112.5:
return 'SE'
elif degrees <= 202.5 and degrees > 157.5:
return 'S'
elif degrees <= 247.5 and degrees > 202.5:
return 'SW'
elif degrees <= 292.5 and degrees > 247.5:
return 'W'
elif degrees <= 337.5 and degrees > 292.5:
return 'NW'
else:
return 'Unknown'
return (self.wind_direction, cardinal_direction(int(self.wind_direction)))
# need to continue work on sections below -- add units to returned tuples. Also, improve error handling in case these functions are run before connect()
def connect_co(self):# these functions don't currently seem to work very well for locations inside the US
"""connects to OpenWeatherMap carbon monoxide API"""
request = Request('http://api.openweathermap.org/pollution/v1/co/{0},{1}/current.json?appid={2}'.format(round(self.latitude, 1), round(self.longitude, 1), self.api_key))
try:
data = urlopen(request).read()
except HTTPError:
print("No current CO data available.")
return 0
self.decoded_dict = json.loads(data)
if self.decoded_dict.get('message') == 'not found':
return "Data unavailable"
self.co = []
for entry in self.decoded_dict['data']:
self.co.append((entry['pressure'], entry['value'], entry['precision']))
self.co_location = (self.decoded_dict.get('location').get('latitude'), self.decoded_dict.get('location').get('longitude'))
self.co_datetime = self.decoded_dict.get('time')
return 1
def get_co_details(self):
"""returns nested tuple (sampling datetime, (latitude, longitude),
[(pressure, value, precision)])"""
return (self.co_datetime, self.co_location, self.co)
def connect_o3(self):
"""connects to OpenWeatherMap ozone API"""
request = Request('http://api.openweathermap.org/pollution/v1/o3/{0},{1}/current.json?appid={2}'.format(round(self.latitude, 1), round(self.longitude, 1), self.api_key))
try:
data = urlopen(request).read()
except HTTPError:
print("No current O3 data available.")
return 0
self.decoded_dict = json.loads(data)
if self.decoded_dict.get('message') == 'not found':
return "Data unavailable"
self.o3 = self.decoded_dict.get('data')
self.o3_location = (self.decoded_dict.get('location').get('latitude'), self.decoded_dict.get('location').get('longitude'))
self.o3_datetime = self.decoded_dict.get('time')
return 1
def get_o3_details(self):
"""returns nested tuple (sampling datetime, (latitude, longitude), value)"""
return (self.o3_datetime, self.o3_location, self.o3)
def connect_so2(self):
"""connects to OpenWeatherMap sulfur dioxide API"""
request = Request('http://api.openweathermap.org/pollution/v1/so2/{0},{1}/current.json?appid={2}'.format(round(self.latitude, 1), round(self.longitude, 1), self.api_key))
try:
data = urlopen(request).read()
except HTTPError:
print("No current SO2 data available.")
return 0
self.decoded_dict = json.loads(data)
if self.decoded_dict.get('message') == 'not found':
return "Data unavailable"
self.so2 = []
for entry in self.decoded_dict['data']:
self.so2.append((entry['pressure'], entry['value'], entry['precision']))
self.so2_location = (self.decoded_dict.get('location').get('latitude'), self.decoded_dict.get('location').get('longitude'))
self.so2_datetime = self.decoded_dict.get('time')
return 1
def get_so2_details(self):
"""returns nested tuple (sampling datetime, (latitude, longitude),
[(pressure, value, precision)])"""
return (self.so2_datetime, self.so2_location, self.so2)
def connect_no2(self):
"""connects to OpenWeatherMap nitrogen dioxide API"""
request = Request('http://api.openweathermap.org/pollution/v1/no2/{0},{1}/current.json?appid={2}'.format(round(self.latitude, 1), round(self.longitude, 1), self.api_key))
try:
data = urlopen(request).read()
except HTTPError:
print("No current SO2 data available.")
return 0
self.decoded_dict = json.loads(data)
if self.decoded_dict.get('message') == 'not found':
return "Data unavailable"
try:
self.no2_trop = (self.decoded_dict.get('data').get('no2_trop').get('value',0), self.decoded_dict.get('data').get('no2_trop').get('precision',0))
except:
self.no2_trop = (None, None)
try:
self.no2_strat = (self.decoded_dict.get('data').get('no2_strat').get('value',0), self.decoded_dict.get('data').get('no2_strat').get('precision',0))
except:
self.no2_strat = (None, None)
try:
self.no2 = (self.decoded_dict.get('data').get('no2').get('value',0), self.decoded_dict.get('data').get('no2').get('precision',0))
except:
self.no2 = (None, None)
self.no2_location = (self.decoded_dict.get('location').get('latitude'), self.decoded_dict.get('location').get('longitude'))
self.no2_datetime = self.decoded_dict.get('time')
return 1
def get_no2_details(self):
"""returns a nested tuple (sampling datetime, (latitude, longitude),
(trop. no2 value, precision), (strat. no2 value, precision), (no2 value, precision))"""
return (self.no2_datetime, self.no2_location, self.no2_trop, self.no2_strat, self.no2)
if __name__ == '__main__':
print("Suggested uses: Import as a module, or run in an IDE.")
```
|
{
"source": "jgerschler/python-kinect",
"score": 3
}
|
#### File: ESL Game Frameworks/Things in Common/ThingsInCommonFullScreen.py
```python
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
import pygame
import random
TRACKING_COLOR = pygame.color.Color("purple")
HIGHLIGHT_COLOR = pygame.color.Color("red")
BG_COLOR = pygame.color.Color("white")
GAME_TIME = 60# seconds
class BodyGameRuntime(object):
def __init__(self):
pygame.init()
pygame.mixer.init()
self.beep_sound = pygame.mixer.Sound('audio\\beep.ogg')
self.buzz_sound = pygame.mixer.Sound('audio\\buzz.ogg')
self._infoObject = pygame.display.Info()
self._screen = pygame.display.set_mode((self._infoObject.current_w,
self._infoObject.current_h),
pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)
pygame.display.set_caption("Kinect Game Framework Test")
self.finished = False
self._clock = pygame.time.Clock()
self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color |
PyKinectV2.FrameSourceTypes_Body)
self._frame_surface = pygame.Surface((self._kinect.color_frame_desc.Width,
self._kinect.color_frame_desc.Height), 0, 32)
self._bodies = None
self.score = 0
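        # sentence_dict maps each prompt sentence to the replies accepted as correct
        # (agreement with "too" / "either" / "neither"); reply_list further below is the
        # pool of distractor replies, including deliberately ungrammatical ones, that
        # new_round() samples from when building the three on-screen choices.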
self.sentence_dict = {"""I don't like fish.""":["""I don't either.""", """Me neither."""],
"""I don't know how to swim.""":["""I don't either.""", """Me neither."""],
"""I don't like potatoes.""":["""I don't either.""", """Me neither."""],
"""I don't eat donuts at night.""":["""I don't either.""", """Me neither."""],
"""I don't watch scary movies.""":["""I don't either.""", """Me neither."""],
"""I don't yell when I get angry.""":["""I don't either.""", """Me neither."""],
"""I can't ride a bicycle.""":["""I can't either.""", """Me neither."""],
"""I can't swim.""":["""I can't either.""", """Me neither."""],
"""I can't drive a car.""":["""I can't either.""", """Me neither."""],
"""I can't do a pushup.""":["""I can't either.""", """Me neither."""],
"""I can't run faster than a bus.""":["""I can't either.""", """Me neither."""],
"""I can't flap my arms and fly.""":["""I can't either.""", """Me neither."""],
"""I'm not fat.""":["""I'm not either.""", """Me neither."""],
"""I'm not skinny.""":["""I'm not either.""", """Me neither."""],
"""I'm not an angry person.""":["""I'm not either.""", """Me neither."""],
"""I'm not hungry.""":["""I'm not either.""", """Me neither."""],
"""I'm not happy.""":["""I'm not either.""", """Me neither."""],
"""I'm not a purple hippopotamus.""":["""I'm not either.""", """Me neither."""],
"""I like papayas.""":["""I do too.""", """Me too."""],
"""I know how to drive.""":["""I do too.""", """Me too."""],
"""I know how to swim.""":["""I do too.""", """Me too."""],
"""I like oranges.""":["""I do too.""", """Me too."""],
"""I eat cake on Fridays.""":["""I do too.""", """Me too."""],
"""I love comedies.""":["""I do too.""", """Me too."""],
"""I can eat 12 hamburgers at once.""":["""I can too.""", """Me too."""],
"""I can fly an airplane.""":["""I can too.""", """Me too."""],
"""I can write quickly.""":["""I can too.""", """Me too."""],
"""I can run faster than a snail.""":["""I can too.""", """Me too."""],
"""I can calculate faster than a calculator.""":["""I can too.""", """Me too."""],
"""I can swim with my eyes open.""":["""I can too.""", """Me too."""],
"""I'm angry.""":["""I am too.""", """Me too."""],
"""I'm hungry.""":["""I am too.""", """Me too."""],
"""I'm sad.""":["""I am too.""", """Me too."""],
"""I'm busy with work.""":["""I am too.""", """Me too."""],
"""I'm studying right now.""":["""I am too.""", """Me too."""],
"""I'm at school at the moment.""":["""I am too.""", """Me too."""]}
self.reply_list = ["""I don't either.""", """Me neither.""", """I can't either.""",
"""I'm not either.""", """I do too.""", """Me too.""", """I can too.""",
"""I am too.""", """I do either.""", """I can either.""", """I am either.""",
"""I do neither.""", """I can neither.""", """I am neither.""",
"""I am not too.""", """I don't too.""", """I can't too."""]
self._frame_surface.fill((255, 255, 255))
def text_objects(self, text, font):
text_surface = font.render(text, True, (0, 0, 0))
return text_surface, text_surface.get_rect()
def message_display(self, text, loc_tuple, loc_int):
# loc_int: 1 center, 2 top left, 3 bottom left, 4 bottom right, 5 top right
text_surf, text_rect = self.text_objects(text, pygame.font.Font('arial.ttf', 48))
loc_dict = {1:'text_rect.center', 2:'text_rect.topleft', 3:'text_rect.bottomleft',
4:'text_rect.bottomright', 5:'text_rect.topright'}
exec(loc_dict[loc_int] + ' = loc_tuple')
self._frame_surface.blit(text_surf, text_rect)
return text_rect
def draw_ind_point(self, joints, jointPoints, color, highlight_color, rect0, rect1, rect2, joint0):
joint0State = joints[joint0].TrackingState;
if (joint0State == PyKinectV2.TrackingState_NotTracked or
joint0State == PyKinectV2.TrackingState_Inferred):
return
center = (int(jointPoints[joint0].x), int(jointPoints[joint0].y))
if ((rect0.collidepoint(center) and self.filler_replies[0] == self.selected_answer[0]) or
(rect1.collidepoint(center) and self.filler_replies[1] == self.selected_answer[0]) or
(rect2.collidepoint(center) and self.filler_replies[2] == self.selected_answer[0])):
self.score += 1
self.beep_sound.play()
pygame.time.delay(500)
self.new_round()
elif rect0.collidepoint(center) or rect1.collidepoint(center) or rect2.collidepoint(center):
try:
pygame.draw.circle(self._frame_surface, highlight_color, center, 20, 0)
self.buzz_sound.play()
except: # need to catch it due to possible invalid positions (with inf)
pass
else:
try:
pygame.draw.circle(self._frame_surface, color, center, 20, 0)
except:
pass
def update_screen(self, joints, jointPoints, color, highlight_color, seconds):
self._frame_surface.fill(BG_COLOR)# blank screen before drawing points
self.message_display(self.selected_sentence, (300, 900), 2)
rect0 = self.message_display(self.filler_replies[0], (300, 300), 1)
rect1 = self.message_display(self.filler_replies[1], (self._frame_surface.get_width() / 2, 100), 1)
rect2 = self.message_display(self.filler_replies[2], (self._frame_surface.get_width() - 300, 300), 1)
self.message_display(str(self.score), (self._frame_surface.get_width() / 2, 800), 1)
self.message_display(str(seconds), (self._frame_surface.get_width() - 300, 800), 1)
self.draw_ind_point(joints, jointPoints, color, highlight_color, rect0,
rect1, rect2, PyKinectV2.JointType_Head)
self.draw_ind_point(joints, jointPoints, color, highlight_color, rect0,
rect1, rect2, PyKinectV2.JointType_WristRight)
# may change PyKinectV2.JointType_WristRight to PyKinectV2.JointType_ElbowRight
self.draw_ind_point(joints, jointPoints, color, highlight_color, rect0,
rect1, rect2, PyKinectV2.JointType_WristLeft)
def end_game(self):
self._frame_surface.fill(BG_COLOR)
self.message_display("Score: {}".format(self.score), (self._frame_surface.get_width() / 2, self._frame_surface.get_height() / 2), 1)
h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width()
target_height = int(h_to_w * self._screen.get_width())
surface_to_draw = pygame.transform.scale(self._frame_surface,
(self._screen.get_width(), target_height));
self._screen.blit(surface_to_draw, (0,0))
surface_to_draw = None
pygame.display.update()
pygame.time.delay(3000)
self.run()
def new_round(self):
self.selected_sentence = random.choice(list(self.sentence_dict.keys()))
self.selected_answer = random.sample(self.sentence_dict[self.selected_sentence], 1)
self.filler_replies = random.sample(self.reply_list, 2)
while ((self.sentence_dict[self.selected_sentence][0] in self.filler_replies) or
(self.sentence_dict[self.selected_sentence][1] in self.filler_replies)):
self.filler_replies = random.sample(self.reply_list, 2)
self.filler_replies += self.selected_answer
random.shuffle(self.filler_replies)
print(self.filler_replies)
print(self.selected_answer)
pygame.time.delay(500)
while not self.finished:
seconds = GAME_TIME - int((pygame.time.get_ticks() - self.start_ticks)/1000)
if seconds <= 0:
self.end_game()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.finished = True
if self._kinect.has_new_body_frame():
self._bodies = self._kinect.get_last_body_frame()
if self._bodies is not None:
for i in range(0, self._kinect.max_body_count):
body = self._bodies.bodies[i]
if not body.is_tracked:
continue
joints = body.joints
joint_points = self._kinect.body_joints_to_color_space(joints)
self.update_screen(joints, joint_points, TRACKING_COLOR, HIGHLIGHT_COLOR, seconds)
h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width()
target_height = int(h_to_w * self._screen.get_width())
surface_to_draw = pygame.transform.scale(self._frame_surface,
(self._screen.get_width(), target_height));
self._screen.blit(surface_to_draw, (0,0))
surface_to_draw = None
pygame.display.update()
self._clock.tick(60)
self.end_game()
def run(self):
self.score = 0
while not self.finished:
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.finished = True
if event.type == pygame.KEYUP and event.key == pygame.K_SPACE:
self.start_ticks = pygame.time.get_ticks()
self.new_round()
self._kinect.close()
pygame.quit()
if __name__ == "__main__":
game = BodyGameRuntime()
game.run()
```
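The round timer in `new_round` converts `pygame.time.get_ticks()` deltas into a whole-second countdown and ends the game once it reaches zero. A framework-free sketch of that arithmetic (the tick values and the `GAME_TIME` constant below are assumptions for illustration, not taken from the game's config):
```python
GAME_TIME = 30  # assumed round length in seconds

def seconds_left(start_ticks, now_ticks):
    """Same arithmetic as the game loop: milliseconds elapsed -> whole seconds remaining."""
    return GAME_TIME - int((now_ticks - start_ticks) / 1000)

start = 12000  # pretend pygame.time.get_ticks() returned 12000 ms when the round began
for now in (12000, 13500, 41999, 42500):
    left = seconds_left(start, now)
    print("%d ms -> %d seconds left%s" % (now, left, " (round over)" if left <= 0 else ""))
```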
|
{
"source": "jgershen/sportsball",
"score": 2
}
|
#### File: extdata/bsbr/scrape_bsbr.py
```python
import pandas
import bs4
from bs4 import BeautifulSoup
import time
import datetime
import json
import os
import re
import requests
from dateutil import parser
import cPickle as pickle
import sys
import argparse
import logging
import progressbar
import numpy as np
from dfs import GLOBAL_ROOT
from dfs.mlb.playerid import name2brefid, brefid_is_pitcher
from dfs.extdata.common.scraper import getSoupFromURL, soupTableToTable, parsedTableToDF
from dfs.extdata.common.io import combine_dataframe_into_pickle_file
# because Pickle -- try to fix this later
sys.setrecursionlimit(50000)
datadir = os.path.join(GLOBAL_ROOT, 'db/mlb/')
# holy crap a baseball-reference id can even have an apostrophe in it
bsbr_id_regex = re.compile("http://www\.baseball-reference\.com/players/\w/(?P<pid>[\w\.\'\d]+)\.shtml")
batting_stats_template = 'http://www.baseball-reference.com/players/gl.cgi?id={playerid}&t=b&year={year}'
pitching_stats_template = 'http://www.baseball-reference.com/players/gl.cgi?id={playerid}&t=p&year={year}'
fielding_stats_template = 'http://www.baseball-reference.com/players/gl.cgi?id={playerid}&t=f&year={year}'
def quicksave_playerdict(players, picklefn):
with open(picklefn, 'wb') as outf:
pickle.dump(players, outf)
def quickload_playerdict(picklefn):
with open(picklefn, 'rb') as inf:
return pickle.load(inf)
def json_save_playerdict(players):
path = os.path.join(datadir, 'mlb_player_data.dict')
logging.info('saving JSON player dict to %s', str(path))
player_archive = {}
for name, k in players.items():
player_archive[name] = {'gamelog_url_list':k['gamelog_url_list'], 'overview_url':k['overview_url'], 'overview_url_content':k['overview_url_content']}
json.dump(player_archive, open(path, 'wb'))
def json_load_playerdict():
with open(os.path.join(os.path.expandvars(datadir), 'mlb_player_data.dict'), 'rb') as f:
json_string = f.read()
player_archive = json.loads(json_string)
for player in player_archive:
if 'overview_url' in player_archive[player] and 'bref_id' not in player_archive[player]:
player_archive[player]['bref_id'] = bsbr_id_regex.match(player_archive[player]['overview_url']).group('pid')
return player_archive
def create_json_file(argv=sys.argv):
ap = argparse.ArgumentParser(description='create JSON dict of player overview data from baseball-reference.com')
ap.add_argument("--picklefile", default=None, help="pickle file to save/load from if necessary")
cfg = ap.parse_args()
logging.basicConfig(filename="logs/scrapebsbr.log", level=logging.INFO)
if cfg.picklefile and os.path.exists(cfg.picklefile):
players = quickload_playerdict(cfg.picklefile)
else:
logging.info('Getting bref IDs of active MLB players from baseball-reference.com...')
bref_ids = get_active_players()
logging.info('Initializing player dictionary...')
players = create_player_dict(bref_ids)
if cfg.picklefile:
quicksave_playerdict(players, cfg.picklefile)
logging.info('Loading overview pages...')
players = load_overview_pages(players)
if cfg.picklefile:
quicksave_playerdict(players, cfg.picklefile)
json_save_playerdict(players)
# We save separate files for batting / pitching data
player_data_types = ['batting', 'pitching']
def get_attr_filename(bref_id, data_type):
fn = os.path.join(datadir, 'player_data', bref_id + '_' + data_type)
return fn
def save_dataframes(players, overwrite=False):
"""
Save the pandas dataframes (the gamelog_data) from each player in players to a file
:param dict[str, dict] players: the player dict
:param bool overwrite: if True, delete old data
:return:
"""
saved_dataframes = 0
for bref_id, attrs in players.iteritems():
for data_type in player_data_types:
if data_type in attrs and attrs[data_type] is not None:
combine_dataframe_into_pickle_file(attrs[data_type], get_attr_filename(bref_id, data_type), overwrite=overwrite)
saved_dataframes += 1
logging.debug('Saved %d dataframes to %s', saved_dataframes, datadir)
def load_dataframes(players):
"""
Load previously saved dataframes of gamelog data
:param dict[str, dict[str, list]] players: the player dict
:return:
"""
loaded = 0
for bref_id in players.keys():
for data_type in player_data_types:
target_file = get_attr_filename(bref_id, data_type)
if os.path.exists(target_file):
with open(target_file, 'r') as inf:
players[bref_id][data_type] = pickle.load(inf)
loaded += 1
logging.debug('loaded %d dataframes from %s', loaded, datadir)
return players
def _parse_bsbr_prefix_section(prefix_section):
# sample contents:
'''
[u'\n',
<b><a href="/players/b/badenbu01.shtml"><NAME></a> 2008-2015</b>,
u'\n',
<a href="/players/b/baderar01.shtml"><NAME></a>,
u' 1904-1904\n',
<a href="/players/b/baderlo01.shtml"><NAME></a>,
u' 1912-1918\n',
<a href="/players/b/badgrre01.shtml"><NAME></a>,
u' 1929-1930\n']
'''
# the structure of the data makes this a little annoying to parse; we need state-based parsing
last_player = None
last_url = None
curr_year = datetime.date.today().year
is_playing_regex = re.compile('\d+-%d' % curr_year) #regex to find if years player was playing include this year
child_list = prefix_section.children
for element in child_list:
is_tag = isinstance(element, bs4.element.Tag)
if is_tag and element.name == 'b':
# Currently active player -- wrapped in <b> tag
for pl in element.findAll('a'):
if pl.attrs['href'].startswith('/players/'):
player_url = 'http://www.baseball-reference.com' + pl.attrs['href']
bref_id = bsbr_id_regex.match(player_url).group('pid')
yield (bref_id, player_url)
continue
elif is_tag and element.name != 'a':
# I have no idea what this is. Skip it.
continue
elif is_tag: # we know that this is an <a> tag
# Player not wrapped in <b> tag. BSBR doesn't think he's active but maybe he was this year.
# We will parse it and wait to see what years he played
last_url = 'http://www.baseball-reference.com' + element.attrs['href']
last_player = bsbr_id_regex.match(last_url).group('pid')
continue
elif last_player is None: # this is not a tag
# Not currently parsing a player and encountered a string; just skip it
continue
else:
if is_playing_regex.search(element):
yield (last_player, last_url)
last_player = None
last_url = None
continue
def get_active_players():
letters = list('abcdefghijklmnopqrstuvwxyz')
player_and_url_list = []
print 'Checking currently active players on baseball-reference.com...'
pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()])
for letter in pbar(letters):
letter_page = getSoupFromURL('http://www.baseball-reference.com/players/%s/' % (letter))
# we don't just need active players (<b> tags), we need anyone who played in 2015!
prefix_sections = letter_page.findAll('pre')
for section in prefix_sections:
player_and_url_list += list(_parse_bsbr_prefix_section(section))
bref_id_dict = dict(player_and_url_list)
return bref_id_dict
def create_player_dict(bref_ids):
'''
Initialize the player dictionary.
:param dict [str, str] bref_ids: bref_id to URL dictionary
:return:
'''
players = {}
for bref_id, url in bref_ids.iteritems():
players[bref_id] = {'overview_url':url}
players[bref_id]['overview_url_content'] = None
players[bref_id]['gamelog_url_list'] = []
players[bref_id]['gamelog_data'] = None
return players
def load_overview_pages(players):
pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()])
print 'Accessing and parsing overview pages...'
for i, (bref_id, player_dict) in pbar(list(enumerate(players.items()))):
if players[bref_id]['overview_url_content'] is None:
overview_soup = getSoupFromURL(players[bref_id]['overview_url'])
players[bref_id]['overview_url_content'] = overview_soup.text
# the links to each year's game logs are in <li> tags, and the text contains 'Game Logs'
# so we can use those to pull out our urls.
game_log_links = []
for li in overview_soup.find_all('li'):
if 'Game Logs' in li.getText():
game_log_links = li.findAll('a')
for game_log_link in game_log_links:
players[bref_id]['gamelog_url_list'].append('http://www.baseball-reference.com' + game_log_link.get('href'))
return players
def dfFromGameLogURLList(gamelogs):
"""Takes a list of game log urls and returns a concatenated DataFrame"""
return pandas.concat([dfFromGameLogURL(g) for g in gamelogs])
def dfFromGameLogURL(url):
"""Takes a url of a player's game log for a given year, returns a DataFrame"""
glsoup = getSoupFromURL(url)
if not glsoup: #Hmm, this really shouldn't happen?
logging.warning("No soup parsed from %s", url)
return None
stats_table = glsoup.findAll('table', attrs={'class': 'stats_table'}) # id for reg season table
# parse the table header. we'll use this for the creation of the DataFrame
header = []
if not stats_table:
return None
for th in stats_table[0].find("thead").findAll('th'):
if not th.getText() in header:
header.append(th.getText())
    # the home/away indicator column has no header in the source table; name it so the DataFrame parses correctly
header[5] = u'HomeAway'
year = url[-4:] if re.search('(?P<year>\d+)$', url) else datetime.datetime.today().year
date_column = header.index("Date")
# turn soup of the table into a list o' lists
stats_table = soupTableToTable(stats_table)
# Run cleanup for MLB tables on baseball-reference.com -- turn dates into actual dates.
for row_ix in range(len(stats_table)):
raw_date = stats_table[row_ix][date_column]
# Remove non-ASCII characters from the date str and replace with single spaces
# (sometimes the space between month and day is a whacky unicode char; thx baseball-reference.)
raw_date = re.sub(r'[^\x00-\x7F]+',' ', raw_date)
# Ignore if the game was suspended and resumed later
raw_date = re.sub(r'susp','',raw_date)
if '(' not in raw_date and len(raw_date):
stats_table[row_ix][date_column] = parser.parse(raw_date + ' ' + str(year))
elif raw_date:
# This is a doubleheader! Assign doubleheaders to "hours".
# This doesn't do anything smart, except keep the data indexed by separate values so that
# it could conceivably be retrieved later.
dateparts = re.match("(?P<month>\w+) (?P<day>\d+) ?\((?P<gameno>\d+)\)", raw_date)
assembled_date = parser.parse(dateparts.group("month") + " " + dateparts.group("day") + " " +
dateparts.group("gameno") + ":00" + " " + str(year))
stats_table[row_ix][date_column] = assembled_date
else:
# There's not a date here -- it's probably the EOY summary row.
# It could also be a trade notification? Either way, ignore it.
continue
# Discard EOY summary row
stats_table = stats_table[:-1]
# Remove any rows which contain "Player went from" -- trade notifications sneaking in there
stats_table = filter(lambda row: not any(isinstance(cell, basestring) and cell.startswith('Player went from') for cell in row), stats_table)
# Use common function to turn our cleaned-up stats table into a dataframe
parsed_df = parsedTableToDF(stats_table, header, date_index=date_column)
return parsed_df
def get_active_years(player_url_list):
    ''' Derive the seasons a player was active from his cached game-log URLs.
    Rather than re-scraping every URL found on the player page, we call load_player_dataframes
    with explicit years, so here we just pull the year parameter off the previously saved
    gamelog URLs. Some saved URLs carry no year at all; a player may have logged his first
    current-season game after his URL list was last refreshed, so any yearless URL causes the
    current year to be included as well.
    :param list[basestring] player_url_list: list of urls for the player
    :return list[basestring]: list of years that the player was active
    '''
# Is this actually a player url?
actual_player_url_re = 'http://www.baseball-reference.com/players/gl.cgi\?.+'
valid_player_urls = filter(lambda x: re.match(actual_player_url_re, x) is not None, player_url_list)
year_re = re.compile('http://www.baseball-reference.com/players/gl.cgi\?id=([\w\.\d]+)&t=\w&year=(?P<year>\d+)')
def get_year(url):
match = year_re.match(url)
return match.group('year') if match else None
year_url_parts = map(get_year, valid_player_urls)
if None in year_url_parts:
year_url_parts.remove(None)
year_url_parts.append(str(datetime.date.today().year))
return list(set(year_url_parts))
def load_player_dataframes(bref_id, year='', player_is_pitcher=False):
batting_log_page = batting_stats_template.format(playerid=bref_id, year=year)
batting_df = dfFromGameLogURL(batting_log_page)
pitching_df = None
if player_is_pitcher:
pitching_log_page = pitching_stats_template.format(playerid=bref_id, year=year)
pitching_df = dfFromGameLogURL(pitching_log_page)
return batting_df, pitching_df
def cli_load_player():
ap = argparse.ArgumentParser(description='scrape detailed player game log from baseball-reference.com & save')
ap.add_argument("--player", default=None, help="player to load (if unspecified, load someone unloaded at random")
cfg = ap.parse_args()
logging.basicConfig(filename="logs/scrapebsbr.log", level=logging.INFO)
players = json_load_playerdict()
players = load_dataframes(players)
if not cfg.player: # unspecified? OK find someone we haven't loaded yet
for p in players.keys():
# Is there any data type unloaded for this guy
if not set(player_data_types).intersection(players[p].keys()):
cfg.player = p
break
else:
logging.info('Data loaded for all players.')
return
# Did we actually find the player?
if cfg.player not in players:
logging.warning("Couldn't find player '%s'", cfg.player)
return
players = scrape_player(players, cfg.player)
save_dataframes(players)
def scrape_player(players, bref_id):
"""
Scrape all games for a player and add the parsed dataframes to the player dict provided.
:param players: player dict
:param bref_id: player bref_id of a player
:return:
"""
# found name, load player data
logging.info('Loading player data for %s...', bref_id)
# Load bref id for player
try:
player_is_pitcher = brefid_is_pitcher(bref_id)
except KeyError:
# player bref id not present in our lookup mapping
# probably no one cares about this guy anyway right.
logging.info('Player info for %s not found; not loading him' % bref_id)
return players
dataframes = []
for year in get_active_years(players[bref_id]['gamelog_url_list']):
dataframes.append(load_player_dataframes(bref_id, year, player_is_pitcher))
# Combine the dataframes for each year.
dataframes_by_type = zip(*dataframes)
# Strip out "None" dataframes, where the player recorded no stats of this type this year.
dataframes_by_type = map(lambda df_list: filter(lambda df: df is not None, df_list), dataframes_by_type)
# Concatenate the dataframes (when we have stats for the given type)
# If the player's not a pitcher they're all None for pitching, so return None if we have no stats of that type for
# any year at all!
built_dataframes = map(lambda l: pandas.concat(l) if len(l) else None, dataframes_by_type)
for df in built_dataframes:
assert df is None or not df.duplicated().any()
# TODO(jgershen): if we start pulling stats for, say, fielding, need to update here and in load_player_dataframes
players[bref_id]['batting'] = built_dataframes[0]
players[bref_id]['pitching'] = built_dataframes[1]
batting_games_loaded = len(built_dataframes[0]) if built_dataframes[0] is not None else 0
pitching_games_loaded = len(built_dataframes[1]) if built_dataframes[1] is not None else 0
logging.info('Loaded %d games batting, %d games pitching for %s.' % (batting_games_loaded, pitching_games_loaded, bref_id))
return players
def add_new_players_to_playerdict():
"""
Look for players new to baseball-reference.
:return:
"""
print 'Checking for players we have never loaded before...'
active_players = get_active_players()
players = json_load_playerdict()
# Find out which players are new!
new_players = {player: overview_page for player, overview_page in active_players.iteritems() if player not in players}
if new_players:
logging.info('Found %d new MLB players since last update!', len(new_players))
new_playerdict = create_player_dict(new_players)
# Load overview pages for those players!
new_playerdict = load_overview_pages(new_playerdict)
# Update the stats page locations for new players
players.update(new_playerdict)
# Save the new player page information to disk
json_save_playerdict(players)
def update_player(players, player_id, year):
"""
    Update ONLY the given year's stats for a player
    :param dict[str, dict[str, pandas.DataFrame]] players: player dict
    :param str player_id: bref id of the player to refresh stats for
    :param year: season to refresh
    :return dict[str, dict]: players
"""
active_years = get_active_years(players[player_id]['gamelog_url_list'])
    assert str(year) in active_years  # the target season should appear in the player's gamelog URLs
try:
is_pitcher = brefid_is_pitcher(player_id)
except KeyError:
# still don't care about someone we can't load *any* data for
return players
year_stats = load_player_dataframes(player_id, year, is_pitcher)
year_stats_by_attr = zip(player_data_types, year_stats)
for data_type, stats_df in year_stats_by_attr:
old_data = players[player_id].get(data_type)
if old_data is None and stats_df is not None:
players[player_id][data_type] = stats_df
elif stats_df is not None:
new_data = old_data.combine_first(stats_df)
players[player_id][data_type] = new_data
if players[player_id].get(data_type) is not None:
# Drop NaT values in the index (from e.g. trades) here, if applicable
df = players[player_id][data_type]
if df.index.hasnans:
df = df.loc[df.index.drop_duplicates().drop(np.nan)]
players[player_id][data_type] = df
return players
def cli_update_players():
ap = argparse.ArgumentParser(description='update cached stats from baseball-reference.com')
ap.add_argument("--year", type=int, help="Override stats for the target year instead of this one")
ap.add_argument("--max-players", type=int, help="Only load this many players at most (for testing only!)",
default=0)
ap.add_argument("--specific-player", type=str, help="Load/update only this brefid (also for debugging)")
ap.add_argument("--skip-update", action='store_true', help="Skip the update existing players step")
cfg = ap.parse_args()
logging.basicConfig(filename="logs/scrapebsbr.log", level=logging.INFO)
add_new_players_to_playerdict()
players = json_load_playerdict()
players = load_dataframes(players)
players_to_load = [p for p in players if not len(set(player_data_types).intersection(players[p].keys()))]
if cfg.max_players:
players_to_load = players_to_load[:cfg.max_players]
if cfg.specific_player:
players_to_load = [cfg.specific_player]
if players_to_load:
print 'We have no stats for %d players -- trying to load in full.' % len(players_to_load)
pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()])
for player in pbar(players_to_load):
logging.info('Never loaded player data for %s (crawling now)', player)
scrape_player(players, player)
print 'Done loading players, saving progress!'
save_dataframes(players)
year_to_update = cfg.year or datetime.date.today().year
players_to_update = [x for x in list(players.keys()) if x not in players_to_load]
if cfg.max_players:
players_to_update = players_to_update[:cfg.max_players]
for player in players:
if players[player]:
assert players[player]
if cfg.skip_update:
save_dataframes(players)
return
print 'Updating player stats for %d previously scraped players from baseball-reference.com...' % len(players_to_update)
pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()])
for player in pbar(players_to_update):
# found name, load player data
logging.info('Updating player data for %s...', player)
players = update_player(players, player, year=year_to_update)
save_dataframes(players)
def load_full_gamelogs():
"""
Exportable loading function for using baseball reference data elsewhere in the project
:return:
"""
players = json_load_playerdict()
players = load_dataframes(players)
return players
```
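The date cleanup inside `dfFromGameLogURL` strips non-ASCII whitespace, drops `susp` markers, and encodes doubleheaders by parsing the game number as an "hour" so the two games keep distinct index values. A standalone sketch of that branch (the sample date strings are assumptions, not scraped data):
```python
import re
from dateutil import parser

def clean_bref_date(raw_date, year):
    """Sketch of the date cleanup branch in dfFromGameLogURL."""
    raw_date = re.sub(r'[^\x00-\x7F]+', ' ', raw_date)  # normalize odd unicode spacing
    raw_date = re.sub(r'susp', '', raw_date)            # drop the suspended-game marker
    if '(' not in raw_date and len(raw_date):
        return parser.parse(raw_date + ' ' + str(year))
    elif raw_date:
        # doubleheader: keep the two games apart by parsing the game number as an "hour"
        parts = re.match(r"(?P<month>\w+) (?P<day>\d+) ?\((?P<gameno>\d+)\)", raw_date)
        return parser.parse("%s %s %s:00 %s" % (parts.group("month"), parts.group("day"),
                                                parts.group("gameno"), year))
    return None  # EOY summary / trade rows carry no date

print(clean_bref_date("May 30", 2015))      # 2015-05-30 00:00:00
print(clean_bref_date("May 30 (2)", 2015))  # 2015-05-30 02:00:00
```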
#### File: extdata/numberfire/nba_scraper.py
```python
from argparse import ArgumentParser
from bs4 import BeautifulSoup
import pandas
import simplejson as json
import time
import re
import warnings
import logging
import progressbar
import numpy
from dfs.extdata.common.scraper import getSoupFromURL
from dfs.nba.playerid import name2nbaid
from .io import save_nf_histplayerinfo, save_nf_overview_data, load_nf_histplayerinfo, load_nf_overview_data, save_nf_salary_info
from .utils import parse_terminated_json, pickle_cache_html_helper
import ipdb
import IPython
sport = 'nba' # for shared library functions. could probably do this better.
numberfire_url = 'http://www.numberfire.com/nba/fantasy/full-fantasy-basketball-projections'
nf_player_url = 'http://www.numberfire.com/nba/players/{slug}'
fantasy_sites = [u'fantasy_aces',
u'fantasy_feud',
u'draftster',
u'fantasy_score',
u'draft_kings',
u'draftday',
u'fanduel']
#<table cellspacing="0" cellpadding="0" border="0" class="player-table data-table small">
def load_player_history_table(div_soup):
"""Parse the HTML/Soup table for the numberfire predictions.
Returns a pandas DataFrame
"""
if not div_soup:
return None
rows = div_soup.findAll('tr')
table_header = [x.getText() for x in rows[0].findAll('th')]
table_data = [[x.getText() for x in row.findAll('td')] for row in rows[1:]]
if not table_data:
logging.debug("No predictions found!")
return None
table = pandas.io.parsers.TextParser(table_data,
names=table_header,
index_col=table_header.index('Date'),
parse_dates=True).read()
# Next we want to separate combined projection stats like FGM-A into separate columns for FGM and FGA
dash_cols = [col for col in table.columns if '-' in col]
for col in dash_cols:
name_parts = col.split('-')
series1name = name_parts[0]
series2name = name_parts[0][:-1] + name_parts[1]
series1data = table[col].apply(lambda data: float(data.split('-')[0]))
series2data = table[col].apply(lambda data: float(data.split('-')[1]))
table[series1name] = pandas.Series(data=series1data, name=series1name, index=table.index, dtype=numpy.dtype('float'))
table[series2name] = pandas.Series(data=series2data, name=series2name, index=table.index, dtype=numpy.dtype('float'))
table.drop(dash_cols, axis=1, inplace=True)
return table
def load_player_salary_table(bsp):
""" Load the historical player salaries from the NF_DATA variable on the beautiful soup page
:param BeautifulSoup bsp: beautiful soup of player overview page
:return:
"""
# Extract javascript dict containing salary values, parse as JSON
data = bsp.find_all("script")
regex = re.compile('var NF_DATA = (.*?);')
nf_data_dicts = []
for d in data:
if d.string:
match = regex.search(d.string)
if match:
nf_data_dicts.append(parse_terminated_json(match.group(1)))
    if len(nf_data_dicts) != 1:
        warnings.warn("expected exactly one script defining NF_DATA, found %d -- website layout probably changed" % len(nf_data_dicts))
nf = nf_data_dicts[0]
salaries_dict = nf['dfs_salary_charts']
def parse_site_salary_dict(site_salary_dict): # helper fn to unravel this super-nested json
cols = site_salary_dict['data']['columns'] # the rest is useless graph metadata
prepped = {c[0]: c[1:] for c in cols}
prepped["Date"] = prepped['x']
del prepped['x']
del prepped['FP'] # we already know how many fantasy points they got
df = pandas.DataFrame.from_dict(prepped)
df['Date'] = pandas.to_datetime(df['Date'])
df.set_index('Date', inplace=True)
return df
return pandas.concat([parse_site_salary_dict(salaries_dict[site]) for site in fantasy_sites], axis=1)
def load_stats_tables_from_history_page(url):
"""Load all the prediction tables from a Numberfire history page"""
soup = getSoupFromURL(url)
salary = load_player_salary_table(soup)
projection_months = ['%s-schedule' % month for month in
['October', 'November', 'December', 'January', 'February', 'March', 'April']]
month_tables = []
for month in projection_months:
month_schedule = soup.find('div', attrs={'id': month})
month_table = load_player_history_table(month_schedule)
if month_table is not None:
month_tables.append(month_table)
if month_tables:
all_predictions = pandas.concat(month_tables)
else:
all_predictions = None
return all_predictions, salary
def scrape_numberfire_overview_page(cached_page=None, cache_target=None):
"""
Return the information from the overview page. These should be all stats for current games.
Additionally
:param cached_page:
:param cache_target:
:return DataFrame: current statistics from the overview page
"""
page = pickle_cache_html_helper(numberfire_url, cached_page, cache_target)
bsp = BeautifulSoup(page)
data = bsp.find_all("script")
regex = re.compile('var NF_DATA = (.*?);')
nf_data_dicts = []
for d in data:
if d.string:
match = regex.search(d.string)
if match:
nf_data_dicts.append(parse_terminated_json(match.group(1)))
    if len(nf_data_dicts) != 1:
        warnings.warn("expected exactly one script defining NF_DATA, found %d -- website layout probably changed" % len(nf_data_dicts))
nf = nf_data_dicts[0]
# need to stitch projection info to player / team tables
proj_df = pandas.DataFrame(nf['daily_projections'])
player_df = pandas.DataFrame.from_dict(nf['players'], orient='index')
team_df = pandas.DataFrame.from_dict(nf['teams'], orient='index')
# I'm still not sure what the stuff in the team analytics dataframe is, but, cool?
team_an_df = pandas.DataFrame.from_dict(nf['team_analytics'], orient='index', dtype=float)
joined_df = proj_df.join(player_df, on='nba_player_id')
joined_df = joined_df.join(team_df, on='team_id', lsuffix="_player", rsuffix="_team")
joined_df = joined_df.join(team_an_df, on='team_id', rsuffix="_") # the duplicate fields here are not necessary
# Find the player slugs column. Use it to update our slug mappings
#slug_dict = dict(zip(joined_df["name_player"], joined_df["slug_player"]))
# Update the saved version of the slug dict with any new mappings we've found.
#update_nf_playerslugs(slug_dict)
# EDIT -- slug mappings will be pulled from the overview dataframe if needed
return joined_df
def update_numberfire_history():
# Start by updating our slug dict and overall numberfire player information
overall_stats = scrape_numberfire_overview_page()
save_nf_overview_data(sport, overall_stats)
# We only load & update numberfire slug information for players appearing in the most recent batch of overview data
# and only if we are also able to match this player to a BREF ID. A side effect of this is that we will make no
# predictions for any NBA players who haven't played yet this year.
pids_to_load = []
for ix, row in overall_stats.iterrows():
pid, confidence = name2nbaid(row['name_player'], player_team=row['name_team'], get_confidence=True)
if confidence > 75:
pids_to_load.append((pid, row['slug_player']))
old_predictions = load_nf_histplayerinfo(sport, identifiers_to_load=pids_to_load)
scraped_salaries = {}
new_dataframes, updated_dataframes = 0, 0
print "Scraping updated player predictions from Numberfire..."
pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()])
for pid, slug in pbar(pids_to_load):
time.sleep(1)
player_df, salary_df = load_stats_tables_from_history_page(nf_player_url.format(slug=slug))
old_player_df = old_predictions.get(pid)
if old_player_df is None:
old_predictions[pid] = player_df
new_dataframes += 1
else:
try:
new_data = old_player_df.combine_first(player_df)
old_predictions[pid] = new_data
except ValueError as ex:
ipdb.set_trace()
updated_dataframes += 1
scraped_salaries[pid] = salary_df
logging.info('Saving scraped predictions (%d updated, %d added)', updated_dataframes, new_dataframes)
save_nf_histplayerinfo(sport, old_predictions)
save_nf_salary_info(sport, scraped_salaries)
def scrape_cli():
p = ArgumentParser()
p.add_argument("-v", "--verbose", help="verbose-ish, just print to console")
cfg = p.parse_args()
if cfg.verbose:
logging.getLogger("requests").setLevel(logging.WARNING)
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(filename="logs/scrapenf.log", level=logging.INFO)
update_numberfire_history()
```
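`load_player_history_table` splits combined projection columns such as `FGM-A` into separate numeric made/attempted columns. A minimal reproduction of that transform on made-up data:
```python
import pandas

# made-up projection rows with combined "made-attempted" columns
table = pandas.DataFrame({'FGM-A': ['7-15', '9-18'], '3PM-A': ['2-6', '1-4']})

dash_cols = [col for col in table.columns if '-' in col]
for col in dash_cols:
    name_parts = col.split('-')
    made_col = name_parts[0]                        # e.g. 'FGM'
    att_col = name_parts[0][:-1] + name_parts[1]    # e.g. 'FGA'
    table[made_col] = table[col].apply(lambda v: float(v.split('-')[0]))
    table[att_col] = table[col].apply(lambda v: float(v.split('-')[1]))
table.drop(dash_cols, axis=1, inplace=True)
print(table)  # columns are now FGM, FGA, 3PM, 3PA with float values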
#### File: extdata/rotoguru/scraper.py
```python
from __future__ import absolute_import
import bs4
import datetime
import time
import pandas
import progressbar
import logging
import io
from argparse import ArgumentParser
from dateutil import parser
from dfs.extdata.common.scraper import getSoupFromURL
from .rgio import save_rg_salary_info
# MLB parses from CSV -- we should really switch NBA around to doing it this way!
mlb_url_template = 'http://rotoguru1.com/cgi-bin/byday.pl?date={month}{day}&game={game_code}&scsv=1'
nba_url_template = 'http://rotoguru1.com/cgi-bin/hyday.pl?mon={month}&day={day}&game={game_code}'
game_code_dict = {'FanDuel': 'fd'}
nba_position_list = ['PG', 'PF', 'SG', 'SF', 'C']
mlb_position_list = ['P', 'C', '1B', '2B', '3B', 'SS', 'OF']
mlb_position_key = {1: 'P',
2: 'C',
3: '1B',
4: '2B',
5: '3B',
6: 'SS',
7: 'OF'}
def is_actual_player_row(sport, row):
if not isinstance(row.contents[0], bs4.element.Tag):
return False
position_list = nba_position_list if sport == 'nba' else mlb_position_list
for pos in position_list:
if row.contents[0].text == pos:
return True
return False
def parse_player_row(row):
position = row.contents[0].text
name_parts = row.contents[1].text.strip().strip('^').split(',')
name = name_parts[1] + ' ' + name_parts[0]
raw_salary = row.contents[3].text
parsed_salary = raw_salary.strip('$').replace(',', '')
return name, position, parsed_salary
def parse_mlb_csv_from_soup(soup):
magic_header_string = 'Date;GID;MLB_ID;Name;Starter;Bat order;FD posn;FD pts;FD sal;Team;Oppt;dblhdr;'
csv_containing_element = filter(lambda element: magic_header_string in element.text, soup.findAll('p'))[0]
df = pandas.DataFrame.from_csv(io.StringIO(csv_containing_element.text), sep=';', index_col="MLB_ID")
df = df[["Name", "FD posn", "FD sal"]].dropna()
df["Position"] = df["FD posn"].apply(lambda x: mlb_position_key[x])
df["Salary"] = df["FD sal"]
ret = df[["Name", "Position", "Salary"]]
return ret
def load_positions_for_day(sport, game_date, game='FanDuel'):
    ''' get salaries and positions for eligible players for the given day / fantasy site
    :param str sport: 'nba' or 'mlb'
    :param datetime.datetime game_date: day to load salaries for
    :param basestring game: fantasy site name (only 'FanDuel' is currently mapped)
:return:
'''
month, day = game_date.month, game_date.day
url_template = nba_url_template if sport == 'nba' else mlb_url_template
day_part = '%02d' % day
url = url_template.format(month=month, day=day_part, game_code=game_code_dict[game])
soup = getSoupFromURL(url)
if sport == 'nba':
all_rows = soup.findAll('tr')
player_rows = filter(lambda r: is_actual_player_row('nba', r), all_rows)
parsed_rows = map(parse_player_row, player_rows)
day_salaries = pandas.DataFrame.from_records(parsed_rows, columns=['Player', 'Position', 'Salary'])
day_salaries["Salary"] = day_salaries["Salary"].apply(int)
day_salaries["Player"] = day_salaries["Player"].apply(lambda x: x.strip())
day_salaries["Position"] = day_salaries["Position"].apply(lambda x: x.strip())
day_salaries.set_index("Player", inplace=True)
else:
day_salaries = parse_mlb_csv_from_soup(soup)
return day_salaries
def update_salary_history(sport, min_date=None, max_date=None):
min_date = min_date or datetime.datetime.today() - datetime.timedelta(days=1)
max_date = max_date or datetime.datetime.today()
if isinstance(min_date, basestring):
min_date = parser.parse(min_date)
if isinstance(max_date, basestring):
max_date = parser.parse(max_date)
date = min_date
pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()],
maxval=int((max_date-min_date).total_seconds() / (60*60*24)) + 1)
pbar.start()
saved = 0
hit = 0
while date <= max_date:
time.sleep(1)
day_salaries = load_positions_for_day(sport, date)
if len(day_salaries) > 0:
save_rg_salary_info(sport, date, day_salaries)
saved += 1
hit += 1
date += datetime.timedelta(days=1)
pbar.update(value=hit)
pbar.finish()
return saved
def scrape_cli():
p = ArgumentParser()
p.add_argument('sport', help='one of mlb or nba')
p.add_argument("--min-date", default=None, help="First day to scrape (defaults to yesterday)")
p.add_argument("--max-date", default=None, help="Last day to scrape (defaults to today)")
cfg = p.parse_args()
logging.basicConfig(filename="logs/scraperotoguru.log", level=logging.INFO)
print 'Loading salaries for FanDuel games...'
updated_games = update_salary_history(cfg.sport, cfg.min_date, cfg.max_date)
print 'Saved salaries for %d FanDuel gamedays' % updated_games
```
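`parse_player_row` turns a "Last, First" name cell and a "$7,500"-style salary cell into clean fields. A small sketch of the same string handling, using assumed sample values rather than scraped HTML rows:
```python
def parse_name_and_salary(raw_name, raw_salary):
    """Mirror the string cleanup done per row in parse_player_row."""
    name_parts = raw_name.strip().strip('^').split(',')
    name = name_parts[1].strip() + ' ' + name_parts[0].strip()
    salary = int(raw_salary.strip('$').replace(',', ''))
    return name, salary

print(parse_name_and_salary('James, LeBron^', '$10,800'))  # ('LeBron James', 10800)
```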
#### File: extdata/sportsbookreview/scrape_nba_odds.py
```python
import pandas as pd
import itertools
import datetime
from dateutil import parser
import progressbar
import logging
from argparse import ArgumentParser
import numpy as np
from dfs.extdata.common.scraper import getSoupFromURL
from .sbrio import save_sbr_odds_info, load_sbr_odds_info
from .common import moneyline_to_implied_odds
from dfs.nba.playerid import team_tla
import IPython, ipdb
logger = logging.getLogger(__name__)
logger.addHandler(logging.FileHandler("logs/scrapeSBR.log"))
logger.setLevel(logging.DEBUG)
nba_template = 'http://www.sportsbookreview.com/nba-basketball/odds-scores/{year}{month}{day}/'
def _get_scoreline_components(team_row):
# Parse the scoreline overunder & odds component
raw_ml = team_row.find('td', {'class': 'tbl-odds-c7'}).text
raw_ml = raw_ml.replace(u'\xbd', '.5') # 1/2 character
if 'N/A' in raw_ml:
return [np.nan, np.nan]
else:
return map(float, raw_ml.split())
def _get_spread_components(team_row):
# Parse the spread and vig.
# this might be exactly the same as _get_scoreline_components, don't tell anyone
raw_ml = team_row.find('td', {'class': 'tbl-odds-c5'}).text
raw_ml = raw_ml.replace(u'\xbd', '.5') # 1/2 character
if 'N/A' in raw_ml:
return [np.nan, np.nan]
else:
return map(float, raw_ml.split())
def parse_odds_table(odds_table):
rows = odds_table.findAll('tr')
team1 = rows[2] # also contains over bet
team2 = rows[3] # also contains under bet
t1name = team1.find('td', {'class': 'tbl-odds-c2'}).text
t2name = team2.find('td', {'class': 'tbl-odds-c2'}).text
t1_current_spread = _get_spread_components(team1)
t2_current_spread = _get_spread_components(team2)
# Get scoreline over / under if present
over_l = _get_scoreline_components(team1)
under_l = _get_scoreline_components(team2)[1:]
# Map sportsbook team names to three-letter abbreviations
t1tla, conf1 = team_tla(t1name, get_confidence=True)
t2tla, conf2 = team_tla(t2name, get_confidence=True)
if conf1 >= 90 and conf2 >= 90:
# Create a row for each team with
# team, moneyline, run over/under, overML, underML
return [[t1tla] + t1_current_spread + over_l + under_l,
[t2tla] + t2_current_spread + over_l + under_l]
else:
logger.warning("Skipping odds for game: %s vs %s", t1name, t2name)
return None
def load_odds_for_day(game_day):
day_part = '%02d' % game_day.day
month_part = '%02d' % game_day.month
url = nba_template.format(year=game_day.year, month=month_part, day=day_part)
soup = getSoupFromURL(url)
odds_tables = soup.findAll('table', {'class': 'tbl-odds'})
if len(odds_tables) < 1:
print 'Hit some weird (intermittent?) bug with no odds tables being found. Needs more investigation!'
IPython.embed()
if odds_tables[0].text == u'No games to display':
return None
try:
odds = list(itertools.chain.from_iterable(filter(lambda x: x is not None,
[parse_odds_table(ot) for ot in odds_tables])))
except TypeError:
IPython.embed()
df = pd.DataFrame(odds, columns=['Team', 'spread', 'vig', 'scoreline', 'rl_over', 'rl_under'])
df.set_index('Team', drop=True, inplace=True)
return df
def scrape_nba_odds_range(min_date=None, max_date=None):
min_date = min_date or datetime.datetime.today() - datetime.timedelta(days=1)
max_date = max_date or datetime.datetime.today()
if isinstance(min_date, basestring):
min_date = parser.parse(min_date)
if isinstance(max_date, basestring):
max_date = parser.parse(max_date)
date = min_date
pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()],
maxval=int((max_date-min_date).total_seconds() / (60*60*24)) + 1)
pbar.start()
saved = 0
hit = 0
while date <= max_date:
day_odds = load_odds_for_day(date)
if day_odds is not None and len(day_odds) > 0:
save_sbr_odds_info('nba', date, day_odds)
saved += 1
hit += 1
date += datetime.timedelta(days=1)
pbar.update(value=hit)
pbar.finish()
return saved
def scrape_cli():
p = ArgumentParser()
p.add_argument("--min-date", default=None, help="First day to scrape (defaults to yesterday)")
p.add_argument("--max-date", default=None, help="Last day to scrape (defaults to today)")
cfg = p.parse_args()
logging.basicConfig()
print 'Loading Vegas odds for NBA games from sportsbookreview...'
loaded_count = scrape_nba_odds_range(cfg.min_date, cfg.max_date)
print 'Saved odds for %d gamedays' % loaded_count
```
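`_get_scoreline_components` and `_get_spread_components` both replace the `½` character with `.5` and split the cell into two floats (line and price), falling back to NaN when the book shows N/A. A self-contained sketch of that parsing (the sample strings are assumptions):
```python
import numpy as np

def parse_odds_cell(raw):
    """Sketch of the cell parsing shared by the spread and scoreline helpers."""
    raw = raw.replace(u'\xbd', '.5')  # the 1/2 character used by the book
    if 'N/A' in raw:
        return [np.nan, np.nan]
    return [float(part) for part in raw.split()]

print(parse_odds_cell(u'210\xbd -110'))  # [210.5, -110.0]
print(parse_odds_cell(u'-3 -105'))       # [-3.0, -105.0]
print(parse_odds_cell(u'N/A'))           # [nan, nan]
```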
#### File: dfs/nba/buildteams.py
```python
from __future__ import print_function
from argparse import ArgumentParser
import pandas as pd, numpy as np
from collections import Counter
from dfs.nba.playerid import id2name
from dfs.knapsack.knapsack import solve_all
from dfs.knapsack.heuristics import best_vorp
MAX_PLAYERS_PER_TEAM = 3
import IPython
positions = {'PG': 2, 'SG': 2, 'SF': 2, 'PF': 2, 'C': 1}
def build_team(datafile, salary_col, position_col, prediction_col, cap=60000, legal_teams=None):
"""
Construct teams from a set of prediction data
:param str datafile: saved prediction data (pickle file)
:param str salary_col: name of salary column
:param str position_col: name of position column
:param str prediction_col: name of prediction column to use
:param list[str] legal_teams: an optional list of legal NBA teams for the game
:return pd.DataFrame: prediction data for chosen team
"""
player_data = pd.read_pickle(datafile)
# Load real names for later use
player_data['name'] = player_data['bref_id'].apply(id2name)
if legal_teams:
player_data = player_data[player_data['Tm'].isin(legal_teams)]
# Ditch any undefined rows for salary / position / prediction as they will break the solver
player_data.dropna(subset=[salary_col, position_col, prediction_col], inplace=True)
# Cast player cost column to integers; this will also break the solver! :)
player_data[salary_col] = player_data[salary_col].astype(int)
# an optimization: speed up computation by only keeping the best-projected two players at each (position, salary).
# this should mean we only keep players we could potentially use
# it is hypothetically true that this could burn us if we get hit by the "too many players from team X" consideration
#grouped_player_data = player_data.groupby([salary_col, position_col], sort=False)
# this actually figures out how many players we need at the given position and keeps only that many at each salary level
#candidates = grouped_player_data.apply(lambda group: group.sort(prediction_col).tail(positions[group[position_col].iloc[0]]))
#
    # a more detailed, even more aggressive (and sketchier) optimization: remove all players who are strictly worse than others
# (all players for whom two players are better and at least as cheap -- or one for centers. I hard coded that to save time)
# this could burn us pretty hard if we run into a team constraint in the end
def dominators(row):
return len(player_data[(player_data['predicted'] > row['predicted'])
& (player_data['salary'] <= row['salary'])
& (player_data['pos'] == row['pos'])])
player_data['dominators'] = player_data.apply(dominators, axis=1)
candidates = player_data[(player_data['dominators'] == 0) |
((player_data['pos'] != 'C') & (player_data['dominators'] <= 1))]
candidates.set_index('bref_id', inplace=True)
while True: # because python doesn't have do... while
best_team = best_vorp(data=candidates,
cost_column=salary_col,
value_column=prediction_col,
type_column=position_col,
required_types=positions,
cap=cap,
debug_print_fn=print)
# Implement an additional constraint -- we can't have more than 4 players from the same team.
# We'll actually be a little stricter and try to restrict it at 3 (see MAX_PLAYERS_PER_TEAM).
teams_of_selection = Counter(candidates.loc[best_team, 'Tm'].values)
        most_common_team, count = teams_of_selection.most_common(1)[0]  # popitem() is arbitrary; take the actual most common team
if count <= MAX_PLAYERS_PER_TEAM:
return candidates.loc[best_team]
else:
# Nope, this is an illegal team. Try to help us generate a real one by dropping the lowest-valued player
# on the team from the list of possible candidates.
players_on_most_common_team = [c for c in best_team if candidates.loc[c, 'Tm'] == most_common_team]
team_players = candidates.loc[players_on_most_common_team].copy()
team_players['value'] = team_players[prediction_col].divide(team_players[salary_col])
team_players.sort('value', inplace=True)
worst_player = team_players.iloc[0].name
print('Ideal team had %d players from %s. Banning player: %s' % (count, most_common_team, worst_player))
candidates = candidates.drop([worst_player])
def genteam_to_file(outfile, datafile, salary_col, position_col, prediction_col, cap=60000, legal_teams=None):
best_team = build_team(datafile, salary_col, position_col, prediction_col, cap, legal_teams)
best_team.to_pickle(outfile)
def build_teams_cli():
p = ArgumentParser()
p.add_argument("data", help="pickled dataframe containing players positions, salaries, and predictions")
p.add_argument("outfile", help="output: pickled dataframe containing information for selected players")
p.add_argument("--salary", default="salary", help="name of salary column")
p.add_argument("--position", default="pos", help="name of position column")
p.add_argument("--prediction", default="predicted", help="name of predicted column")
p.add_argument("--cap", default=60000, help="salary cap amount")
p.add_argument("--teams", default=None, nargs='+', help="legal NBA teams for game")
cfg = p.parse_args()
genteam_to_file(outfile=cfg.outfile,
datafile=cfg.data,
salary_col=cfg.salary,
position_col=cfg.position,
prediction_col=cfg.prediction,
cap=cfg.cap,
legal_teams=cfg.teams)
```
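The `dominators` pruning in `build_team` drops any player for whom enough same-position players project higher at an equal-or-lower salary. A toy reproduction on a hand-made DataFrame (player names and numbers are invented):
```python
import pandas as pd

players = pd.DataFrame({
    'pos':       ['PG', 'PG', 'PG', 'C'],
    'salary':    [8000, 7500, 9000, 5000],
    'predicted': [40.0, 42.0, 35.0, 25.0],
}, index=['pg_a', 'pg_b', 'pg_c', 'c_a'])

def dominators(row):
    # players at the same position who project better and cost no more
    return len(players[(players['predicted'] > row['predicted'])
                       & (players['salary'] <= row['salary'])
                       & (players['pos'] == row['pos'])])

players['dominators'] = players.apply(dominators, axis=1)
# keep centers only if undominated, other positions if dominated by at most one player
keep = players[(players['dominators'] == 0) |
               ((players['pos'] != 'C') & (players['dominators'] <= 1))]
print(keep.index.tolist())  # pg_c is dominated by both pg_a and pg_b, so it is dropped
```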
#### File: dfs/nba/model.py
```python
from argparse import ArgumentParser
import pandas as pd
import numpy as np
import pickle
from sklearn.linear_model import Ridge, LinearRegression, Lasso, ElasticNet
from sklearn.ensemble import RandomForestRegressor
from .attrs import read_attrs
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def build_model(train_file, attr_file, model_out, algorithm='ridge'):
classifiers = ['ridge', 'linear', 'lasso', 'rf', 'en']
if algorithm not in classifiers:
raise NotImplementedError("only implemented algorithms: " + str(classifiers))
train_data = pd.read_pickle(train_file)
attrs = read_attrs(attr_file)
target_attr = attrs[0]
usable_attrs = attrs[1:]
if algorithm == 'ridge':
clf = Ridge()
elif algorithm == 'linear':
clf = LinearRegression()
elif algorithm == 'lasso':
clf = Lasso()
elif algorithm == 'en':
clf = ElasticNet()
else:
clf = RandomForestRegressor()
logger.debug("Modeling '%s'", target_attr)
logger.debug(" train set (%d): %s", len(train_data), train_file)
logger.debug(" Algorithm: %s", algorithm)
    clf.fit(train_data[usable_attrs], train_data[target_attr])
    if hasattr(clf, 'coef_'):
        # coefficients only exist after fitting, so log them post-fit
        logger.debug('Coefficients:')
        for i, c in enumerate(clf.coef_):
            logger.debug('  %-20s: %20.4f', usable_attrs[i], c)
pickle.dump(clf, open(model_out, 'wb'))
def apply_model(model_file, test_file, attr_file, predictions_out, live=False):
clf = pickle.load(open(model_file, 'rb'))
test_data = pd.read_pickle(test_file)
attrs = read_attrs(attr_file)
target_attr = attrs[0]
usable_attrs = attrs[1:]
# keep the identifier columns which are already present in the test data
identifier_cols = ['bref_id', 'Opp', 'Tm', 'date', 'salary', 'pos']
identifier_cols = [col for col in identifier_cols if col in test_data.columns]
predictions = clf.predict(test_data[usable_attrs])
if live:
prediction_results = test_data[usable_attrs + identifier_cols].copy()
else:
prediction_results = test_data[[target_attr] + usable_attrs + identifier_cols].copy()
prediction_results['predicted'] = predictions
prediction_results.to_pickle(predictions_out)
if not live:
errors = predictions - test_data[target_attr]
logger.info("Predicting '%s'", target_attr)
logger.debug(" test set (%d): %s", len(test_data), test_file)
logger.info(' MSE : %10.4f' % np.mean(errors ** 2))
logger.info(' medSE: %10.4f' % np.median(errors ** 2))
logger.info(' SSE : %10.4f' % np.sum(errors ** 2))
logger.info(' Variance score: %.4f' % clf.score(test_data[usable_attrs], test_data[target_attr]))
def build_model_cli():
p = ArgumentParser()
p.add_argument("train", default="train.pickle", help="training filename")
p.add_argument("test", default="test.pickle", help="test filename")
p.add_argument("attrs", default="attrs.txt", help="attributes to incorporate into model")
p.add_argument("model", default="model.pickle", help="pickle model to this file")
p.add_argument("--predictions-out", default="predictions.pickle", help="save test preedictions/results here")
p.add_argument("--algo", default="ridge", help="modeling algorithm to use")
cfg = p.parse_args()
logger.addHandler(logging.StreamHandler())
build_model(train_file=cfg.train, attr_file=cfg.attrs, model_out=cfg.model, algorithm=cfg.algo)
apply_model(model_file=cfg.model, test_file=cfg.test, attr_file=cfg.attrs, predictions_out=cfg.predictions_out)
```
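`build_model` and `apply_model` are thin wrappers around scikit-learn regressors, keyed by an attribute file whose first entry is the target column. A minimal end-to-end sketch of that flow on synthetic data (the column names below echo features seen elsewhere in this repo but the data is made up):
```python
import numpy as np
import pandas as pd
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
train = pd.DataFrame(rng.rand(200, 3), columns=['Last5:PPG', 'Vegas:OverUnder', 'NF:Minutes'])
train['Target:FDFP'] = 2.0 * train['Last5:PPG'] + 0.5 * train['NF:Minutes'] + rng.normal(0, 0.1, 200)

attrs = ['Target:FDFP', 'Last5:PPG', 'Vegas:OverUnder', 'NF:Minutes']  # target first, as in attrs.txt
target_attr, usable_attrs = attrs[0], attrs[1:]

clf = Ridge()
clf.fit(train[usable_attrs], train[target_attr])

test = train.sample(20, random_state=1)
errors = clf.predict(test[usable_attrs]) - test[target_attr]
print('MSE : %10.4f' % np.mean(errors ** 2))
print('Variance score: %.4f' % clf.score(test[usable_attrs], test[target_attr]))
```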
#### File: dfs/nba/playerid.py
```python
import warnings
from fuzzywuzzy import process as fzmatch
from dfs.extdata.bbr.mapping_df import load_player_table
mapping_df = load_player_table()
unset_index = mapping_df.reset_index()
unset_index.fillna('', inplace=True)
mapping_by_name = unset_index.set_index('name')
mapping_by_id = unset_index.set_index('brefid')
def name2nbaid(player_name, player_team=None, get_confidence=False):
""" Consolidate all fuzzy-matching name lookups into this function alone.
    :param str player_name: player name
    :param str player_team: optional team name used to narrow candidates to a single roster
    :param bool get_confidence: if True, also return the fuzzy-match score
    :return: bref id (plus the match score when get_confidence is True)
"""
tla = None
if player_team is not None:
tla = team_tla(player_team)
usable_mapping = mapping_by_name[mapping_by_name['team'] == tla]
else:
usable_mapping = mapping_by_name
choices = usable_mapping.index
match, score = fzmatch.extractOne(player_name, choices)
if score < 75 and not get_confidence:
# We only warn if we haven't been asked to give our confidence in the match.
# If we are returning confidence, we assume the caller is dealing with it intelligently.
# I think this will be useful for loading fewer garbage stats when matches don't happen.
warnings.warn("Low confidence NBA name match: %s [%s] -> %s (confidence=%d)" % (player_name, tla, match, score))
if get_confidence:
return usable_mapping.loc[match, 'brefid'], score
else:
return usable_mapping.loc[match, 'brefid']
def id2name(player_id):
return mapping_by_id.loc[player_id, 'name']
def team_tla(team, get_confidence=False):
"""
Try to come up with a canonical TLA for the given team. The values we choose from are in mapping_df.
:param str team:
:return str: TLA
"""
if team in _acceptable_tlas: # We can't use _team_tlas here for a shortcut since we ban some bad TLAs.
return team
else:
match, score = fzmatch.extractOne(team, _team_choices.keys())
actual_team = _team_choices[match]
if score < 90 and not get_confidence:
warnings.warn("Low confidence NBA team match: %s -> %s -> %s (confidence=%d)" % (team, match, actual_team, score))
if get_confidence:
return actual_team, score
else:
return actual_team
_team_tlas = set(mapping_df['team'].values)
# add aliases in the form of (TLA, alias)
_team_alias_list = [
('ATL', "Atlanta Hawks"),
('BOS', "Boston Celtics"),
('BRK', "Brooklyn Nets"),
('BRK', "BKN"),
('CHI', "Chicago Bulls"),
('CHO', "Charlotte Hornets"),
('CLE', "CLV"),
('CLE', "Cleveland Cavaliers"),
('DAL', "Dallas Mavericks"),
('DEN', "Denver Nuggets"),
('DET', "Detroit Pistons"),
('GSW', "Golden State Warriors"),
('GSW', "Golden State"),
('HOU', "Houston Rockets"),
('IND', "Indiana Pacers"),
('LAC', "Los Angeles Clippers"),
('LAC', "L.A. Clippers"),
('LAL', "Los Angeles Lakers"),
('LAL', "L.A. Lakers"),
('MEM', "Memphis Grizzlies"),
('MIA', "Miami Heat"),
('MIL', "Milwaukee Bucks"),
('MIN', "Minnesota Timberwolves"),
('NJN', "New Jersey Nets"),
('NOP', 'NOH'),
('NOP', "New Orleans Hornets"), # Force Hornets references to be understood as Pelicans refs
('NOP', "New Orleans Pelicans"),
('NYK', "New York Knicks"),
('OKC', "Oklahoma City Thunder"),
('ORL', "Orlando Magic"),
('PHI', "Philadelphia 76ers"),
('PHO', "Phoenix Suns"),
('POR', "Portland Trail Blazers"),
('SAC', "Sacramento Kings"),
('SAS', "SA"), # SA = San Antonio
('SAS', "San Antonio Spurs"),
('SEA', "Seattle SuperSonics"), # lol
('TOR', "Toronto Raptors"),
('UTA', "Utah Jazz"),
('WAS', "Washington Wizards"),
] + [(tla, tla) for tla in _team_tlas] # also consider that the TLA may have just been misspelled.
# There are some old team TLA's with no active players that I am just banning here to make it easy.
_bad_tlas = ["NOH"]
_filtered_alias_list = filter(lambda (tla, _): tla not in _bad_tlas, _team_alias_list)
_team_choices = {alias: tla for tla, alias in _filtered_alias_list}
_acceptable_tlas = set(_team_choices.values())
def get_position(player_id):
"""
Quick load a player's position by his bref ID
:param str player_id: bref_id
:return str: position
"""
return mapping_by_id.loc[player_id]['pos']
```
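`name2nbaid` and `team_tla` both lean on `fuzzywuzzy.process.extractOne`, returning the best match plus a 0-100 score that downstream code thresholds (75 for players, 90 for teams). A tiny standalone illustration of that pattern (the choice list here is invented, not the real bref index):
```python
from fuzzywuzzy import process as fzmatch

choices = ['LeBron James', 'Stephen Curry', 'Kevin Durant']  # stand-in for the bref name index

match, score = fzmatch.extractOne('Lebron Jame', choices)
print(match, score)  # best candidate plus a 0-100 confidence score
if score < 75:
    print('low-confidence match; caller should treat this row with suspicion')
```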
#### File: dfs/nba/predict.py
```python
import pandas as pd, numpy as np
from dfs.extdata.bbr.gamelogs import load_all_game_data
from dfs.nba.featurizers import salary_fzr
from dfs.nba.playerid import get_position, team_tla, name2nbaid
from dfs.fanduel2.io import get_game_info, get_loaded_games, read_player_csv
import datetime
from dateutil.parser import parse
def get_eligible_players_df(game_day, fd_game_id=None, guess_historical=False):
"""
Get a dataframe of eligible NBA players on the target day for a particular FanDuel game ID.
:param datetime.datetime game_day: day to grab players for
:param int game_id: FanDuel game ID
:param bool guess_historical: whether to guess who was an "eligible" player that day -- only include anyone who actually played
:return pd.DataFrame: rows for eligible players
"""
parsed_date = parse(game_day)
if not guess_historical:
if fd_game_id is None:
print "Loading possible game IDs for this day..."
loaded_games = get_loaded_games('nba')
loaded_games['gameday'] = loaded_games['gamedate'].apply(lambda d: d.date())
todays_games = loaded_games[loaded_games['gameday'] == parsed_date.date()]
#import IPython
#IPython.embed()
chosen_game = todays_games.iloc[0]
fd_game_id = chosen_game['game_id']
print 'Choosing game', chosen_game['title'], '(ID', fd_game_id, ')'
print ' Starts at:', chosen_game['gameday']
print ' Fee: $', chosen_game['fee']
player_df = read_player_csv('nba', parsed_date, fd_game_id)
player_df['Tm'] = player_df['Team'].apply(team_tla)
player_df['Opp'] = player_df['Opponent'].apply(team_tla)
player_df['date'] = pd.Series(parsed_date, index=player_df.index)
player_df['bref_id'] = player_df.apply(lambda row: name2nbaid(row['First Name'] + ' ' + row['Last Name'], row['Tm']),
axis=1)
player_df['bref_id_confidence'] = player_df.apply(lambda row: name2nbaid(row['First Name'] + ' ' + row['Last Name'], row['Tm'], get_confidence=True)[1],
axis=1)
player_df.rename(columns={'Position': 'pos', 'Salary': 'salary'}, inplace=True)
# Dump anyone we had low confidence in matching by team/name
player_df = player_df[player_df['bref_id_confidence'] >= 75]
# Finally, dump anyone who is injured (todo: keep GTD players?)
safe_players = player_df[player_df['Injury Indicator'].isnull()].copy()
return safe_players
elif guess_historical and parsed_date.date() >= datetime.date.today():
raise ValueError("Can't guess historical players for a game that hasn't happened yet...")
else:
# load historical players from our archive as if it were a game about to be played
game_date = parsed_date.date()
all_games = load_all_game_data()
day_players = all_games[all_games['date'] == game_date]
actual_players = day_players[day_players['MP'] > 0]
return_df = actual_players[['bref_id', 'Opp', 'Tm']].copy()
# set the date to the gameday everywhere
return_df['date'] = pd.Series(parsed_date, index=return_df.index)
# Grab salary information from our scraped numberfire cache
return_df['salary'] = return_df.apply(lambda row: salary_fzr(row), axis=1)
# Assemble player positional information from our gamelogs
return_df['pos'] = return_df['bref_id'].apply(lambda pid: get_position(pid))
return return_df
def dump_eligible_players_df(outfile, *args, **kwargs):
"""
Get a dataframe of eligible NBA players on the target day for a particular FanDuel game ID and save to file
Replaces dumping for the live pipeline.
:param str outfile: save pickle dump to this file
:param datetime.datetime game_day: day to grab players for
    :param int fd_game_id: FanDuel game ID
:param bool guess_historical: whether to guess who was an "eligible" player that day -- only include anyone who actually played
:return pd.DataFrame: rows for eligible players
"""
df = get_eligible_players_df(*args, **kwargs)
df.to_pickle(outfile)
```
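For live slates, `get_eligible_players_df` keeps only rows whose name/team match scored at least 75 and whose injury indicator is empty; for historical days it keeps anyone who actually logged minutes. A compact sketch of those two filters on a made-up frame:
```python
import numpy as np
import pandas as pd

players = pd.DataFrame({
    'bref_id':            ['a01', 'b01', 'c01'],
    'bref_id_confidence': [98, 60, 91],
    'Injury Indicator':   [np.nan, np.nan, 'O'],
    'MP':                 [34, 0, 28],
})

# live-slate filters: confident name match and no injury flag
live = players[(players['bref_id_confidence'] >= 75) & (players['Injury Indicator'].isnull())]
# historical guess: anyone who actually logged minutes that day
historical = players[players['MP'] > 0]

print(live['bref_id'].tolist())        # ['a01']
print(historical['bref_id'].tolist())  # ['a01', 'c01']
```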
#### File: dfs/nba/split.py
```python
from argparse import ArgumentParser
import pandas as pd
import numpy as np
from .attrs import read_attrs
DATE_COL = "date"
def strip_and_process_na(data, attrfile, na_strategy, include_target=True):
attrs = read_attrs(attrfile)
if not include_target:
attrs = attrs[1:]
# Keep only the attributes we care about as well as any relevant indexing columns
relevant_columns = attrs
if DATE_COL not in relevant_columns:
relevant_columns.append(DATE_COL)
    # This way we can drop NA values without destroying all the data just b/c some irrelevant column is missing
if na_strategy == 'drop':
fixed_data = data.dropna(subset=relevant_columns)
elif na_strategy == 'zero':
only_updated_cols = data[relevant_columns]
nonzero = only_updated_cols.fillna(value=0)
fixed_data = data.copy()
fixed_data.update(nonzero)
else:
raise NotImplementedError("invalid na_strategy")
return fixed_data
def split_data(expanded_data, trainpct, split_randomly):
train_example_count = int(len(expanded_data.index) * trainpct / 100.0)
if split_randomly:
train_indices = np.random.choice(expanded_data.index, size=train_example_count, replace=False)
else:
train_indices = expanded_data.sort(DATE_COL).index[:train_example_count]
train_data = expanded_data.ix[train_indices]
test_data = expanded_data.drop(train_indices)
return train_data, test_data
def strip_and_process_to_files(expanded_file, stripped_file, attrfile, na_strategy, include_target):
data = pd.read_pickle(expanded_file)
stripped_data = strip_and_process_na(data=data,
attrfile=attrfile,
na_strategy=na_strategy,
include_target=include_target)
stripped_data.to_pickle(stripped_file)
def split_to_files(trainfile, testfile, stripped, trainpct, split_randomly):
expanded_data = pd.read_pickle(stripped)
train_data, test_data = split_data(expanded_data=expanded_data,
trainpct=trainpct,
split_randomly=split_randomly)
train_data.to_pickle(trainfile)
test_data.to_pickle(testfile)
def split_cli():
p = ArgumentParser()
p.add_argument("expanded", default="expanded.pickle", help="Expanded pickle file targets.")
p.add_argument("stripped", default="test.pickle", help="stripped data filename")
p.add_argument("train", default="train.pickle", help="training filename")
p.add_argument("test", default="test.pickle", help="test filename")
p.add_argument("attrfile", default="attrs.txt", help="attrs to care about for NA purposes")
p.add_argument("--na-strategy", default="drop", help="what to do with NA rows (default is drop them)")
p.add_argument("--trainpct", default=70, type=int, help="percentage of data to put into training set")
p.add_argument("--random", action='store_true', help="split train/test sets randomly (default is by time)")
cfg = p.parse_args()
    strip_and_process_to_files(expanded_file=cfg.expanded,
                               stripped_file=cfg.stripped,
                               attrfile=cfg.attrfile,
                               na_strategy=cfg.na_strategy,
                               include_target=True)
split_to_files(trainfile=cfg.train,
testfile=cfg.test,
stripped=cfg.stripped,
trainpct=cfg.trainpct,
split_randomly=cfg.random)
```
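`strip_and_process_na` either drops rows with missing values in the modeled columns or zero-fills just those columns, and `split_data` cuts train/test either randomly or by date. A small worked example of both strategies (toy columns, not real features):
```python
import numpy as np
import pandas as pd

data = pd.DataFrame({
    'date': pd.to_datetime(['2015-01-01', '2015-01-02', '2015-01-03']),
    'feat': [1.0, np.nan, 3.0],
    'note': ['x', 'y', 'z'],  # irrelevant column: its values never affect NA handling
})

dropped = data.dropna(subset=['feat', 'date'])   # na_strategy='drop' loses the middle row
zeroed = data.copy()
zeroed.update(data[['feat']].fillna(value=0))    # na_strategy='zero' keeps it with feat == 0

train_pct = 70
n_train = int(len(dropped) * train_pct / 100.0)  # date-ordered split, as when --random is off
by_date = dropped.sort_values('date')
train, test = by_date.iloc[:n_train], by_date.iloc[n_train:]
print("%d %d %d %d" % (len(dropped), len(zeroed), len(train), len(test)))  # 2 3 1 1
```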
#### File: test/nba/test_expansion.py
```python
import pandas
import pkg_resources
from unittest import TestCase
from dfs.nba.expansion import get_expansion_targets, encode_names, expand_nba_data, discretize_data
class ExpansionTestCase(TestCase):
def setUp(self):
# A little test data from the past few years, useful for testing BREF data
testfn = pkg_resources.resource_filename(__name__, 'test.pickle')
self.data = pandas.read_pickle(testfn)
# More recent test data -- used for testing external data
recentfn = pkg_resources.resource_filename(__name__, 'recent.pickle')
self.recentdata = pandas.read_pickle(recentfn)
# grab only one entry from each for super simple testing
self.ezdata = pandas.concat([self.data.tail(1), self.recentdata.tail(1)])
def test_get_expansion_targets(self):
live_targets = list(get_expansion_targets(expanding_live=False))
old_targets = list(get_expansion_targets())
# Check types
for name, (fn, targets) in live_targets + old_targets:
assert isinstance(name, basestring)
assert hasattr(fn, '__call__')
assert isinstance(targets, list)
for i in targets:
assert isinstance(i, basestring)
def test_encode_names(self):
self.assertDictEqual({"A":"FeatureName:A", "B":"FeatureName:B", "C":"FeatureName:C"},
encode_names("FeatureName",["A","B","C"]))
def test_expansion(self):
# basically just make sure these don't crash
expand_nba_data(self.data, live=False)
expand_nba_data(self.recentdata, live=True)
ez_expand = expand_nba_data(self.ezdata, live=False)
ez_expand_live = expand_nba_data(self.ezdata, live=True)
# this stuff sucks and was always getting killed b/c of data updates
#self.maxDiff = None
#count_dict = {'bref_id': 2, u'FT': 2, 'NF:STL': 1, 'OpponentLast2Weeks:MaxFPPMP': 2, u'3P': 2, u'TOV': 2, 'OpponentLast2Weeks:MaxFPPG': 2, u'Tm': 2, u'GmSc': 2, u'FG': 2, u'3PA': 2, u'DRB': 2, u'Rk': 2, 'NF:BLK': 1, u'Opp': 2, u'AST': 2, u'HomeAway': 0, u'FT%': 1, 'NF:Minutes': 1, u'PF': 2, 'NF:TOV': 1, u'PTS': 2, u'FGA': 2, 'Vegas:Spread': 2, 'OpponentLast2Weeks:AvgFPPG': 2, u'GS': 2, u'G': 2, 'NF:FP': 1, u'STL': 2, 'Last5:PPG': 2, u'Age': 2, u'TRB': 2, u'DFS': 1, u'FTA': 2, u'BLK': 2, 'date': 2, u'FG%': 2, 'OpponentLast2Weeks:AvgFPPMP': 2, 'Vegas:OverUnder': 2, u'+/-': 2, u'WinLoss': 2, 'NF:PTS': 1, 'Target:FDFP': 2, 'NF:REB': 1, 'NF:AST': 1, u'MP': 2, 'NF:PF': 1, 'OpponentLast2Weeks:FPPMP': 2, u'ORB': 2, u'3P%': 2, 'Salary:FanDuel Salary':1}
#self.assertDictEqual(count_dict, ez_expand.count().to_dict())
#live_count_dict = {'bref_id': 2, u'FT': 2, 'NF:STL': 1, 'OpponentLast2Weeks:MaxFPPMP': 2, u'3P': 2, u'TOV': 2, 'OpponentLast2Weeks:MaxFPPG': 2, u'Tm': 2, u'GmSc': 2, u'FG': 2, u'3PA': 2, u'DRB': 2, u'Rk': 2, 'NF:BLK': 1, u'Opp': 2, u'AST': 2, u'HomeAway': 0, u'FT%': 1, 'NF:Minutes': 1, u'PF': 2, 'NF:TOV': 1, u'PTS': 2, u'FGA': 2, 'Vegas:Spread': 2, 'OpponentLast2Weeks:AvgFPPG': 2, u'GS': 2, u'G': 2, 'NF:FP': 1, u'STL': 2, 'Last5:PPG': 2, u'Age': 2, u'TRB': 2, u'DFS': 1, u'FTA': 2, u'BLK': 2, 'date': 2, u'FG%': 2, 'OpponentLast2Weeks:AvgFPPMP': 2, 'Vegas:OverUnder': 1, u'+/-': 2, u'WinLoss': 2, 'NF:PTS': 1, 'NF:PF': 1, 'NF:REB': 1, 'NF:AST': 1, u'MP': 2, 'OpponentLast2Weeks:FPPMP': 2, u'ORB': 2, u'3P%': 2}
#self.assertDictEqual(live_count_dict, ez_expand_live.count().to_dict())
def test_discretization(self):
stadium_series = pandas.Series(data=["Lambeau", "Levis", "Qwest"]) # Pretend this is an expanded field
awesomeness_series = pandas.Series(data=[100, 30, 0]) # this is a continuous field
name_series = pandas.Series(data=["Packers", "49ers", "Seahawks"]) # and this is a not-expanded discrete field
df = pandas.DataFrame.from_dict({"Team:Stadium": stadium_series,
"Awesomeness": awesomeness_series,
"Team Name": name_series})
discretized = discretize_data(df)
# make sure only the expanded discrete fields were discretized
self.assertItemsEqual(["Team:Stadium=Lambeau","Team:Stadium=Levis","Team:Stadium=Qwest","Awesomeness","Team Name"],
discretized.columns)
```
#### File: test/nba/test_fzrs.py
```python
import tempfile
import shutil
import os
import pandas
import numpy as np
import datetime
import pkg_resources
from unittest import TestCase
from dfs.nba.featurizers import feature_generators
from dfs.nba.featurizers import fantasy_points_fzr, last5games_fzr, nf_stats_fzr, vegas_fzr, \
opp_ffpg_fzr, salary_fzr
class FeaturizersTest(TestCase):
def setUp(self):
# A little test data from the past few years, useful for testing BREF data
testfn = pkg_resources.resource_filename(__name__, 'test.pickle')
self.data = pandas.read_pickle(testfn)
# More recent test data -- necessary for testing external data
recentfn = pkg_resources.resource_filename(__name__, 'recent.pickle')
self.recentdata = pandas.read_pickle(recentfn)
def testDataIntegrity(self):
assert len(self.data) == 10
assert self.data.iloc[0]['bref_id'] == 'gallola01'
assert self.data.iloc[9]['bref_id'] == 'dunlemi02'
assert len(self.recentdata) == 10
assert self.recentdata.iloc[0]['bref_id'] == 'barnema02'
assert self.recentdata.iloc[9]['bref_id'] == 'lawsoty01'
def testDecorator(self):
# Make sure the decorator is properly wrapping functions and turning their list outputs into pandas.Series
for func_name in feature_generators:
assert isinstance(func_name, basestring)
wrapper, columns, live = feature_generators[func_name]
output = wrapper(self.data.iloc[0])
self.assertTrue(isinstance(output, pandas.Series))
self.assertItemsEqual(columns, output.index)
def applyFeaturizer(self, fzr_function, expected_output, use_recent=False):
data = self.recentdata if use_recent else self.data
for integer_index, (_, row) in enumerate(data.iterrows()):
actual_output = fzr_function(row)
for i in range(len(expected_output[integer_index])):
# First check if they're both NaN
if np.isnan(expected_output[integer_index][i]) and np.isnan(actual_output.iloc[i]):
continue
self.assertAlmostEqual(expected_output[integer_index][i],
actual_output.iloc[i],
places=3,
msg="Error in row %d item %d of %s. Reference %s, actual output %s." % (
integer_index,
i,
'recentdata' if use_recent else 'data',
expected_output[integer_index][i],
actual_output.iloc[i]
))
def test_fantasy_points_fzr(self):
self.applyFeaturizer(fantasy_points_fzr, [[20.1],
[4.0],
[17.3],
[4.2],
[22.5],
[36.3],
[27.9],
[31.3],
[17.8],
[11.7]])
def test_last5games_fzr(self):
self.applyFeaturizer(last5games_fzr, [[25.1],
[6.78],
[18.78],
[6.26],
[19.24],
[29.56],
[30.74],
[31.36],
[13.94],
[23.72]])
def test_nf_stats_fzr(self):
self.applyFeaturizer(nf_stats_fzr,
[[23.76,6.0,2.7,1.4,0.6,0.2,0.8,1.9,12.14],
[35.97,19.0,6.1,4.0,1.1,0.2,2.1,2.9,32.82],
[23.58,12.9,2.7,1.7,0.7,0.2,1.2,2.4,19.29],
[np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan],
[27.23,10.4,4.4,2.9,0.6,0.3,1.8,2.3,20.03],
[23.39,7.10,3.0,1.0,0.5,0.3,0.6,2.1,13.2],
[24.62,8.1,4.2,1.6,0.6,0.2,1.4,2.4,15.74],
[18.26,9.2,3.0,1.1,0.5,0.4,0.7,1.4,15.55],
[23.38,8.1,3.5,0.9,0.6,0.2,0.8,1.7,14.45],
[35.62,18.8,4,7.5,1.5,0.1,2.8,2.4,35.25]],
use_recent=True)
def test_vegas_fzr(self):
self.applyFeaturizer(vegas_fzr,
[[10.5, 189.5],
[6.5, 199.5],
[9.5, 194.5],
[4.5, 194.0],
[8.5, 195.5],
[-1, 190.5],
[-5, 198],
[2.5, 196.5],
[-19, 200.0],
[-9, 181.0]
])
self.applyFeaturizer(vegas_fzr,
[[9.0, 204.5],
[-6.0, 200.5],
[4.5, 217.5],
[-5.5, 202.5],
[-5.5, 202.5],
[2.0, 195],
[13.0, 195],
[-4.0, 203.5],
[-6.0, 200.5],
[4.5, 217.5]],
use_recent=True)
def test_opp_ffpg_fzr(self):
self.applyFeaturizer(opp_ffpg_fzr,
[[18.389285714285712, 48.0, 0.85816666666666663, 1.1187666671058538, 20.0],
[17.040909090909093, 67.2, 0.76771331058020498, 0.76122548332443785, 2.0055710306406684],
[20.261666666666667, 42.4, 0.85140009104385328, 0.80628334990429773, 1.5840597758405979],
[15.684848484848485, 35.3, 0.71887224832758501, 0.67037347774416234, 1.3499043977055449],
[20.426530612244896, 52.4, 0.83409491798497215, 0.81556700238463165, 1.9865319865319866],
[17.885365853658534, 51.8, 0.7638541666666665, 0.69248549436529994, 1.3061224489795917],
[18.26969696969697, 66.2, 0.83735141954375503, 0.89284459636178026, 10.105263157894738],
[19.694339622641515, 54.6, 0.86982125248260445, 0.80132994567677285, 1.7091633466135459],
[17.863636363636363, 46.4, 0.81874052383653018, 0.80001770931620431, 1.5218658892128281],
[16.608974358974361, 56.2, 0.77021403091557705, 0.7193626173392953, 1.3805774278215222]],
use_recent=False)
def test_salary_fzr(self):
self.applyFeaturizer(salary_fzr, [[3500],
[8200],
[3700],
[np.nan],
[4100],
[3500],
[3500],
[4000],
[3700],
[7100]],
use_recent=True)
```
#### File: test/nba/test_playerid.py
```python
from dfs.nba.playerid import name2nbaid, team_tla, get_position
def test_name2nbaid():
# TODO: test using team lookup here as well?
good_matches = [("<NAME>", "davised01"),
("<NAME>", "collini01"),
("<NAME>", "ibakase01"),
("<NAME>", "snellto01"),
("<NAME>", "mirotni01"),
("<NAME>", "harklma01"),
("<NAME>", "bjeline01"),
("<NAME>", "haywago01"),
("<NAME>", "adamsst01"),
("<NAME>", "gasolpa01"),
("<NAME>", "roberbr01"),
("<NAME>", "millean02"),
("<NAME>", "millsel01"),
("<NAME>", "morroan01"),
("<NAME>", "rubiori01"),
("<NAME>", "whiteha01"),
("<NAME>", "duranke01"),
("<NAME>", "cottobr01"),
("<NAME>", "nowitdi01"),
("<NAME>", "lylestr01")]
for name, match in good_matches:
assert name2nbaid(name) == match
def test_team_lookup():
assert team_tla("LA Lakers") == "LAL"
assert team_tla("Phoenix Suns") == "PHO"
assert team_tla("BRK") == "BRK"
assert team_tla("CLV") == "CLE"
assert team_tla("gsw") == "GSW"
assert team_tla("NOH") == "NOP"
assert team_tla("SAC") == "SAS"
def test_player_position():
assert 'SF' == get_position('jamesle01')
assert 'PG' == get_position('curryst01')
assert 'C' == get_position('duncati01')
assert 'PF' == get_position('couside01')
assert 'SG' == get_position('hardeja01')
```
#### File: test/nba/test_predict.py
```python
from dfs.nba.predict import get_eligible_players_df
from unittest import TestCase
class PredictDayTestCase(TestCase):
def setUp(self):
pass
def test_get_eligible_players_df(self):
players = get_eligible_players_df('2015-11-10', guess_historical=True)
self.assertEqual(163, len(players))
self.assertListEqual(['ibakase01',
'roberan03',
'bassbr01',
'nealga01',
'johnsja01',
'kaminfr01',
'dragigo01',
'brownan02',
'ajincal01',
'loveke01'],
list(players['bref_id'].values[:10]))
# Make sure we have all the columns we need to get started
self.assertItemsEqual(['bref_id', 'Tm', 'Opp', 'date', 'salary', 'pos'], list(players.columns))
```
#### File: sportsball/test/test_mlb_playermatcher.py
```python
from unittest import TestCase
import random
from dfs.mlb.playerid import *
from dfs.extdata.crunchtime import load_latest_mapping
random.seed(0)
class TestMatcher(TestCase):
def setUp(self):
mapping = load_latest_mapping()
# Get all player names from Baseball Reference for testing
self.player_names = filter(lambda x: len(x) > 0, mapping["bref_name"].fillna('').values)
self.player_brefids = filter(lambda x: len(x) > 0, mapping["bref_id"].fillna('').values)
self.mlb2name = {425877: '<NAME>',
444432: '<NAME>',
474384: '<NAME>',
516589: '<NAME>',
429665: '<NAME>'}
self.name2pos = {
'<NAME>' : 'P',
'<NAME>' : 'P',
'<NAME>' : 'P',
'<NAME>' : '1B',
'<NAME>' : 'C',
'<NAME>' : 'P',
'<NAME>' : 'P',
'<NAME>' : 'P',
'<NAME>' : 'SS',
'<NAME>' : 'SS'
}
self.duplicate_named_players = {'<NAME>': [542432, 608070]}
def tearDown(self):
pass
def test_name2position(self):
for ref_name, ref_pos in self.name2pos.iteritems():
assert name2position(ref_name) == ref_pos
def test_name2mlbid(self):
for ref_id, ref_name in self.mlb2name.iteritems():
assert name2mlbid(ref_name) == ref_id
def test_espnid2mlbid(self):
assert espnid2mlbid(29769) == 451109
assert espnid2mlbid(33637) == 592091
assert espnid2mlbid(12459810384670134) is None
def test_brefid2mlbid(self):
assert brefid2mlbid('crowaa01') == 543070
assert brefid2mlbid('cunniaa01') == 488811
assert brefid2mlbid('sldglawlegawe') is None
def test_playerispitcher(self):
for brefid in self.player_brefids:
result = brefid_is_pitcher(brefid)
if type(result) != bool:
print brefid
print result
assert False
```
#### File: sportsball/test/test_simple_projections.py
```python
from mock import MagicMock
import unittest
from dfs.nba.simple_projections import prepare_team_dataframes, simulate_player_stats
from dfs.extdata.bbr.gamelogs import load_gamelogs
class TestProjections(unittest.TestCase):
def setUp(self):
self.playerdata = load_gamelogs()
def tearDown(self):
pass
def testSimPlayerStats(self):
pass
```
|
{
"source": "jgersti/avorion-obj-exporter",
"score": 2
}
|
#### File: src/avorion_obj_exporter/model.py
```python
import vtk
import numpy as np
import pyvista as pv
from avorion_obj_exporter.shapes import getCell
def _getColors(blocks):
helper = vtk.vtkNamedColors()
return np.asarray([helper.HTMLColorToRGBA(f"#{b['color'][2:]}") for b in blocks], dtype=np.uint8)
def _getOrientations(blocks, comp):
return np.array([b['orientation'][comp] for b in blocks], dtype=np.uint64)
def _getMaterials(blocks):
return np.array([b['material'] for b in blocks], dtype=np.uint64)
def _getTypes(blocks):
return np.array([b['type'] for b in blocks], dtype=np.uint64)
def createModel(blocks, merge=False, tolerance=1e-6):
def dist2(x,y):
z = x-y
return np.einsum('...i,...i->...', z, z)
def insert(points, newPoints, indices):
if merge:
pIndices = []
for p in newPoints:
try:
i = next(i for i, q in enumerate(points) if dist2(p, q) < tolerance)
except:
i = len(points)
points.append(p)
finally:
pIndices.append(i)
return np.array(pIndices)
else:
n = len(points)
points.extend(newPoints)
return np.arange(n, n+len(newPoints))
cells = []
offsets = []
points = []
types = []
n = len(blocks)
for i, _block in enumerate(blocks):
_type, _indices, _points = getCell(_block)
pIndices = insert(points, _points, _indices)
if type(_indices) is list:
cell = [[1+len(_indices), len(_indices)]]
for sub in _indices:
cell.extend([[len(sub)], pIndices[sub]])
cell[0][0] += len(sub)
else:
cell = [[len(_indices)], pIndices[_indices]]
cells.extend(np.concatenate(cell))
offsets.append(1 + cell[0][0] + (offsets[-1] if offsets else 0))
types.append(_type)
if pv._vtk.VTK9:
model = pv.UnstructuredGrid(np.asarray(cells), np.asarray(types), np.asarray(points))
else:
model = pv.UnstructuredGrid(np.asarray(offsets), np.asarray(cells), np.asarray(types), np.asarray(points))
data = model.cell_arrays
data['color'] = _getColors(blocks)
data['material'] = _getMaterials(blocks)
data['type'] = _getTypes(blocks)
return model.extract_surface(pass_pointid=False)
```
#### File: src/avorion_obj_exporter/reader.py
```python
import numpy as np
try:
import xml.etree.cElementTree as ET
except:
import xml.etree.ElementTree as ET
from pathlib import Path
from dataclasses import dataclass
@dataclass
class Block:
lower: np.ndarray
upper: np.ndarray
orientation: np.ndarray
type: int
material: int
color: str
def _blockStats(block, legacy=False):
stats = {}
if block.tag == 'block':
attr = block.attrib
stats['orientation'] = np.array([attr['look'], attr['up']], dtype=np.int64)
stats['type'] = int(attr['index'])
stats['material'] = int(attr['material'])
stats['color'] = attr['color']
if legacy:
stats['lower'] = np.array([attr['lowerX'], attr['lowerY'], attr['lowerZ']], dtype=np.float64)
stats['upper'] = np.array([attr['upperX'], attr['upperY'], attr['upperZ']], dtype=np.float64)
else:
stats['lower'] = np.array([attr['lx'], attr['ly'], attr['lz']], dtype=np.float64)
stats['upper'] = np.array([attr['ux'], attr['uy'], attr['uz']], dtype=np.float64)
else:
raise ValueError('invalid tag.')
return stats
def readShipXML (file):
print(f'Reading File: {file}')
tree = ET.parse(str(file))
root = tree.getroot()
    format = root.find('.//block[@lx]') is None
blocks = [_blockStats(i, legacy=format) for i in root.findall('.//block' if format else 'plan//block')]
if len(blocks) == 0:
raise IOError(f'Could not read file \'{file}\'.')
return blocks
```
#### File: src/avorion_obj_exporter/shapes.py
```python
import numpy as np
from avorion_obj_exporter.categories import SHAPES
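# Note on the string codes returned by the _get* helpers below: they are VTK cell
# type ids -- '10' VTK_TETRA, '12' VTK_HEXAHEDRON, '13' VTK_WEDGE, '14' VTK_PYRAMID
# and '42' VTK_POLYHEDRON -- which model.py passes to pyvista.UnstructuredGrid as
# the cell-type array.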
def _rotateReference(points, orientation):
# np.any((a < 1) | (a > 5))
if np.any((orientation < 0) | (orientation > 5)):
raise ValueError(f'Invalid orientation: {orientation}')
o = np.asarray([0.5, 0.5,0.5])
R = np.zeros((3,3))
sign = lambda b: 2*b-1
i, j = orientation // 2
k = 3 - i - j
u, v = sign(orientation % 2)
w = u*v*sign(i < j)*sign(k != 1)
R[i,0] = u
R[j,1] = v
R[k,2] = w
return np.einsum('ij,...j->...i', R, points-o) + o
def _getHexahedron(lower, upper, orientation):
ref = np.asarray([[0, 0, 0],
[0, 0, 1],
[1, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 1, 1],
[1, 1, 1],
[1, 1, 0]])
indices = np.asarray([0, 1, 2, 3, 4, 5, 6, 7])
points = _rotateReference(ref, orientation)
points = np.einsum('...i,i->...i', points, upper-lower) + lower
return '12', indices, points
def _getWedge(lower, upper, orientation):
ref = np.asarray([[0, 0, 0],
[1, 0, 0],
[1, 0, 1],
[0, 0, 1],
[1, 1, 0],
[1, 1, 1]])
indices = np.asarray([3, 2, 5, 0, 1, 4])
points = _rotateReference(ref, orientation)
points = np.einsum('...i,i->...i', points, upper-lower) + lower
return '13', indices, points
def _getPyramid1(lower, upper, orientation):
ref = np.asarray([[0, 0, 0],
[0, 0, 1],
[1, 0, 1],
[1, 0, 0],
[1, 1, 0]])
indices = np.asarray([0, 1, 2, 3, 4])
points = _rotateReference(ref, orientation)
points = np.einsum('...i,i->...i', points, upper-lower) + lower
return '14', indices, points
def _getPyramid2(lower, upper, orientation):
ref = np.asarray([[0, 0, 0],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
indices = np.asarray([0, 1, 2, 3, 4])
points = _rotateReference(ref, orientation)
points = np.einsum('...i,i->...i', points, upper-lower) + lower
return '14', indices, points
def _getTetra1(lower, upper, orientation):
ref = np.asarray([[0, 0, 0],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0]])
indices = np.asarray([0, 1, 3, 2])
points = _rotateReference(ref, orientation)
points = np.einsum('...i,i->...i', points, upper-lower) + lower
return '10', indices, points
def _getTetra2(lower, upper, orientation):
ref = np.asarray([[0, 0, 0],
[0, 0, 1],
[1, 0, 1],
[0, 1, 0]])
indices = np.asarray([0, 1, 2, 3])
points = _rotateReference(ref, orientation)
points = np.einsum('...i,i->...i', points, upper-lower) + lower
return '10', indices, points
def _getTetra3(lower, upper, orientation):
ref = np.asarray([[1, 0, 0],
[0, 0, 1],
[1, 0, 1],
[1, 1, 0]])
indices = np.asarray([0, 1, 2, 3])
points = _rotateReference(ref, orientation)
points = np.einsum('...i,i->...i', points, upper-lower) + lower
return '10', indices, points
def _getPolyhedron(lower, upper, orientation):
ref = np.asarray([[0, 0, 0],
[0, 0, 1],
[1, 0, 1],
[1, 0, 0],
[0, 1, 0],
[1, 1, 1],
[1, 1, 0]])
indices = [np.asarray([0, 1, 2, 3]),
np.asarray([0, 4, 6, 3]),
np.asarray([2, 3, 6, 5]),
np.asarray([0, 1, 4]),
np.asarray([1, 2, 5]),
np.asarray([4, 5, 6]),
np.asarray([1, 5, 4])]
points = _rotateReference(ref, orientation)
points = np.einsum('...i,i->...i', points, upper-lower) + lower
return '42', indices, points
def getCell(block):
index = block['type']
lower = block['lower']
upper = block['upper']
orientation = block['orientation']
if index in SHAPES['Edge']:
return _getWedge(lower, upper, orientation)
elif index in SHAPES['Corner 1']:
return _getTetra1(lower, upper, orientation)
elif index in SHAPES['Corner 2']:
return _getPolyhedron(lower, upper, orientation)
elif index in SHAPES['Corner 3']:
return _getPyramid1(lower, upper, orientation)
elif index in SHAPES['Twisted Corner 1']:
return _getTetra2(lower, upper, orientation)
elif index in SHAPES['Twisted Corner 2']:
return _getTetra3(lower, upper, orientation)
elif index in SHAPES['Flat Corner']:
return _getPyramid2(lower, upper, orientation)
else:
return _getHexahedron(lower, upper, orientation)
```
|
{
"source": "jgersti/blender-avorion-importer",
"score": 3
}
|
#### File: blender-avorion-importer/avorion_utils/categories.py
```python
from dataclasses import dataclass
@dataclass(frozen=True)
class Material:
index: int
name: str
color: str
MATERIALS = (
Material(0, "Iron", "#ffb380"),
Material(1, "Titanium", "#ffffff"),
Material(2, "Naonite", "#4dff4d"),
Material(3, "Trinium", "#4d9aff"),
Material(4, "Xanion", "#ffff4d"),
Material(5, "Ogonite", "#ff8133"),
Material(6, "Avorion", "#ff2626"),
)
SHAPES = {
"Cube": (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 22, 50, 51, 52, 53, 54, 55, 56, 57, 58, 60, 61,
121, 122, 150, 170, 180, 190, 510, 520, 600, 650, 700),
"Edge": (21, 23, 100, 104, 114, 123, 151, 171, 181, 185, 191, 511, 521,
601, 651, 701),
"Corner 1": (101, 105, 115, 124, 152, 172, 182, 186, 192, 512, 522, 702),
"Corner 2": (102, 106, 116, 125, 153, 173, 183, 187, 193, 513, 523, 703),
"Corner 3": (103, 107, 117, 126, 154, 174, 184, 188, 194, 514, 524, 704),
"Twisted Corner 1": (108, 110, 118, 128, 155, 175, 195, 197, 199, 515, 525, 705),
"Twisted Corner 2": (109, 111, 119, 129, 156,176, 196, 198, 200, 516, 526, 706),
"Flat Corner": (112, 113, 120, 127, 157, 177, 201, 202, 203, 517, 527, 707),
}
CATEGORIES = {
"Smart Hull": (0, 1, 2, 100, 101, 102, 103, 108, 109, 112),
"Hull": (121, 122, 123, 124, 125, 126, 127, 128, 129),
"Armor": (8, 104, 105, 106, 107, 110, 111, 113),
"Crew": (6, 114, 115, 116, 117, 118, 119, 120),
"Glow": (150, 151, 152, 153, 154, 155, 156, 157),
"Glass": (170, 171, 172, 173, 174, 175, 176, 177),
"Reflector": (180, 181, 182, 184, 195, 196, 201),
"Stone": (4, 185, 186, 187, 188, 197, 198, 202),
"Hologram": (190, 191, 192, 193, 194, 199, 200, 203),
"Rich Stone": (510, 511, 512, 513, 514, 515, 516, 517),
"Super Rich Stone": (520, 521, 522, 523, 524, 525, 526, 527),
"Wreckage": (700, 701, 702, 703, 704, 705, 706, 707),
# Rot. Lock, +Edge, Torp. Launcher, Frontal+, Turret Base, +Edge
"Hardpoints": (12, 23, 18, 22, 20, 21),
# Engine, Thruster, Dir. Thruster, Gyro, Inert. Dampener
"Propulsion": (3, 7, 13, 14, 15),
# Shield, Energy Cont., Generator, Int. Field, Comp. Core, Hyperspace Core,
"Systems": (50, 51, 52, 53, 54, 55),
# Cargo, Framework, Hangar, Dock, Flight Recorder, Assembly, Torp. Storage, Transporter,
# Academy, Cloning Pods, Solar Panel, Light, Name, +Edge, Logo, +Edge
"The Rest": (5, 9, 10, 11, 16, 17, 19, 56, 57, 58, 60, 61, 600, 601, 650, 651),
}
def get_shape(index: int) -> str:
try:
return next((key for key, indices in SHAPES.items() if index in indices), "Cube")
except:
raise ValueError(f"invalid type index '{index}'.")
def get_category(index: int) -> str:
try:
return next((key for key, indices in CATEGORIES.items() if index in indices))
except:
raise ValueError(f"invalid type index '{index}'.")
def get_material(index: int) -> Material:
try:
return next((m for m in MATERIALS if index == m.index))
except:
raise ValueError(f"invalid material index '{index}'.")
```
#### File: blender-avorion-importer/avorion_utils/geometry.py
```python
from __future__ import annotations
import numpy as np
from . categories import get_shape
from . parser import Block
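# Orientation note: a block's orientation is stored as two integers (parsed from the
# block's look/up attributes), each in 0..5; value // 2 selects the axis and value % 2
# the sign along it. _rotate_translate below turns that encoding into a rotation matrix
# and then maps the unit-cube reference points into the block's [lower, upper] bounds.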
def _rotate_translate(points: np.ndarray,
orientation: np.ndarray,
lower: np.ndarray,
upper: np.ndarray) -> np.ndarray:
def rotation(o):
def sign(b): return 2*b-1
i, j = o // 2
k = 3 - i - j
u, v = sign(o % 2)
w = u*v*sign(i < j)*sign(k != 1)
R = np.zeros((3, 3))
R[i, 0] = u
R[j, 1] = v
R[k, 2] = w
return R
_o = np.array([0.5, 0.5, 0.5])
points = np.einsum('ij,...j->...i', rotation(orientation), points-_o) + _o
points = np.einsum('...i,i->...i', points, upper-lower) + lower
return points
def _create_hexahedron() -> tuple[np.ndarray, np.ndarray, np.ndarray]:
points = np.asarray([[0, 0, 0],
[0, 0, 1],
[1, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 1, 1],
[1, 1, 1],
[1, 1, 0]])
faces = np.asarray([0, 3, 2, 1,
4, 5, 6, 7,
0, 1, 5, 4,
2, 3, 7, 6,
1, 2, 6, 5,
0, 4, 7, 3],
dtype = np.int64)
offsets = np.asarray([4, 4, 4, 4, 4, 4], dtype=np.int64)
return points, faces, offsets
def _create_wedge() -> tuple[np.ndarray, np.ndarray, np.ndarray]:
points = np.asarray([[0, 0, 0],
[1, 0, 0],
[1, 0, 1],
[0, 0, 1],
[1, 1, 0],
[1, 1, 1]])
faces = np.asarray([0, 1, 2, 3,
0, 3, 5, 4,
1, 4, 5, 2,
2, 5, 3,
0, 4, 1], dtype=np.int64)
offsets = np.asarray([4, 4, 4, 3, 3], dtype=np.int64)
return points, faces, offsets
def _create_pyramid_1() -> tuple[np.ndarray, np.ndarray, np.ndarray]:
points = np.asarray([[0, 0, 0],
[0, 0, 1],
[1, 0, 1],
[1, 0, 0],
[1, 1, 0]])
faces = np.asarray([0, 3, 2, 1,
0, 1, 4,
0, 4, 3,
1, 2, 4,
2, 3, 4],
dtype=np.int64)
offsets = np.asarray([4, 3, 3, 3, 3], dtype=np.int64)
return points, faces, offsets
def _create_pyramid_2() -> tuple[np.ndarray, np.ndarray, np.ndarray]:
points = np.asarray([[0, 0, 0],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
faces = np.asarray([0, 3, 2, 1,
0, 1, 4,
0, 4, 3,
1, 2, 4,
2, 3, 4],
dtype=np.int64)
offsets = np.asarray([4, 3, 3, 3, 3], dtype=np.int64)
return points, faces, offsets
def _create_tetrahedron_1() -> tuple[np.ndarray, np.ndarray, np.ndarray]:
points = np.asarray([[0, 0, 0],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0]])
faces = np.asarray([0, 2, 1,
0, 2, 3,
0, 3, 1,
1, 3, 2],
dtype=np.int64)
offsets = np.asarray([3, 3, 3, 3], dtype=np.int64)
return points, faces, offsets
def _create_tetrahedron_2() -> tuple[np.ndarray, np.ndarray, np.ndarray]:
points = np.asarray([[0, 0, 0],
[0, 0, 1],
[1, 0, 1],
[0, 1, 0]])
faces = np.asarray([0, 1, 3,
0, 2, 1,
0, 3, 2,
1, 2, 3],
dtype=np.int64)
offsets = np.asarray([3, 3, 3, 3], dtype=np.int64)
return points, faces, offsets
def _create_tetrahedron_3() -> tuple[np.ndarray, np.ndarray, np.ndarray]:
points = np.asarray([[1, 0, 0],
[0, 0, 1],
[1, 0, 1],
[1, 1, 0]])
faces = np.asarray([0, 1, 3,
0, 2, 1,
0, 3, 2,
1, 2, 3],
dtype=np.int64)
offsets = np.asarray([3, 3, 3, 3], dtype=np.int64)
return points, faces, offsets
def _create_polyhedron() -> tuple[np.ndarray, np.ndarray, np.ndarray]:
points = np.asarray([[0, 0, 0],
[0, 0, 1],
[1, 0, 1],
[1, 0, 0],
[0, 1, 0],
[1, 1, 1],
[1, 1, 0]])
faces = np.asarray([0, 3, 2, 1,
0, 4, 6, 3,
2, 3, 6, 5,
0, 1, 4,
1, 2, 5,
4, 5, 6,
1, 5, 4],
dtype=np.int64)
offsets = np.asarray([4, 4, 4, 3, 3, 3, 3], dtype=np.int64)
return points, faces, offsets
def generate_geometry(block: Block) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
shape2geometry = {
'Cube': _create_hexahedron,
'Edge': _create_wedge,
'Corner 1': _create_tetrahedron_1,
'Corner 2': _create_polyhedron,
'Corner 3': _create_pyramid_1,
'Flat Corner': _create_pyramid_2,
'Twisted Corner 1': _create_tetrahedron_2,
'Twisted Corner 2': _create_tetrahedron_3,
}
points, faces, offsets = shape2geometry[get_shape(block.type)]()
points = _rotate_translate(points, block.orientation, block.lower, block.upper)
return points, faces, offsets
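# Illustrative only: `faces` is a flat index array and `offsets` holds the vertex
# count of each face, so the per-face index lists can be recovered with
#   np.split(faces, np.cumsum(offsets)[:-1])
# (mixed quads and triangles, in the order the faces were defined above).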
def get_bounds(block: Block) -> tuple[np.ndarray, np.ndarray]:
return block.lower, block.upper
```
#### File: jgersti/blender-avorion-importer/__init__.py
```python
bl_info = {
"name" : "Avorion XML Format",
"author" : "<NAME>",
"description": "Import Avorion XML",
"blender" : (2, 90, 0),
"version" : (0, 0, 1),
"location" : "File > Import-Export",
"warning" : "",
"category" : "Import-Export"
}
if "bpy" in locals():
import importlib
if "import_avorion_xml" in locals():
importlib.reload(import_avorion_xml)
if "avorion_utils" in locals():
importlib.reload(avorion_utils)
import bpy
from bpy.props import BoolProperty, FloatProperty, StringProperty, EnumProperty
from bpy.types import Operator, Panel
from bpy_extras.io_utils import orientation_helper, path_reference_mode, axis_conversion
@orientation_helper(axis_forward='-Z', axis_up='Y')
class ImportAvorionXML(Operator):
bl_idname = "avorion.import_xml"
bl_label = "Import Avorion XML"
bl_options = {'PRESET', 'UNDO'}
filename_ext = ".xml"
filter_glob: StringProperty(
default="*.xml",
options={'HIDDEN'}
)
filepath: StringProperty(
name="File Path",
description="Filepath used for importing the file."
"(WARNING! disables turret rigging.)",
maxlen=1024,
subtype='FILE_PATH'
)
seperate_blocks: BoolProperty(
name="Seperate Blocks",
description="Seperate Blocks into indiviual Meshes",
default=False
)
def draw(self, context):
pass
def execute(self, context):
from . import import_avorion_xml
keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "filter_glob"))
global_matrix = axis_conversion(from_forward=self.axis_forward, from_up=self.axis_up)
keywords["global_matrix"] = global_matrix.to_4x4()
return import_avorion_xml.load(context, **keywords)
def invoke(self, context, _event):
from pathlib import Path
from . appdirs import user_data_dir
path = Path(user_data_dir('Avorion', appauthor=False, roaming= True))
path /= "ships"
if path.exists() and path.is_dir():
self.filepath = str(path) + "//"
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
class AVORION_PT_import_transform(Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Transform"
bl_parent_id = "FILE_PT_operator"
@classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "AVORION_OT_import_xml"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
layout.prop(operator, "axis_forward")
layout.prop(operator, "axis_up")
class AVORION_PT_import_geometry(Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Geometry"
bl_parent_id = "FILE_PT_operator"
@classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "AVORION_OT_import_xml"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
layout.prop(operator, "seperate_blocks")
def menu_func_import(self, context):
self.layout.operator(ImportAvorionXML.bl_idname, text="Avorion (.xml)")
classes = (
ImportAvorionXML,
AVORION_PT_import_transform,
AVORION_PT_import_geometry
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.TOPBAR_MT_file_import.append(menu_func_import)
def unregister():
bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)
for cls in classes:
bpy.utils.unregister_class(cls)
if __name__ == "__main__":
register()
```
|
{
"source": "jgesc/MinesweeperAPI",
"score": 3
}
|
#### File: MinesweeperAPI/minesweeper/handler.py
```python
from http.server import BaseHTTPRequestHandler
import json
import random, string
from minesweeper import Minesweeper
games = {}
class MinesweeperRequestHandler(BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
super().__init__(request, client_address, server)
self.protocol_version = 'HTTP/1.1' # Required for Keep-Alive
def send_keep_alive_headers(self, content_length=0):
self.send_header('Content-length', content_length)
self.send_header("Connection", "keep-alive")
self.send_header("Keep-Alive", "timeout=30, max=30000")
def do_GET(self):
try:
global games
# Check path
path = list(filter(bool, self.path[:].split('/')))
if len(path) != 1 or path[0] not in games:
self.send_response(404)
self.send_keep_alive_headers()
self.end_headers()
return
# Build response body
id = path[0]
game = games[id]
body = {
'state': game.game_state,
'width': game.width,
'height': game.height,
'mine_count': game.mine_count,
'board': game.get_visible_cells()
}
payload = json.dumps(body).encode('utf-8')
# Send response
self.send_response(200)
self.send_keep_alive_headers(len(payload))
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(payload)
except Exception as exception:
self.send_error(500, explain=repr(exception))
self.send_keep_alive_headers()
self.end_headers()
def do_POST(self):
try:
global games
# Check path
path = list(filter(bool, self.path[:].split('/')))
if len(path) != 1 or path[0] not in games:
self.send_response(404)
self.send_keep_alive_headers()
self.end_headers()
return
# Parse body parameters
id = path[0]
game = games[id]
parameters = {}
content_len_str = self.headers.get('Content-Length')
content_len = int(content_len_str) if content_len_str else 0
if content_len:
request_body = self.rfile.read(content_len)
parameters = json.loads(request_body)
else:
self.send_response(400)
self.send_keep_alive_headers()
self.end_headers()
return
# Perform action
game.open_cell(parameters['x'], parameters['y'])
# Send new game state
body = {
'new_state': game.game_state
}
payload = json.dumps(body).encode('utf-8')
# Send response
self.send_response(200)
self.send_keep_alive_headers(len(payload))
self.end_headers()
self.wfile.write(payload)
except Exception as exception:
self.send_error(500, explain=repr(exception))
self.send_keep_alive_headers()
self.end_headers()
def do_PUT(self):
try:
global games
# Check path
path = list(filter(bool, self.path[:].split('/')))
if len(path) != 0:
self.send_response(400)
self.send_keep_alive_headers()
self.end_headers()
return
# Parse body parameters
parameters = {}
content_len_str = self.headers.get('Content-Length')
content_len = int(content_len_str) if content_len_str else 0
if content_len:
request_body = self.rfile.read(content_len)
parameters = json.loads(request_body)
# Create game
id = None
while not id or id in games:
id = ''.join(random.choice(string.ascii_uppercase) for i in range(8))
games[id] = Minesweeper(
width=parameters.get('width', 10),
height=parameters.get('height', 10),
mine_count=parameters.get('mine_count', 10)
)
# Build response body
body = {
'id': id
}
payload = json.dumps(body).encode('utf-8')
# Send response
self.send_response(200)
self.send_keep_alive_headers(len(payload))
self.end_headers()
self.wfile.write(payload)
except Exception as exception:
try:
del games[id]
except Exception:
pass
finally:
self.send_error(500, explain=repr(exception))
self.send_keep_alive_headers()
self.end_headers()
def do_DELETE(self):
try:
global games
# Check path
path = list(filter(bool, self.path[:].split('/')))
if len(path) != 1 or path[0] not in games:
self.send_response(404)
self.send_keep_alive_headers()
self.end_headers()
return
# Build response body
id = path[0]
del games[id]
# Send response
self.send_response(200)
self.send_keep_alive_headers()
self.end_headers()
except Exception as exception:
self.send_error(500, explain=repr(exception))
self.send_keep_alive_headers()
self.end_headers()
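# Example client flow (a sketch for illustration; the host/port below are an
# assumption -- the HTTP server wiring lives outside this module):
#   PUT    http://localhost:8000/        body: {"width": 10, "height": 10, "mine_count": 10}
#          -> 200, {"id": "<8 uppercase letters>"}
#   GET    http://localhost:8000/<id>    -> 200, {"state", "width", "height", "mine_count", "board"}
#   POST   http://localhost:8000/<id>    body: {"x": 3, "y": 4}  -> 200, {"new_state": ...}
#   DELETE http://localhost:8000/<id>    -> 200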
```
|
{
"source": "jgeskens/django-advanced-reports",
"score": 2
}
|
#### File: backoffice/contrib/views.py
```python
from django.contrib import messages
from django.http.request import QueryDict
from django.http.response import HttpResponseBase
from django.shortcuts import redirect
from django.template.context import RequestContext
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from advanced_reports.backoffice.base import BackOfficeView
from advanced_reports import get_report_for_slug
from advanced_reports.defaults import ActionException
from advanced_reports.views import api_list, api_action, api_form
class AdvancedReportView(BackOfficeView):
"""
A BackOffice view that renders an Advanced Report.
Usage:
<div view="advanced_report" params="{slug: 'report_slug', updateLocation: true|false}"></div>
``slug`` is the slug of your registered Advanced Report
``updateLocation`` is a boolean. If true, the location bar will be updated with querystring parameters
reflecting the current filters and ordering.
"""
slug = 'advanced_report'
template = 'advanced_reports/backoffice/contrib/advanced-reports/advanced-report.html'
def get(self, request):
report_slug = request.view_params.get('slug')
advreport = get_report_for_slug(report_slug)
context = {'advreport': advreport}
return render_to_string(self.template, context, context_instance=RequestContext(request))
def fetch(self, request):
obj_id = request.view_params.get('pk', None)
ids = [obj_id] if obj_id else None
return api_list(request, request.view_params.get('slug'), ids=ids)
def form(self, request):
method = request.action_params.get('method')
pk = request.action_params.get('pk')
slug = request.view_params.get('slug')
return api_form(request, slug, method, pk or None)
def action(self, request):
method = request.action_params.get('method')
pk = request.action_params.get('pk')
slug = request.view_params.get('slug')
data = request.action_params.get('data')
if data:
# We have to do str(data) because otherwise QueryDict is too lazy to decode...
post = QueryDict(str(data), encoding='utf-8')
request.POST = post
return api_action(request, slug, method, pk or None)
def action_view(self, request):
report_slug = request.view_params.get('slug')
method = request.view_params.get('report_method')
pk = request.view_params.get('pk')
return api_action(request, report_slug, method, pk or None)
def multiple_action(self, request):
report_slug = request.view_params.get('slug')
method = request.action_params.get('report_method')
items = request.action_params.get('items').split(',')
global_select = request.action_params.get('global')
advreport = get_report_for_slug(report_slug)
advreport.set_request(request)
data = request.action_params.get('data')
advreport.set_request(request)
if data:
# We have to do str(data) because otherwise QueryDict is too lazy to decode...
post = QueryDict(str(data), encoding='utf-8')
request.POST = post
if global_select:
items, context = advreport.get_object_list(request)
else:
items = [advreport.get_item_for_id(pk) for pk in items]
if hasattr(advreport, '%s_multiple' % method):
try:
action = advreport.find_action(method)
if action.form:
form = action.form(request.POST, prefix='actionform')
if form.is_valid():
if action.is_regular_view and request.is_ajax():
return {'link_action': {'method': method, 'data': request.POST}}
response = getattr(advreport, '%s_multiple' % method)(items, form)
else:
if action.form_template:
response_form = render_to_string(action.form_template, {'form': form})
else:
response_form = unicode(form)
return {'response_form': response_form}
else:
response = getattr(advreport, '%s_multiple' % method)(items)
if response:
return response
messages.success(request, _(u'Successfully executed action on all selected items.'))
except ActionException, e:
messages.error(request, e.msg)
return {'succeeded': {}}
else:
succeeded, failed = {}, {}
for item in items:
try:
action = advreport.find_object_action(item, method)
if action:
if action.is_allowed(request):
result = getattr(advreport, method)(item)
if isinstance(result, HttpResponseBase) and result.status_code == 200:
messages.warning(request, _(u'This action does not support batch operations.'))
else:
succeeded[advreport.get_item_id(item)] = action.get_success_message()
else:
failed[advreport.get_item_id(item)] = _(u'You are not allowed to execute this action.')
else:
failed[advreport.get_item_id(item)] = _(u'This action is not applicable to this item.')
except ActionException, e:
failed[advreport.get_item_id(item)] = e.msg
if succeeded and not failed:
messages.success(request, _(u'Successfully executed action on all selected items.'))
elif succeeded and failed:
                messages.warning(request, _(u'Some actions were successful, but some failed.'))
else:
messages.error(request, _(u'No action on the selected items was successful.'))
return {'succeeded': succeeded, 'failed': failed}
def multiple_action_view(self, request):
report_slug = request.view_params.get('slug')
method = request.view_params.get('report_method')
items = request.view_params.get('items').split(',')
global_select = request.view_params.get('global')
advreport = get_report_for_slug(report_slug)
advreport.set_request(request)
if global_select == 'true':
items = advreport.get_object_list(request)[0]
else:
items = [advreport.get_item_for_id(pk) for pk in items]
items = [item for item in items if advreport.find_object_action(item, method)]
if items:
action = advreport.find_action(method)
if action.form:
form = action.form(request.GET, prefix='actionform')
if form.is_valid():
return getattr(advreport, '%s_multiple' % method)(items, form)
else:
return getattr(advreport, '%s_multiple' % method)(items)
messages.error(request, _(u'No items were applicable for this action.'))
return redirect(request.META['HTTP_REFERER'])
def auto_complete(self, request):
partial = request.action_params.pop('partial')
report_slug = request.view_params.get('slug')
advreport = get_report_for_slug(report_slug)
return advreport.auto_complete(request, partial, request.action_params)
class AdvancedReportActionView(BackOfficeView):
slug = 'actionview'
def get(self, request):
report_slug = request.view_params.get('slug')
method = request.view_params.get('method')
pk = request.view_params.get('pk')
advreport = get_report_for_slug(report_slug)
advreport.set_request(request)
item = advreport.get_item_for_id(pk)
advreport.enrich_object(item, request=request)
response = getattr(advreport, method)(item)
return response.content
```
|
{
"source": "Jgessy/pythonbirds",
"score": 3
}
|
#### File: pythonbirds/oo/pessoa.py
```python
class Pessoa:
    def __init__(self, nome=None, idade=48):
self.idade = idade
self.nome = nome
def cumprimentar(self):
return f'ola {id(self)}'
if __name__ == '__main__':
p = Pessoa('fabio')
print(Pessoa.cumprimentar(p))
print(id(p))
print(p.cumprimentar())
print(p.nome)
p.nome = 'james'
print(p.nome)
print(p.idade)
```
|
{
"source": "jgeudens/dfetch",
"score": 2
}
|
#### File: dfetch/project/git.py
```python
import os
import pathlib
from typing import List
from dfetch.log import get_logger
from dfetch.manifest.project import ProjectEntry
from dfetch.manifest.version import Version
from dfetch.project.vcs import VCS
from dfetch.util.util import safe_rmtree
from dfetch.vcs.git import GitLocalRepo, GitRemote, get_git_version
logger = get_logger(__name__)
class GitRepo(VCS):
"""A git repository."""
DEFAULT_BRANCH = "master"
NAME = "git"
def __init__(self, project: ProjectEntry):
"""Create a Git project."""
super().__init__(project)
self._remote_repo = GitRemote(self.remote)
self._local_repo = GitLocalRepo(self.local_path)
def check(self) -> bool:
"""Check if is GIT."""
return bool(self._remote_repo.is_git())
def _latest_revision_on_branch(self, branch: str) -> str:
"""Get the latest revision on a branch."""
return str(self._remote_repo.last_sha_on_branch(branch))
def _list_of_tags(self) -> List[str]:
"""Get list of all available tags."""
return [str(tag) for tag in self._remote_repo.list_of_tags()]
def metadata_revision(self) -> str:
"""Get the revision of the metadata file."""
return str(self._local_repo.get_last_file_hash(self.metadata_path))
def current_revision(self) -> str:
"""Get the revision of the metadata file."""
return str(self._local_repo.get_current_hash())
def get_diff(self, old_hash: str, new_hash: str) -> str:
"""Get the diff of two revisions."""
return str(self._local_repo.create_diff(old_hash, new_hash))
@staticmethod
def revision_is_enough() -> bool:
"""See if this VCS can uniquely distinguish branch with revision only."""
return True
@staticmethod
def list_tool_info() -> None:
"""Print out version information."""
tool, version = get_git_version()
VCS._log_tool(tool, version)
def _fetch_impl(self, version: Version) -> Version:
"""Get the revision of the remote and place it at the local path."""
rev_or_branch_or_tag = self._determine_what_to_fetch(version)
# When exporting a file, the destination directory must already exist
pathlib.Path(self.local_path).mkdir(parents=True, exist_ok=True)
self._local_repo.checkout_version(
self.remote, rev_or_branch_or_tag, self.source
)
safe_rmtree(os.path.join(self.local_path, self._local_repo.METADATA_DIR))
return self._determine_fetched_version(version)
def _determine_what_to_fetch(self, version: Version) -> str:
"""Based on asked version, target to fetch."""
if version.revision and 0 < len(version.revision) < 40:
raise RuntimeError(
"Shortened revisions (SHA) in manifests cannot be used,"
" use complete revision or a branch (or tags instead)"
)
return version.revision or version.tag or version.branch or self.DEFAULT_BRANCH
def _determine_fetched_version(self, version: Version) -> Version:
"""Based on asked version, determine info of fetched version."""
branch = version.branch or self.DEFAULT_BRANCH
revision = version.revision
if not version.tag and not version.revision:
revision = self._remote_repo.last_sha_on_branch(branch)
return Version(tag=version.tag, revision=revision, branch=branch)
```
#### File: dfetch/project/vcs.py
```python
import os
from abc import ABC, abstractmethod
from typing import List, Optional, Tuple
from patch_ng import fromfile
import dfetch.manifest.manifest
import dfetch.manifest.project
from dfetch.log import get_logger
from dfetch.manifest.version import Version
from dfetch.project.metadata import Metadata
from dfetch.util.util import hash_directory, safe_rm
from dfetch.util.versions import latest_tag_from_list
logger = get_logger(__name__)
class VCS(ABC):
"""Abstract Version Control System object.
This object represents one Project entry in the Manifest.
It can be updated.
"""
NAME = ""
DEFAULT_BRANCH = ""
def __init__(self, project: dfetch.manifest.project.ProjectEntry) -> None:
"""Create the VCS."""
self.__project = project
self.__metadata = Metadata.from_project_entry(self.__project)
def check_wanted_with_local(self) -> Tuple[Optional[Version], Optional[Version]]:
"""Given the project entry in the manifest, get the relevant version from disk.
Returns:
Tuple[Optional[Version], Optional[Version]]: Wanted, Have
"""
on_disk = self.on_disk_version()
if not on_disk:
return (self.wanted_version, None)
if self.wanted_version.tag:
return (Version(tag=self.wanted_version.tag), Version(tag=on_disk.tag))
wanted_branch, on_disk_branch = "", ""
if not (self.wanted_version.revision and self.revision_is_enough()):
wanted_branch = self.wanted_version.branch or self.DEFAULT_BRANCH
on_disk_branch = on_disk.branch
wanted_revision = (
self.wanted_version.revision
or self._latest_revision_on_branch(wanted_branch)
)
return (
Version(
revision=wanted_revision,
branch=wanted_branch,
),
Version(revision=on_disk.revision, branch=on_disk_branch),
)
def update_is_required(self, force: bool = False) -> Optional[Version]:
"""Check if this project should be upgraded.
Args:
force (bool, optional): Ignore if versions match.
Defaults to False.
"""
wanted, current = self.check_wanted_with_local()
if not force and wanted == current:
self._log_project(f"up-to-date ({current})")
return None
logger.debug(f"{self.__project.name} Current ({current}), Available ({wanted})")
return wanted
def update(self, force: bool = False) -> None:
"""Update this VCS if required.
Args:
force (bool, optional): Ignore if version is ok or any local changes were done.
Defaults to False.
"""
to_fetch = self.update_is_required(force)
if not to_fetch:
return
if not force and self._are_there_local_changes():
self._log_project(
"skipped - local changes after last update (use --force to overwrite)"
)
return
if os.path.exists(self.local_path):
logger.debug(f"Clearing destination {self.local_path}")
safe_rm(self.local_path)
actually_fetched = self._fetch_impl(to_fetch)
self._log_project(f"Fetched {actually_fetched}")
applied_patch = ""
if self.__project.patch:
if os.path.exists(self.__project.patch):
self.apply_patch()
applied_patch = self.__project.patch
else:
logger.warning(f"Skipping non-existent patch {self.__project.patch}")
self.__metadata.fetched(
actually_fetched,
hash_=hash_directory(self.local_path, skiplist=[self.__metadata.FILENAME]),
patch_=applied_patch,
)
logger.debug(f"Writing repo metadata to: {self.__metadata.path}")
self.__metadata.dump()
def apply_patch(self) -> None:
"""Apply the specified patch to the destination."""
patch_set = fromfile(self.__project.patch)
if patch_set:
if patch_set.apply(0, root=self.__project.destination, fuzz=True):
                self._log_project(f'Applied patch "{self.__project.patch}"')
            else:
                raise RuntimeError(f'Applying patch "{self.__project.patch}" failed')
else:
raise RuntimeError(f'Invalid patch file: "{self.__project.patch}"')
def check_for_update(self) -> None:
"""Check if there is an update available."""
on_disk_version = self.on_disk_version()
latest_version = self._check_for_newer_version()
if not on_disk_version:
wanted = (
f"wanted ({self.wanted_version}), " if any(self.wanted_version) else ""
)
self._log_project(f"{wanted}available ({latest_version})")
elif latest_version == on_disk_version:
self._log_project(f"up-to-date ({latest_version})")
elif on_disk_version == self.wanted_version:
self._log_project(
f"wanted & current ({on_disk_version}), available ({latest_version})"
)
else:
self._log_project(
f"wanted ({str(self.wanted_version) or 'latest'}), "
f"current ({on_disk_version}), available ({latest_version})"
)
def _log_project(self, msg: str) -> None:
logger.print_info_line(self.__project.name, msg)
@staticmethod
def _log_tool(name: str, msg: str) -> None:
logger.print_info_line(name, msg.strip())
@property
def local_path(self) -> str:
"""Get the local destination of this project."""
return self.__project.destination
@property
def wanted_version(self) -> Version:
"""Get the wanted version of this VCS."""
return self.__metadata.version
@property
def metadata_path(self) -> str:
"""Get the path of the metadata."""
return self.__metadata.path
@property
def remote(self) -> str:
"""Get the remote URL of this VCS."""
return self.__metadata.remote_url
@property
def source(self) -> str:
"""Get the source folder of this VCS."""
return self.__project.source
@abstractmethod
def check(self) -> bool:
"""Check if it can handle the type."""
@staticmethod
@abstractmethod
def revision_is_enough() -> bool:
"""See if this VCS can uniquely distinguish branch with revision only."""
@abstractmethod
def _latest_revision_on_branch(self, branch: str) -> str:
"""Get the latest revision on a branch."""
@abstractmethod
def _list_of_tags(self) -> List[str]:
"""Get list of all available tags."""
@staticmethod
@abstractmethod
def list_tool_info() -> None:
"""Print out version information."""
def on_disk_version(self) -> Optional[Version]:
"""Get the version of the project on disk.
Returns:
            Version: Could be None if there is no on-disk version
"""
return (
None
if not os.path.exists(self.__metadata.path)
else Metadata.from_file(self.__metadata.path).version
)
def _on_disk_hash(self) -> Optional[str]:
"""Get the hash of the project on disk.
Returns:
Str: Could be None if no on disk version
"""
return (
None
if not os.path.exists(self.__metadata.path)
else Metadata.from_file(self.__metadata.path).hash
)
def _check_for_newer_version(self) -> Version:
"""Check if a newer version is available on the given branch."""
if self.wanted_version.tag:
return Version(
tag=latest_tag_from_list(self.wanted_version.tag, self._list_of_tags())
)
if self.wanted_version.branch == " ":
branch = ""
else:
branch = self.wanted_version.branch or self.DEFAULT_BRANCH
return Version(revision=self._latest_revision_on_branch(branch), branch=branch)
def _are_there_local_changes(self) -> bool:
"""Check if there are local changes.
Returns:
            Bool: True if there are local changes, False if none were detected or no hash was found.
"""
logger.debug(f"Checking if there were local changes in {self.local_path}")
on_disk_hash = self._on_disk_hash()
return bool(on_disk_hash) and on_disk_hash != hash_directory(
self.local_path, skiplist=[self.__metadata.FILENAME]
)
@abstractmethod
def _fetch_impl(self, version: Version) -> Version:
"""Fetch the given version of the VCS, should be implemented by the child class."""
```
#### File: features/steps/svn_steps.py
```python
import os
import pathlib
import subprocess
from behave import given # pylint: disable=no-name-in-module
from dfetch.util.util import in_directory
from features.steps.generic_steps import generate_file, list_dir
def create_svn_server_and_repo(context, name="svn-server"):
"""Create an local svn server and repo and return the path to the repo."""
server_path = os.path.relpath(context.remotes_dir_path) + "/" + name
repo_path = "svn-repo"
pathlib.Path(server_path).mkdir(parents=True, exist_ok=True)
subprocess.call(["svnadmin", "create", "--fs-type", "fsfs", server_path])
current_path = "/".join(os.getcwd().split(os.path.sep) + [server_path])
subprocess.call(["svn", "checkout", f"file:///{current_path}", repo_path])
return repo_path
def create_stdlayout():
pathlib.Path("trunk").mkdir(parents=True, exist_ok=True)
pathlib.Path("branches").mkdir(parents=True, exist_ok=True)
pathlib.Path("tags").mkdir(parents=True, exist_ok=True)
def add_and_commit(msg):
subprocess.call(["svn", "add", "--force", "."])
subprocess.call(["svn", "ci", "-m", f'"{msg}"'])
def commit_all(msg):
subprocess.call(["svn", "commit", "--depth", "empty", ".", "-m", f'"{msg}"'])
def add_externals(externals):
"""Add the given list of dicts as externals."""
with open("externals", "w") as external_list:
for external in externals:
revision = f"@{external['revision']}" if external["revision"] else ""
external_list.write(f"{external['url']}{revision} {external['path']}\n")
subprocess.call(["svn", "propset", "svn:externals", "-F", external_list.name, "."])
commit_all("Added externals")
subprocess.call(["svn", "update"])
@given("a svn repo with the following externals")
def step_impl(context):
repo_path = create_svn_server_and_repo(context)
os.chdir(repo_path)
add_externals(context.table)
@given('a svn-server "{name}" with the files')
def step_impl(context, name):
repo_path = create_svn_server_and_repo(context, name)
with in_directory(repo_path):
create_stdlayout()
with in_directory("trunk"):
for file in context.table:
generate_file(file["path"], "some content")
add_and_commit("Added files")
@given('a non-standard svn-server "{name}" with the files')
def step_impl(context, name):
repo_path = create_svn_server_and_repo(context, name)
with in_directory(repo_path):
for file in context.table:
generate_file(file["path"], "some content")
add_and_commit("Added files")
@given('a non-standard svn-server "{name}"')
def step_impl(context, name):
repo_path = create_svn_server_and_repo(context, name)
with in_directory(repo_path):
generate_file("SomeFolder/SomeFile.txt", "some content")
add_and_commit("Added files")
```
|
{
"source": "j-geuter/CINDy",
"score": 2
}
|
#### File: j-geuter/CINDy/CINDy_algorithm.py
```python
import numpy as np
import time
from scipy.sparse import isspmatrix_csr, issparse
# Import functions for the step_sizes and the updates.
from auxiliary_functions import step_size, polish_solution
# Import functions for active set management
from auxiliary_functions import (
new_vertex_fail_fast,
delete_vertex_index,
max_min_vertex,
)
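# The `type_criterion` argument of CINDy selects the inner solver used below:
# "FCFW" runs the (approximate) Fully-Corrective Frank-Wolfe variant and "BCG"
# runs Blended Conditional Gradients; both rely on the polishing/active-set
# helpers imported above.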
def CINDy(
function,
feasible_region,
tolerance_outer,
tolerance_inner,
max_time_inner,
testing_function,
max_iterations=2000,
type_criterion="FW",
primal_improvement=1.0e-5,
):
x = feasible_region.initial_point()
loss_evolution = [testing_function.evaluate(x)]
real_loss = [testing_function.compare_exact(x)]
timing = [time.time()]
testing_losses = []
true_losses = []
x = feasible_region.initial_point()
if type_criterion == "FCFW":
(
x,
testing_loss,
true_loss,
testing_running_avg,
_,
_,
all_true_losses,
all_testing_losses,
all_running_avg,
timing_values,
gap_values,
) = approximate_fully_corrective_FW(
function,
x,
[x.copy()],
[1.0],
feasible_region,
tolerance_outer,
tolerance_inner,
max_time_inner,
testing_function,
max_iterations,
type_criterion=type_criterion,
primal_improvement=primal_improvement,
)
if type_criterion == "BCG":
(
x,
testing_loss,
true_loss,
testing_running_avg,
_,
_,
all_true_losses,
all_testing_losses,
all_running_avg,
timing_values,
gap_values,
) = blended_conditional_gradients(
function,
x,
[x.copy()],
[1.0],
feasible_region,
tolerance_outer,
tolerance_inner,
max_time_inner,
testing_function,
max_iterations,
type_criterion=type_criterion,
primal_improvement=primal_improvement,
)
testing_losses = testing_losses + all_testing_losses
true_losses = true_losses + all_true_losses
timing.append(time.time())
loss_evolution.append(testing_loss)
real_loss.append(true_loss)
timing[:] = [t - timing[0] for t in timing]
return (
x,
loss_evolution,
real_loss,
timing,
true_losses,
testing_losses,
timing_values,
gap_values,
)
def approximate_fully_corrective_FW(
function,
x,
active_set,
lambdaVal,
feasible_region,
outer_tolerance,
inner_tolerance,
max_time,
testing_function,
max_iterations=2000,
type_criterion="FW",
threshold=1.0e-9,
num_moving_average=5,
primal_improvement=1.0e-5,
):
time_ref = time.time()
xbest = x.copy()
active_set_best = active_set.copy()
lambda_val_best = lambdaVal.copy()
all_true_losses = [testing_function.compare_exact(x)]
all_testing_losses = [testing_function.evaluate(x)]
running_average = [testing_function.evaluate(x)]
itCount = 1
grad = function.gradient(x)
gap = (x - feasible_region.linear_programming_oracle(grad)).T.dot(grad)
timing = [time.time()]
gap_values = [gap]
while True:
# print(x)
for i in range(10):
x, gap_aux = away_step_CG(
function, feasible_region, x, active_set, lambdaVal, "EL"
)
grad = function.gradient(x)
gap = (x - feasible_region.linear_programming_oracle(grad)).T.dot(grad)
if len(active_set) >= 2 and gap != 0.0:
x, active_set[:], lambdaVal[:] = polish_solution(
function.psi_val(),
function.y_val(),
active_set,
lambdaVal,
tolerance=inner_tolerance,
threshold=threshold,
type_criterion="FW",
time_limit=max_time,
max_steps=max_iterations,
)
grad = function.gradient(x)
gap = (x - feasible_region.linear_programming_oracle(grad)).T.dot(grad)
timing.append(time.time())
gap_values.append(gap)
all_true_losses.append(testing_function.compare_exact(x))
all_testing_losses.append(testing_function.evaluate(x))
if len(all_testing_losses) < num_moving_average:
running_average.append(np.mean(np.asarray(all_testing_losses)))
else:
running_average.append(
np.mean(np.asarray(all_testing_losses)[-num_moving_average:])
)
if running_average[-1] < min(running_average[:-1]):
xbest = x.copy()
active_set_best = active_set.copy()
lambda_val_best = lambdaVal.copy()
if (
time.time() - time_ref > max_time
or itCount > max_iterations
or gap < outer_tolerance
or np.abs(all_testing_losses[-2] - all_testing_losses[-1])
< primal_improvement
):
xbest, active_set_best[:], lambda_val_best[:] = polish_solution(
function.psi_val(),
function.y_val(),
active_set_best,
lambda_val_best,
tolerance=1.0e-6,
threshold=1.0e-4,
type_criterion="FW",
time_limit=120,
max_steps=np.inf,
)
xbest, active_set_best[:], lambda_val_best[:] = polish_solution(
function.psi_val(),
function.y_val(),
active_set_best,
lambda_val_best,
tolerance=1.0e-6,
threshold=0.0,
type_criterion="FW",
time_limit=120,
max_steps=np.inf,
)
timing[:] = [t - timing[0] for t in timing]
return (
xbest,
testing_function.evaluate(xbest),
testing_function.compare_exact(xbest),
min(running_average),
active_set_best,
lambda_val_best,
all_true_losses,
all_testing_losses,
running_average,
timing,
gap_values,
)
itCount += 1
def blended_conditional_gradients(
function,
x,
active_set,
lambdaVal,
feasible_region,
outer_tolerance,
inner_tolerance,
max_time,
testing_function,
max_iterations=np.inf,
type_criterion="FW",
threshold=1.0e-9,
K=4.0,
num_moving_average=5,
primal_improvement=1.0e-5,
):
from auxiliary_functions import polish_solution
time_ref = time.time()
xbest = x.copy()
active_set_best = active_set.copy()
lambda_val_best = lambdaVal.copy()
all_true_losses = [testing_function.compare_exact(x)]
all_testing_losses = [testing_function.evaluate(x)]
running_average = [testing_function.evaluate(x)]
itCount = 1
grad = function.gradient(x)
gap = (x - feasible_region.linear_programming_oracle(grad)).T.dot(grad)
phi_val = gap / 2.0
timing = [time.time()]
gap_values = [gap]
phi_values = [phi_val]
while True:
x, active_set[:], lambdaVal[:] = polish_solution(
function.psi_val(),
function.y_val(),
active_set,
lambdaVal,
tolerance=phi_val / 4.0,
threshold=threshold,
type_criterion="blended",
time_limit=max_time,
)
grad = function.gradient(x)
gap = (x - feasible_region.linear_programming_oracle(grad)).T.dot(grad)
timing.append(time.time())
gap_values.append(gap)
phi_values.append(phi_val)
if testing_function.evaluate(x) < min(all_testing_losses):
xbest = x.copy()
active_set_best = active_set.copy()
lambda_val_best = lambdaVal.copy()
all_true_losses.append(testing_function.compare_exact(x))
all_testing_losses.append(testing_function.evaluate(x))
if len(all_testing_losses) < num_moving_average:
running_average.append(np.mean(np.asarray(all_testing_losses)))
else:
running_average.append(
np.mean(np.asarray(all_testing_losses)[-num_moving_average:])
)
if running_average[-1] < min(running_average[:-1]):
xbest = x.copy()
active_set_best = active_set.copy()
lambda_val_best = lambdaVal.copy()
if (
time.time() - time_ref > max_time
or itCount > max_iterations
or gap < outer_tolerance
):
timing[:] = [t - timing[0] for t in timing]
return (
xbest,
testing_function.evaluate(xbest),
testing_function.compare_exact(xbest),
min(running_average),
active_set_best,
lambda_val_best,
all_true_losses,
all_testing_losses,
running_average,
timing,
gap_values,
)
if gap >= phi_val / K:
x, gap_aux = away_step_CG(
function, feasible_region, x, active_set, lambdaVal, "EL"
)
else:
phi_val = gap / 2.0
itCount += 1
def accelerated_projected_gradient_descent(
f,
feasible_region,
active_set,
tolerance,
alpha0,
time_limit=60,
max_iteration=100,
type_criterion="FW",
):
"""
Run Nesterov's accelerated projected gradient descent.
References
----------
Nesterov, Y. (2018). Lectures on convex optimization (Vol. 137).
Berlin, Germany: Springer. (Constant scheme II, Page 93)
Parameters
----------
    f : function being minimized
        Function that we will minimize. Gradients are computed through an
        f.gradient(x) method that returns the gradient at x as a
        numpy array.
    feasible_region : feasible region function.
        Returns the projection oracle of a point x onto the feasible region,
        which is computed through the function feasible_region.project(x).
        Additionally, an LMO is used to compute the Frank-Wolfe gap (used as a
        stopping criterion) through the function
        feasible_region.linear_programming_oracle(grad), which
        minimizes <x, grad> over the feasible region.
    active_set : list of numpy arrays.
        Vertices whose convex combination defines the outputted point.
    alpha0 : numpy array.
        Initial barycentric coordinates over the active set; the iterates of
        the method live in this space.
    tolerance : float
        Frank-Wolfe accuracy to which the solution is outputted.
    Returns
    -------
    w : numpy array
        Outputted solution, expressed as a convex combination of the active
        set, with Frank-Wolfe gap below the target tolerance (unless the time
        or iteration limit is reached first).
    alpha : list of floats
        Final barycentric coordinates of the outputted solution.
    gap_values : list of floats
        Frank-Wolfe gap values recorded along the run.
"""
from collections import deque
# Quantities we want to output.
L = f.largest_eigenvalue()
mu = f.smallest_eigenvalue()
x = deque([np.asarray(alpha0)], maxlen=2)
y = np.asarray(alpha0)
q = mu / L
if mu < 1.0e-3:
alpha = deque([0], maxlen=2)
else:
alpha = deque([np.sqrt(q)], maxlen=2)
grad = f.gradient(x[-1])
if type_criterion == "FW":
FWGap = grad.dot(x[-1] - feasible_region.linear_programming_oracle(grad))
if type_criterion == "blended":
away_vertex, _ = feasible_region.away_oracle(grad,[], x[-1])
FWGap = grad.dot(
away_vertex
- feasible_region.linear_programming_oracle(grad)
)
time_ref = time.time()
it_count = 0
gap_values = [FWGap]
while FWGap > tolerance:
x.append(feasible_region.project(y - 1 / L * f.gradient(y)))
if mu < 1.0e-3:
alpha.append(0.5 * (1 + np.sqrt(1 + 4 * alpha[-1] * alpha[-1])))
beta = (alpha[-2] - 1.0) / alpha[-1]
else:
root = np.roots([1, alpha[-1] ** 2 - q, -alpha[-1] ** 2])
root = root[(root >= 0.0) & (root < 1.0)]
assert len(root) != 0, "Root does not meet desired criteria.\n"
alpha.append(root[0])
beta = alpha[-2] * (1 - alpha[-2]) / (alpha[-2] ** 2 - alpha[-1])
y = x[-1] + beta * (x[-1] - x[-2])
grad = f.gradient(x[-1])
if type_criterion == "FW":
FWGap = grad.dot(x[-1] - feasible_region.linear_programming_oracle(grad))
if type_criterion == "blended":
away_vertex, _ = feasible_region.away_oracle(grad,[], x[-1])
FWGap = grad.dot(
away_vertex
- feasible_region.linear_programming_oracle(grad)
)
it_count += 1
if time.time() - time_ref > time_limit or it_count > max_iteration:
break
gap_values.append(FWGap)
w = np.zeros(len(active_set[0]))
for i in range(len(active_set)):
w += x[-1][i] * active_set[i]
return w, x[-1].tolist(), gap_values
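# Illustrative sketch (not part of the original code): the same constant-scheme momentum
# recursion used in the weakly convex (mu ~ 0) branch above, applied to a toy box-constrained
# quadratic. The quadratic, the box [0, 1]^n, and the iteration budget are assumptions made
# purely for illustration; they do not come from the original experiments.
def _demo_accelerated_projected_gradient_descent(n=5, num_iterations=300):
    diag = np.arange(1.0, n + 1.0)       # Hessian diagonal of the toy quadratic 0.5*x^T diag(d) x - b^T x
    b = np.ones(n)
    L_demo = diag.max()                  # Lipschitz constant of the gradient
    x_prev = np.zeros(n)
    y = np.zeros(n)
    alpha_prev = 0.0
    for _ in range(num_iterations):
        # Projected gradient step from the extrapolated point y, projecting onto the box [0, 1]^n.
        x_new = np.clip(y - (1.0 / L_demo) * (diag * y - b), 0.0, 1.0)
        # Momentum coefficient, mirroring the mu < 1e-3 branch of the routine above.
        alpha_new = 0.5 * (1.0 + np.sqrt(1.0 + 4.0 * alpha_prev * alpha_prev))
        beta = (alpha_prev - 1.0) / alpha_new
        y = x_new + beta * (x_new - x_prev)
        x_prev, alpha_prev = x_new, alpha_new
    return x_prev                        # approximately 1/diag componentwise for this toy problem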
def away_step_CG(function, feasible_region, x, active_set, lambdas, type_of_step):
"""
Performs a single step of the ACG/AFW algorithm.
Parameters
----------
function: function being minimized
Function that we will minimize.
feasible_region : feasible region function.
Returns LP oracles over feasible region.
x : numpy array.
Point.
active_set : list of numpy arrays.
Initial active set.
lambdas : list of floats.
Initial barycentric coordinates.
type_of_step : str
Type of step size used.
Returns
-------
x + alpha*d
Output point
FWGap
FW gap at initial point.
"""
grad = function.gradient(x)
v = feasible_region.linear_programming_oracle(grad)
a, indexMax = feasible_region.away_oracle(grad, active_set, x)
# Choose FW direction, can overwrite index.
FWGap = (x - feasible_region.linear_programming_oracle(grad)).T.dot(grad)
away_gap = (a - x).T.dot(grad)
if issparse(FWGap):
FWGap = FWGap.todense().item()
away_gap = away_gap.todense().item()
if FWGap >= away_gap:
d = v - x
alphaMax = 1.0
optStep = step_size(function, d, grad, x, type_of_step)
alpha = min(optStep, alphaMax)
if alpha != alphaMax:
# newVertex returns true if vertex is new.
flag, index = feasible_region.new_vertex(v, active_set)
lambdas[:] = [i * (1 - alpha) for i in lambdas]
if flag:
active_set.append(v)
lambdas.append(alpha)
else:
# Update existing weights
lambdas[index] += alpha
# Max step length away step, only one vertex now.
else:
active_set[:] = [v]
lambdas[:] = [alphaMax]
else:
d = x - a
alphaMax = lambdas[indexMax] / (1.0 - lambdas[indexMax])
optStep = step_size(function, d, grad, x, type_of_step, maxStep=alphaMax)
alpha = min(optStep, alphaMax)
lambdas[:] = [i * (1 + alpha) for i in lambdas]
# Max step, need to delete a vertex.
if alpha != alphaMax:
lambdas[indexMax] -= alpha
else:
delete_vertex_index(indexMax, active_set, lambdas)
return x + alpha * d, FWGap
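# Illustrative sketch (not part of the original code): the barycentric-weight bookkeeping
# performed above, isolated on plain Python lists so the two update rules are easy to see.
# The vertices and the step length below are arbitrary illustrative values.
def _demo_barycentric_update(alpha=0.25):
    active_set = [np.array([1.0, 0.0]), np.array([0.0, 1.0])]
    lambdas = [0.5, 0.5]
    # Frank-Wolfe step towards a new vertex v: shrink every weight by (1 - alpha)
    # and give the new vertex weight alpha, so the weights still sum to one.
    v = np.array([0.5, 0.5])
    lambdas = [w * (1 - alpha) for w in lambdas]
    active_set.append(v)
    lambdas.append(alpha)
    # Away step from the vertex with index index_max: inflate every weight by (1 + alpha)
    # and remove alpha from the away vertex, again preserving the unit sum.
    index_max = 0
    lambdas = [w * (1 + alpha) for w in lambdas]
    lambdas[index_max] -= alpha
    return active_set, lambdas, sum(lambdas)  # the last entry stays 1.0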
```
#### File: j-geuter/CINDy/dynamics.py
```python
import numpy as np
def kuramoto_time(frequencies, max_time, number_of_snapshots, number_of_experiments = 10, K = 2.0, forcing_param = 0.2, relative_tolerance = 1.0e-10, absolute_tolerance = 1.0e-13):
from scipy.integrate import solve_ivp
number_of_oscillators = len(frequencies)
def kuramoto_ode(_, theta):
[theta_i, theta_j] = np.meshgrid(theta, theta)
return frequencies + K / number_of_oscillators * np.sin(theta_j - theta_i).sum(0) + forcing_param * np.sin(theta)
list_snapshots = []
list_derivatives = []
list_times = []
for i in range(number_of_experiments):
theta_init = 2 * np.pi * np.random.rand(number_of_oscillators)
sol = solve_ivp(kuramoto_ode, [0, max_time], theta_init, method='DOP853', t_eval=np.linspace(0, max_time, number_of_snapshots), rtol = relative_tolerance, atol = absolute_tolerance)
snapshots = sol.y
derivatives = np.zeros([number_of_oscillators, number_of_snapshots])
for i in range(number_of_snapshots):
derivatives[:, i] = kuramoto_ode(0, snapshots[:, i])
list_derivatives.append(derivatives)
list_times.append(sol['t'])
list_snapshots.append(snapshots)
return list_snapshots, list_derivatives, list_times
def kuramoto_time_individual(frequencies, initial_position, start_time, end_time, number_of_samples, K = 2.0, forcing_param = 0.2, relative_tolerance = 1.0e-10, absolute_tolerance = 1.0e-13):
from scipy.integrate import solve_ivp
number_of_oscillators = len(frequencies)
def kuramoto_ode(_, theta):
[theta_i, theta_j] = np.meshgrid(theta, theta)
return frequencies + K / number_of_oscillators * np.sin(theta_j - theta_i).sum(0) + forcing_param * np.sin(theta)
sol = solve_ivp(kuramoto_ode, [start_time, end_time], initial_position, method='DOP853', t_eval=np.linspace(start_time, end_time, number_of_samples), rtol = relative_tolerance, atol = absolute_tolerance)
return sol.y, sol['t']
# Convert angles to their equivalent values in the range [0, 2*pi).
def angle_conversion(angles):
return angles - np.floor(angles/(2*np.pi))*2.0*np.pi
def kuramoto_time_backup(theta_init, frequencies, max_time, number_of_snapshots, K = 2.0, forcing_param = 0.2):
from scipy.integrate import solve_ivp
number_of_oscillators = len(theta_init)
def kuramoto_ode(_, theta):
[theta_i, theta_j] = np.meshgrid(theta, theta)
return frequencies + K / number_of_oscillators * np.sin(theta_j - theta_i).sum(0) + forcing_param * np.sin(theta)
sol = solve_ivp(kuramoto_ode, [0, max_time], theta_init, method='BDF',
t_eval=np.linspace(0, max_time, number_of_snapshots))
snapshots = sol.y
derivatives = np.zeros([number_of_oscillators, number_of_snapshots])
for i in range(number_of_snapshots):
derivatives[:, i] = kuramoto_ode(0, snapshots[:, i])
return snapshots, derivatives, sol['t']
def kuramoto_random(num_oscillators, number_of_snapshots, frequencies, K = 2.0, forcing_param = 0.2):
"""Kuramoto model
Generate data for the Kuramoto model represented by the differential equation
    d/dt x_i = w_i + (K/d) * (sin(x_1 - x_i) + ... + sin(x_d - x_i)) + forcing_param * sin(x_i).
    See [1]_ and [2]_ for details.
    Parameters
    ----------
    num_oscillators: int
        number of oscillators
    number_of_snapshots: int
        number of snapshots
    frequencies: ndarray
        natural frequencies of the oscillators
    K: float, optional
        coupling strength of the oscillators
    forcing_param: float, optional
        amplitude of the external forcing term
    Returns
    -------
    snapshots: ndarray(num_oscillators, number_of_snapshots)
        snapshot matrix containing random phases of the oscillators in [0, 2*pi]
    derivatives: ndarray(num_oscillators, number_of_snapshots)
        matrix containing the corresponding derivatives
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, "Multidimensional Approximation of Nonlinear Dynamical Systems",
arXiv:1809.02448, 2018
.. [2] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, "The Kuramoto model: A simple
paradigm for synchronization phenomena", Rev. Mod. Phys. 77, pp. 137-185 , 2005
"""
# snapshots = 2 * np.pi * np.random.rand(num_oscillators, number_of_snapshots) - np.pi
snapshots = 2 * np.pi * np.random.rand(num_oscillators, number_of_snapshots)
derivatives = np.zeros(snapshots.shape)
for i in range(number_of_snapshots):
for j in range(num_oscillators):
derivatives[j,i] = frequencies[j] + K / num_oscillators * np.sin(snapshots[:,i] - snapshots[j,i]).sum() + forcing_param * np.sin(snapshots[j,i])
return snapshots, derivatives
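# Minimal usage sketch (not part of the original code): the oscillator count, snapshot count,
# and frequency range below are arbitrary illustrative values.
def _demo_kuramoto_random():
    num_oscillators, number_of_snapshots = 5, 100
    frequencies = np.random.uniform(-2.0, 2.0, num_oscillators)
    snapshots, derivatives = kuramoto_random(num_oscillators, number_of_snapshots, frequencies)
    # Both arrays have shape (num_oscillators, number_of_snapshots); column j of `derivatives`
    # is the Kuramoto right-hand side evaluated at column j of `snapshots`.
    return snapshots.shape, derivatives.shape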
# Build the exact dynamics of the Kuramoto model if we have polynomials of order up to two
# in the transformed variables. In this case the basis functions are sines and cosines.
# The first number_of_oscillators transformed variables are cosines, and the rest are sines.
def exact_solution_kuramoto(number_of_oscillators, frequencies, polinomial, K = 2.0, forcing_param = 0.2):
reference_polynomial = polinomial.get_feature_names()
num_basis_functions = len(reference_polynomial)
# Build the exact solution at the boundary
exact_solution = np.zeros((number_of_oscillators, num_basis_functions))
# Build the exact solution.
for i in range(0, number_of_oscillators):
# First order terms.
exact_solution[i, reference_polynomial.index("1")] = frequencies[i]
exact_solution[i, reference_polynomial.index("x" + str(number_of_oscillators + i))] = forcing_param
for j in range(number_of_oscillators):
exact_solution[i, reference_polynomial.index("x" + str(i) + ' x' + str(number_of_oscillators + j))] += K/number_of_oscillators
exact_solution[i, reference_polynomial.index("x" + str(j) + ' x' + str(number_of_oscillators + i))] += -K/number_of_oscillators
return exact_solution
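# Minimal usage sketch (not part of the original code). It assumes, as elsewhere in this file,
# an sklearn PolynomialFeatures basis of degree 2 built on the transformed data
# [cos(theta); sin(theta)], with the first `number_of_oscillators` columns holding cosines and
# the remaining ones sines, and an sklearn version where get_feature_names() is still available.
def _demo_exact_solution_kuramoto(number_of_oscillators=3):
    from sklearn.preprocessing import PolynomialFeatures
    frequencies = np.random.uniform(-2.0, 2.0, number_of_oscillators)
    theta = 2 * np.pi * np.random.rand(number_of_oscillators, 10)
    transformed = np.vstack((np.cos(theta), np.sin(theta)))   # shape (2 * d, samples)
    polinomial = PolynomialFeatures(degree=2)
    polinomial.fit(transformed.T)
    exact = exact_solution_kuramoto(number_of_oscillators, frequencies, polinomial)
    # One row of coefficients per oscillator, one column per basis function.
    return exact.shape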
def add_constraints_symmetry_kuramoto(polinomial, number_of_oscillators, normalization_factors):
feature_names = polinomial.get_feature_names()
num_basis_functions = len(feature_names)
pair_symmetries = []
#Constraint on the cos^2 terms:
for i in range(number_of_oscillators):
for j in range(number_of_oscillators):
index_first = feature_names.index("x" + str(j) + '^2')
index_first_transformed = index_first + int(num_basis_functions*i)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1, 'constant': 0})
#Constraint on the sin^2 terms:
for i in range(number_of_oscillators):
for j in range(number_of_oscillators):
index_first = feature_names.index("x" + str(number_of_oscillators + j) + '^2')
index_first_transformed = index_first + int(num_basis_functions*i)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1, 'constant': 0})
type_symmetries = [('sin', 'cos'), ('cos'), ('sin')]
for symmetry in type_symmetries:
if(symmetry == ('sin', 'cos') or symmetry == ('cos', 'sin')):
#Symmetry cos-sin
#For i and j---> \xi_i(cos(x_i) sin(x_j)) = \xi_j(cos(x_j) sin(x_i))
for i in range(number_of_oscillators):
for j in range(number_of_oscillators):
if(i != j):
index_first = feature_names.index("x" + str(i) + ' x' + str(number_of_oscillators + j))
index_second = feature_names.index("x" + str(j) + ' x' + str(number_of_oscillators + i))
index_first_transformed = index_first + int(num_basis_functions*i)
index_second_transformed = index_second + int(num_basis_functions*j)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1.0/normalization_factors[index_first % num_basis_functions][0], "x" + str(index_second_transformed) : -1.0/normalization_factors[index_second % num_basis_functions][0], 'constant': 0})
#Symmetry sin-cos
#For i and j---> \xi_i(sin(x_i) cos(x_j)) = \xi_j(sin(x_j) cos(x_i))
for i in range(number_of_oscillators):
for j in range(number_of_oscillators):
if(i != j):
index_first = feature_names.index("x" + str(j) + ' x' + str(i + number_of_oscillators))
index_second = feature_names.index("x" + str(i) + ' x' + str(j + number_of_oscillators))
                        index_first_transformed = index_first + int(num_basis_functions*i)
index_second_transformed = index_second + int(num_basis_functions*j)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1.0/normalization_factors[index_first % num_basis_functions][0], "x" + str(index_second_transformed) : -1.0/normalization_factors[index_second % num_basis_functions][0], 'constant': 0})
#Symmetry cos-cos
#For i and j---> \xi_i(cos(x_i) cos(x_j)) = \xi_j(cos(x_j) cos(x_i))
for i in range(number_of_oscillators):
for j in range(number_of_oscillators):
if(i != j):
if(i <= j):
index = feature_names.index("x" + str(i) + ' x' + str(j))
else:
index = feature_names.index("x" + str(j) + ' x' + str(i))
index_first_transformed = index + int(num_basis_functions*i)
index_second_transformed = index + int(num_basis_functions*j)
                        pair_symmetries.append({ "x" + str(index_first_transformed) : 1.0/normalization_factors[index % num_basis_functions][0], "x" + str(index_second_transformed) : -1.0/normalization_factors[index % num_basis_functions][0], 'constant': 0})
#Symmetry sin-sin
#For i and j---> \xi_i(sin(x_i) sin(x_j)) = \xi_j(sin(x_j) sin(x_i))
for i in range(number_of_oscillators):
for j in range(number_of_oscillators):
if(i != j):
if(i <= j):
index = feature_names.index("x" + str(i + number_of_oscillators) + ' x' + str(j + number_of_oscillators))
else:
index = feature_names.index("x" + str(j + number_of_oscillators) + ' x' + str(i + number_of_oscillators))
index_first_transformed = index + int(num_basis_functions*i)
index_second_transformed = index + int(num_basis_functions*j)
                        pair_symmetries.append({ "x" + str(index_first_transformed) : 1.0/normalization_factors[index % num_basis_functions][0], "x" + str(index_second_transformed) : -1.0/normalization_factors[index % num_basis_functions][0], 'constant': 0})
for symmetry in type_symmetries:
if(symmetry == ('cos')):
#Symmetry cos
#For i and j---> \xi_i(cos(x_i)) = \xi_j(cos(x_j))
for i in range(1, number_of_oscillators):
index_first = feature_names.index("x" + str(0))
index_second = feature_names.index("x" + str(i))
index_first_transformed = index_first
index_second_transformed = index_second + int(num_basis_functions*i)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1.0/normalization_factors[index_first % num_basis_functions][0], "x" + str(index_second_transformed) : -1.0/normalization_factors[index_second % num_basis_functions][0], 'constant': 0})
for symmetry in type_symmetries:
if(symmetry == ('cos')):
#Symmetry cos
#For i and j---> \xi_i(cos(x_j)) = \xi_j(cos(x_i))
for i in range(number_of_oscillators):
for j in range(number_of_oscillators):
if(i != j):
index_first = feature_names.index("x" + str(j))
index_second = feature_names.index("x" + str(i))
                        index_first_transformed = index_first + int(num_basis_functions*i)
index_second_transformed = index_second + int(num_basis_functions*j)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1.0/normalization_factors[index_first % num_basis_functions][0], "x" + str(index_second_transformed) : -1.0/normalization_factors[index_second % num_basis_functions][0], 'constant': 0})
for symmetry in type_symmetries:
if(symmetry == ('sin')):
#Symmetry sin
#For i and j---> \xi_i(sin(x_i)) = \xi_j(sin(x_j))
for i in range(1, number_of_oscillators):
index_first = feature_names.index("x" + str(number_of_oscillators + 0))
index_second = feature_names.index("x" + str(number_of_oscillators + i))
index_first_transformed = index_first
index_second_transformed = index_second + int(num_basis_functions*i)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1.0/normalization_factors[index_first % num_basis_functions][0], "x" + str(index_second_transformed) : -1.0/normalization_factors[index_second % num_basis_functions][0], 'constant': 0})
for symmetry in type_symmetries:
if(symmetry == ('sin')):
#Symmetry sin
#For i and j---> \xi_i(sin(x_j)) = \xi_j(sin(x_i))
for i in range(number_of_oscillators):
for j in range(number_of_oscillators):
if(i != j):
index_first = feature_names.index("x" + str(j + number_of_oscillators))
index_second = feature_names.index("x" + str(i + number_of_oscillators))
                        index_first_transformed = index_first + int(num_basis_functions*i)
index_second_transformed = index_second + int(num_basis_functions*j)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1.0/normalization_factors[index_first % num_basis_functions][0], "x" + str(index_second_transformed) : -1.0/normalization_factors[index_second % num_basis_functions][0], 'constant': 0})
# Remove duplicates before we output.
# pair_symmetries = [dict(t) for t in {tuple(d.keys()) for d in pair_symmetries}]
    pair_symmetries = delete_dictionaries_with_duplicate_keys(pair_symmetries)
return pair_symmetries
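# Clarifying sketch (not part of the original code): each dictionary returned above encodes one
# linear equality over the flattened coefficient matrix. A key "x{k}", with
# k = oscillator_index * num_basis_functions + basis_index, addresses a single coefficient, its
# value is the multiplier in the equality, and 'constant' is the right-hand side. The helper
# below is a hypothetical convenience for turning such a dictionary into a dense row; the name
# and its use are assumptions made for illustration only.
def _constraint_dictionary_to_row(constraint, num_variables):
    row = np.zeros(num_variables)
    for key, value in constraint.items():
        if key != "constant":
            row[int(key[1:])] = value      # keys follow the "x{k}" convention described above
    return row, constraint["constant"]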
def add_constraints_symmetry_kuramoto_backup(polinomial, number_of_oscillators, normalization_factors):
feature_names = polinomial.get_feature_names()
num_basis_functions = len(feature_names)
pair_symmetries = []
#Constraint on the cos^2 terms:
for i in range(number_of_oscillators):
for j in range(number_of_oscillators):
index_first = feature_names.index("x" + str(j) + '^2')
index_first_transformed = index_first + int(num_basis_functions*i)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1, 'constant': 0})
#Constraint on the sin^2 terms:
for i in range(number_of_oscillators):
for j in range(number_of_oscillators):
index_first = feature_names.index("x" + str(number_of_oscillators + j) + '^2')
index_first_transformed = index_first + int(num_basis_functions*i)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1, 'constant': 0})
type_symmetries = [('sin', 'cos'), ('cos'), ('sin')]
for symmetry in type_symmetries:
if(symmetry == ('sin', 'cos') or symmetry == ('cos', 'sin')):
#Symmetry cos-sin
#For i and j---> \xi_i(cos(x_i) sin(x_j)) = \xi_j(cos(x_j) sin(x_i))
for i in range(number_of_oscillators):
for j in range(number_of_oscillators):
if(i != j):
index_first = feature_names.index("x" + str(i) + ' x' + str(number_of_oscillators + j))
index_second = feature_names.index("x" + str(j) + ' x' + str(number_of_oscillators + i))
index_first_transformed = index_first + int(num_basis_functions*i)
index_second_transformed = index_second + int(num_basis_functions*j)
symmetry_pair = (min(index_first_transformed, index_second_transformed), max(index_first_transformed, index_second_transformed))
pair_symmetries.append({ "x" + str(symmetry_pair[0]) : normalization_factors[index_second % num_basis_functions][0], "x" + str(symmetry_pair[1]) : -normalization_factors[index_first % num_basis_functions][0], 'constant': 0})
#Symmetry sin-cos
#For i and j---> \xi_i(sin(x_i) cos(x_j)) = \xi_j(sin(x_j) cos(x_i))
for i in range(number_of_oscillators):
for j in range(number_of_oscillators):
if(i != j):
index_first = feature_names.index("x" + str(j) + ' x' + str(i + number_of_oscillators))
index_second = feature_names.index("x" + str(i) + ' x' + str(j + number_of_oscillators))
index_first_transformed = index_first + + int(num_basis_functions*i)
index_second_transformed = index_second + int(num_basis_functions*j)
symmetry_pair = (min(index_first_transformed, index_second_transformed), max(index_first_transformed, index_second_transformed))
pair_symmetries.append({ "x" + str(symmetry_pair[0]) : normalization_factors[index_second % num_basis_functions][0], "x" + str(symmetry_pair[1]) : -normalization_factors[index_first % num_basis_functions][0], 'constant': 0})
for symmetry in type_symmetries:
if(symmetry == ('cos')):
#Symmetry cos
#For i and j---> \xi_i(sin(x_i) cos(x_j)) = \xi_j(sin(x_j) cos(x_i))
for i in range(1, number_of_oscillators):
index_first = feature_names.index("x" + str(0))
index_second = feature_names.index("x" + str(i))
index_first_transformed = index_first
index_second_transformed = index_second + int(num_basis_functions*i)
symmetry_pair = (min(index_first_transformed, index_second_transformed), max(index_first_transformed, index_second_transformed))
pair_symmetries.append({ "x" + str(symmetry_pair[0]) : normalization_factors[index_second % num_basis_functions][0], "x" + str(symmetry_pair[1]) : -normalization_factors[index_first % num_basis_functions][0], 'constant': 0})
# for symmetry in type_symmetries:
# if(symmetry == ('cos')):
# #Symmetry cos
# #For i and j---> \xi_i(cos(x_j)) = \xi_j(cos(x_i))
# for i in range(number_of_oscillators):
# for j in range(number_of_oscillators):
# if(i != j):
# index_first = feature_names.index("x" + str(j))
# index_second = feature_names.index("x" + str(i))
# index_first_transformed = index_first + + int(num_basis_functions*i)
# index_second_transformed = index_second + int(num_basis_functions*j)
# symmetry_pair = (min(index_first_transformed, index_second_transformed), max(index_first_transformed, index_second_transformed))
# pair_symmetries.append({ "x" + str(symmetry_pair[0]) : normalization_factors[index_second % num_basis_functions][0], "x" + str(symmetry_pair[1]) : -normalization_factors[index_first % num_basis_functions][0], 'constant': 0})
for symmetry in type_symmetries:
if(symmetry == ('sin')):
#Symmetry sin
for i in range(1, number_of_oscillators):
index_first = feature_names.index("x" + str(number_of_oscillators + 0))
index_second = feature_names.index("x" + str(number_of_oscillators + i))
index_first_transformed = index_first
index_second_transformed = index_second + int(num_basis_functions*i)
symmetry_pair = (min(index_first_transformed, index_second_transformed), max(index_first_transformed, index_second_transformed))
pair_symmetries.append({ "x" + str(symmetry_pair[0]) : normalization_factors[index_second % num_basis_functions][0], "x" + str(symmetry_pair[1]) : -normalization_factors[index_first % num_basis_functions][0], 'constant': 0})
# for symmetry in type_symmetries:
# if(symmetry == ('sin')):
# #Symmetry cos
# #For i and j---> \xi_i(sin(x_j)) = \xi_j(sin(x_i))
# for i in range(number_of_oscillators):
# for j in range(number_of_oscillators):
# if(i != j):
# index_first = feature_names.index("x" + str(j + number_of_oscillators))
# index_second = feature_names.index("x" + str(i + number_of_oscillators))
# index_first_transformed = index_first + + int(num_basis_functions*i)
# index_second_transformed = index_second + int(num_basis_functions*j)
# symmetry_pair = (min(index_first_transformed, index_second_transformed), max(index_first_transformed, index_second_transformed))
# pair_symmetries.append({ "x" + str(symmetry_pair[0]) : normalization_factors[index_second % num_basis_functions][0], "x" + str(symmetry_pair[1]) : -normalization_factors[index_first % num_basis_functions][0], 'constant': 0})
# Remove duplicates before we output.
# pair_symmetries = [dict(t) for t in {tuple(d.keys()) for d in pair_symmetries}]
    pair_symmetries = delete_dictionaries_with_duplicate_keys_backup(pair_symmetries)
return pair_symmetries
def add_constraints_symmetry_kuramoto_simple(polinomial, number_of_oscillators, normalization_factors):
feature_names = polinomial.get_feature_names()
num_basis_functions = len(feature_names)
pair_symmetries = []
#Constraint on the cos^2 terms:
for i in range(number_of_oscillators):
for j in range(number_of_oscillators):
index_first = feature_names.index("x" + str(j) + '^2')
index_first_transformed = index_first + int(num_basis_functions*i)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1, 'constant': 0})
#Constraint on the sin^2 terms:
for i in range(number_of_oscillators):
for j in range(number_of_oscillators):
index_first = feature_names.index("x" + str(number_of_oscillators + j) + '^2')
index_first_transformed = index_first + int(num_basis_functions*i)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1, 'constant': 0})
pair_symmetries = delete_dictionaries_with_duplicate_keys(pair_symmetries)
return pair_symmetries
def add_constraints_symmetry_fermi_pasta_ulam_tsingou(polinomial, number_of_oscillators, normalization_factors):
feature_names = polinomial.get_feature_names()
num_basis_functions = len(feature_names)
pair_symmetries = []
#First order monomials
#For x_i and x_{i+-1} ---> \xi_i(x_i) = \xi_{i+-1}(x_{i+-1})
for i in range(number_of_oscillators):
for j in [i-1, i+1]:
if j >= 0 and j < number_of_oscillators:
index_first = feature_names.index("x" + str(i))
index_first_transformed = index_first + int(num_basis_functions*i)
index_second = feature_names.index("x" + str(j))
index_second_transformed = index_second + int(num_basis_functions*j)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1.0/normalization_factors[index_first % num_basis_functions][0], "x" + str(index_second_transformed) : -1.0/normalization_factors[index_second % num_basis_functions][0], 'constant': 0})
#First order monomials
#For x_i and x_{i+-1} ---> \xi_i(x_{i+-1}) = \xi_{i+-1}(x_{i})
for i in range(number_of_oscillators):
for j in [i-1, i+1]:
if j >= 0 and j < number_of_oscillators:
index_first = feature_names.index("x" + str(i))
index_first_transformed = index_first + int(num_basis_functions*j)
index_second = feature_names.index("x" + str(j))
index_second_transformed = index_second + int(num_basis_functions*i)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1.0/normalization_factors[index_first % num_basis_functions][0], "x" + str(index_second_transformed) : -1.0/normalization_factors[index_second % num_basis_functions][0], 'constant': 0})
#Second order monomials
#For x_i and x_{i+-1} ---> \xi_i(x_i x_{i+-1}) = \xi_{i+-1}(x_i x_{i+-1})
for i in range(number_of_oscillators):
for j in [i-1, i+1]:
if j >= 0 and j < number_of_oscillators:
if(i <= j):
index = feature_names.index("x" + str(i) + " x" + str(j))
else:
index = feature_names.index("x" + str(j) + " x" + str(i))
index_first_transformed = index + int(num_basis_functions*i)
index_second_transformed = index + int(num_basis_functions*j)
                pair_symmetries.append({ "x" + str(index_first_transformed) : 1.0/normalization_factors[index % num_basis_functions][0], "x" + str(index_second_transformed) : -1.0/normalization_factors[index % num_basis_functions][0], 'constant': 0})
#Second order monomials
#For x_i and x_{i+-1} ---> \xi_i(x_i^2) = \xi_{i+-1}(x_{i+-1}^2)
for i in range(number_of_oscillators):
for j in [i-1, i+1]:
if j >= 0 and j < number_of_oscillators:
index_first = feature_names.index("x" + str(i) + "^2")
index_second = feature_names.index("x" + str(j) + "^2")
index_first_transformed = index_first + int(num_basis_functions*i)
index_second_transformed = index_second + int(num_basis_functions*j)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1.0/normalization_factors[index_first % num_basis_functions][0], "x" + str(index_second_transformed) : -1.0/normalization_factors[index_second % num_basis_functions][0], 'constant': 0})
#Second order monomials
#For x_i and x_{i+-1} ---> \xi_i(x_{i+-1}^2) = \xi_{i+-1}(x_i^2)
for i in range(number_of_oscillators):
for j in [i-1, i+1]:
if j >= 0 and j < number_of_oscillators:
index_first = feature_names.index("x" + str(i) + "^2")
index_second = feature_names.index("x" + str(j) + "^2")
index_first_transformed = index_first + int(num_basis_functions*j)
index_second_transformed = index_second + int(num_basis_functions*i)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1.0/normalization_factors[index_first % num_basis_functions][0], "x" + str(index_second_transformed) : -1.0/normalization_factors[index_second % num_basis_functions][0], 'constant': 0})
#Third order monomials
#For x_i and x_{i+-1} ---> \xi_i(x_i^2 x_{i+-1}) = \xi_{i+-1}(x_{i+-1}^2 x_i)
for i in range(number_of_oscillators):
for j in [i-1, i+1]:
if j >= 0 and j < number_of_oscillators:
if(i <= j):
index_first = feature_names.index("x" + str(i) + "^2 x" + str(j))
index_second = feature_names.index("x" + str(i) + " x" + str(j) + "^2")
else:
index_first = feature_names.index("x" + str(j) + " x" + str(i) + "^2")
index_second = feature_names.index("x" + str(j) + "^2 x" + str(i))
index_first_transformed = index_first + int(num_basis_functions*i)
index_second_transformed = index_second + int(num_basis_functions*j)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1.0/normalization_factors[index_first % num_basis_functions][0], "x" + str(index_second_transformed) : -1.0/normalization_factors[index_second % num_basis_functions][0], 'constant': 0})
#Third order monomials
#For x_i and x_{i+-1} ---> \xi_i(x_i x_{i+-1}^2) = \xi_{i+-1}(x_{i+-1} x_i^2)
for i in range(number_of_oscillators):
for j in [i-1, i+1]:
if j >= 0 and j < number_of_oscillators:
if(i <= j):
index_first = feature_names.index("x" + str(i) + " x" + str(j) + "^2")
index_second = feature_names.index("x" + str(i) + "^2 x" + str(j))
else:
index_first = feature_names.index("x" + str(j) + "^2 x" + str(i))
index_second = feature_names.index("x" + str(j) + " x" + str(i) + "^2")
index_first_transformed = index_first + int(num_basis_functions*i)
index_second_transformed = index_second + int(num_basis_functions*j)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1.0/normalization_factors[index_first % num_basis_functions][0], "x" + str(index_second_transformed) : -1.0/normalization_factors[index_second % num_basis_functions][0], 'constant': 0})
#Third order monomials
#For x_i and x_{i+-1} ---> \xi_i(x_i^3) = \xi_{i+-1}(x_{i+-1}^3)
for i in range(number_of_oscillators):
for j in [i-1, i+1]:
if j >= 0 and j < number_of_oscillators:
index_first = feature_names.index("x" + str(i) + "^3")
index_second = feature_names.index("x" + str(j) + "^3")
index_first_transformed = index_first + int(num_basis_functions*i)
index_second_transformed = index_second + int(num_basis_functions*j)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1.0/normalization_factors[index_first % num_basis_functions][0], "x" + str(index_second_transformed) : -1.0/normalization_factors[index_second % num_basis_functions][0], 'constant': 0})
#Third order monomials
#For x_i and x_{i+-1} ---> \xi_i(x_{i+-1}^3) = \xi_{i+-1}(x_{i}^3)
for i in range(number_of_oscillators):
for j in [i-1, i+1]:
if j >= 0 and j < number_of_oscillators:
index_first = feature_names.index("x" + str(i) + "^3")
index_second = feature_names.index("x" + str(j) + "^3")
index_first_transformed = index_first + int(num_basis_functions*j)
index_second_transformed = index_second + int(num_basis_functions*i)
pair_symmetries.append({ "x" + str(index_first_transformed) : 1.0/normalization_factors[index_first % num_basis_functions][0], "x" + str(index_second_transformed) : -1.0/normalization_factors[index_second % num_basis_functions][0], 'constant': 0})
pair_symmetries = delete_dictionaries_with_duplicate_keys(pair_symmetries)
return pair_symmetries
def delete_dictionaries_with_duplicate_keys(list_dictionaries):
seen = set()
new_dictionary = []
for d in list_dictionaries:
t = tuple(sorted(d.keys()))
if t not in seen:
seen.add(t)
new_dictionary.append(d)
return new_dictionary
def delete_dictionaries_with_duplicate_keys_backup(list_dictionaries):
seen = set()
new_dictionary = []
for d in list_dictionaries:
t = tuple(d.keys())
if t not in seen:
seen.add(t)
new_dictionary.append(d)
return new_dictionary
def add_constraints_symmetry_kuramoto_backup_v2(polinomial, number_of_oscillators):
type_symmetries = [('sin', 'cos'), ('cos'), ('sin')]
feature_names = polinomial.get_feature_names()
pair_symmetries = []
for symmetry in type_symmetries:
if(symmetry == ('sin', 'cos') or symmetry == ('cos', 'sin')):
#Symmetry cos-sin
for i in range(number_of_oscillators):
for j in range(number_of_oscillators):
if(i != j):
index_first = feature_names.index("x" + str(i) + ' x' + str(number_of_oscillators + j))
index_second = feature_names.index("x" + str(j) + ' x' + str(number_of_oscillators + i))
index_first_transformed = index_first + int(len(feature_names)*i)
index_second_transformed = index_second + int(len(feature_names)*j)
symmetry_pair = (min(index_first_transformed, index_second_transformed), max(index_first_transformed, index_second_transformed))
pair_symmetries.append(symmetry_pair)
#Symmetry sin-cos
for i in range(number_of_oscillators):
for j in range(number_of_oscillators):
if(i != j):
index_first = feature_names.index("x" + str(j) + ' x' + str(i + number_of_oscillators))
index_second = feature_names.index("x" + str(i) + ' x' + str(j + number_of_oscillators))
index_first_transformed = index_first + + int(len(feature_names)*i)
index_second_transformed = index_second + int(len(feature_names)*j)
symmetry_pair = (min(index_first_transformed, index_second_transformed), max(index_first_transformed, index_second_transformed))
pair_symmetries.append(symmetry_pair)
for symmetry in type_symmetries:
if(symmetry == ('cos')):
#Symmetry cos
for i in range(1, number_of_oscillators):
index_first = feature_names.index("x" + str(0))
index_second = feature_names.index("x" + str(i))
index_first_transformed = index_first
index_second_transformed = index_second + int(len(feature_names)*i)
symmetry_pair = (min(index_first_transformed, index_second_transformed), max(index_first_transformed, index_second_transformed))
pair_symmetries.append(symmetry_pair)
for symmetry in type_symmetries:
if(symmetry == ('sin')):
#Symmetry sin
for i in range(1, number_of_oscillators):
index_first = feature_names.index("x" + str(number_of_oscillators + 0))
index_second = feature_names.index("x" + str(number_of_oscillators + i))
index_first_transformed = index_first
index_second_transformed = index_second + int(len(feature_names)*i)
symmetry_pair = (min(index_first_transformed, index_second_transformed), max(index_first_transformed, index_second_transformed))
pair_symmetries.append(symmetry_pair)
#Remove duplicates before we output.
pair_symmetries = list(set([i for i in pair_symmetries]))
return pair_symmetries
def fermi_pasta_ulam_random(number_of_oscillators, number_of_snapshots, beta=0.7, x_min = - 0.1, x_max = 0.1):
"""Fermi–Pasta–Ulam problem.
Generate data for the Fermi–Pasta–Ulam problem represented by the differential equation
d^2/dt^2 x_i = (x_i+1 - 2x_i + x_i-1) + beta((x_i+1 - x_i)^3 - (x_i-x_i-1)^3).
See [1]_ for details.
Parameters
----------
number_of_oscillators: int
number of oscillators
number_of_snapshots: int
number of snapshots
Returns
-------
snapshots: ndarray(number_of_oscillators, number_of_snapshots)
snapshot matrix containing random displacements of the oscillators in [-0.1,0.1]
derivatives: ndarray(number_of_oscillators, number_of_snapshots)
matrix containing the corresponding derivatives
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, "Multidimensional Approximation of Nonlinear Dynamical Systems",
arXiv:1809.02448, 2018
"""
# define random snapshot matrix
snapshots = (x_max - x_min) * (np.random.rand(number_of_oscillators, number_of_snapshots) - 0.5) + (x_max + x_min)/2.0
# compute derivatives
derivatives = np.zeros((number_of_oscillators, number_of_snapshots))
for j in range(number_of_snapshots):
derivatives[0, j] = (
snapshots[1, j]
- 2 * snapshots[0, j]
+ beta * ((snapshots[1, j] - snapshots[0, j]) ** 3 - snapshots[0, j] ** 3)
)
for i in range(1, number_of_oscillators - 1):
derivatives[i, j] = (
snapshots[i + 1, j]
- 2 * snapshots[i, j]
+ snapshots[i - 1, j]
+ beta
* (
(snapshots[i + 1, j] - snapshots[i, j]) ** 3
- (snapshots[i, j] - snapshots[i - 1, j]) ** 3
)
)
derivatives[-1, j] = (
-2 * snapshots[-1, j]
+ snapshots[-2, j]
+ beta
* (-snapshots[-1, j] ** 3 - (snapshots[-1, j] - snapshots[-2, j]) ** 3)
)
return snapshots, derivatives
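# Minimal usage sketch (not part of the original code): the sizes below are arbitrary
# illustrative values.
def _demo_fermi_pasta_ulam_random():
    snapshots, derivatives = fermi_pasta_ulam_random(number_of_oscillators=4, number_of_snapshots=50)
    # Each column of `derivatives` holds the accelerations d^2/dt^2 x_i obtained by evaluating
    # the Fermi-Pasta-Ulam right-hand side at the matching column of `snapshots`.
    return snapshots.shape, derivatives.shape  # both (4, 50)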
def fermi_pasta_ulam_time(number_of_oscillators, exact_solution, polinomial, max_time, number_of_snapshots, number_of_experiments, relative_tolerance = 1.0e-10, absolute_tolerance = 1.0e-13):
from scipy.integrate import solve_ivp
# Plot the exact trajectory.
def fun_exact(t, y):
return np.dot(
exact_solution, polinomial.fit_transform(y.reshape(1, -1)).T
).squeeze()
list_snapshots = []
list_derivatives = []
list_times = []
for i in range(number_of_experiments):
initial_position = np.random.uniform(-0.1, 1.0, size=number_of_oscillators)
sol = solve_ivp(fun_exact, [0, max_time], initial_position, method='DOP853', t_eval=np.linspace(0, max_time, number_of_snapshots), rtol = relative_tolerance, atol = absolute_tolerance)
snapshots = sol.y
derivatives = np.zeros([number_of_oscillators, number_of_snapshots])
for i in range(number_of_snapshots):
derivatives[:, i] = fun_exact(0, snapshots[:, i])
list_derivatives.append(derivatives)
list_times.append(sol['t'])
list_snapshots.append(snapshots)
return list_snapshots, list_derivatives, list_times
def fermi_pasta_ulam_time_correct(number_of_oscillators, exact_solution, polinomial, max_time, number_of_snapshots, number_of_experiments, relative_tolerance = 1.0e-10, absolute_tolerance = 1.0e-13):
from scipy.integrate import solve_ivp
# Plot the exact trajectory.
# Assume that the
def fun_exact(t, y):
position = y[:number_of_oscillators]
velocity = y[number_of_oscillators:]
return np.append(velocity, np.dot(
exact_solution, polinomial.fit_transform(position.reshape(1, -1)).T
).squeeze())
list_snapshots = []
list_derivatives = []
list_second_derivatives = []
list_times = []
for i in range(number_of_experiments):
initial_position = np.random.uniform(-0.1, 0.1, size=number_of_oscillators)
initial_velocity = np.zeros(number_of_oscillators)
vector = np.append(initial_position, initial_velocity)
sol = solve_ivp(fun_exact, [0, max_time], vector, method='DOP853', t_eval=np.linspace(0, max_time, number_of_snapshots), rtol = relative_tolerance, atol = absolute_tolerance)
snapshots = sol.y
second_derivatives = np.zeros([number_of_oscillators, number_of_snapshots])
for i in range(number_of_snapshots):
second_derivatives[:, i] = fun_exact(0, snapshots[:number_of_oscillators, i])
list_second_derivatives.append(second_derivatives)
list_derivatives.append(snapshots[number_of_oscillators:, :])
list_snapshots.append(snapshots[:number_of_oscillators, :])
list_times.append(sol['t'])
return list_snapshots, list_derivatives, list_second_derivatives, list_times
# Perform an experiment where we let the system develop from an initial position.
def fermi_pasta_ulam_time_individual(
exact_solution, polinomial, initial_position, t_min, t_max, num_steps
):
# Plot the exact trajectory.
def fun_exact(t, y):
return np.dot(
exact_solution, polinomial.fit_transform(y.reshape(1, -1)).T
).squeeze()
from scipy.integrate import solve_ivp
sol_true = solve_ivp(
fun_exact,
[t_min, t_max],
initial_position,
t_eval=np.linspace(t_min, t_max, num_steps),
vectorized=False,
)
assert (
sol_true["status"] == 0
    ), "The integration of the initial value solver was not successful."
return sol_true.y, sol_true['t']
# Perform an experiment where we let the system develop from an initial position.
def fermi_pasta_ulam_time_individual_correct(
exact_solution, polinomial, initial_position, initial_velocity, t_min, t_max, num_steps
):
number_of_oscillators = exact_solution.shape[0]
# Plot the exact trajectory.
# Assume that the
def fun_exact(t, y):
position = y[:number_of_oscillators]
velocity = y[number_of_oscillators:]
return np.append(velocity, np.dot(
exact_solution, polinomial.fit_transform(position.reshape(1, -1)).T
).squeeze())
from scipy.integrate import solve_ivp
position_val = np.append(initial_position, initial_velocity)
sol_true = solve_ivp(
fun_exact,
[t_min, t_max],
position_val,
t_eval=np.linspace(t_min, t_max, num_steps),
vectorized=False,
)
assert (
sol_true["status"] == 0
    ), "The integration of the initial value solver was not successful."
return sol_true.y[:number_of_oscillators], sol_true.y[number_of_oscillators:], sol_true['t']
# Build the exact dynamics of the fermi_pasta_ulam if we have polynomials of order up to three.
def exact_solution_fermi_pasta_ulam(number_of_oscillators, polinomial, beta):
reference_polynomial = polinomial.get_feature_names()
num_basis_functions = len(reference_polynomial)
# Build the exact solution at the boundary
exact_solution = np.zeros((number_of_oscillators, num_basis_functions))
# First order terms.
exact_solution[0, reference_polynomial.index("x0")] = -2
exact_solution[0, reference_polynomial.index("x1")] = 1
# Third order terms
exact_solution[0, reference_polynomial.index("x0^3")] = -2 * beta
exact_solution[0, reference_polynomial.index("x1^3")] = beta
exact_solution[0, reference_polynomial.index("x0^2 x1")] = 3 * beta
exact_solution[0, reference_polynomial.index("x0 x1^2")] = -3 * beta
# Build the exact solution in the interior.
for i in range(1, number_of_oscillators - 1):
# First order terms.
exact_solution[i, reference_polynomial.index("x" + str(i))] = -2
exact_solution[i, reference_polynomial.index("x" + str(i - 1))] = 1
exact_solution[i, reference_polynomial.index("x" + str(i + 1))] = 1
# Third order terms
exact_solution[i, reference_polynomial.index("x" + str(i) + "^3")] = -2 * beta
exact_solution[i, reference_polynomial.index("x" + str(i - 1) + "^3")] = (
1 * beta
)
exact_solution[i, reference_polynomial.index("x" + str(i + 1) + "^3")] = (
1 * beta
)
exact_solution[
i, reference_polynomial.index("x" + str(i) + "^2 x" + str(i + 1))
] = (3 * beta)
exact_solution[
i, reference_polynomial.index("x" + str(i) + " x" + str(i + 1) + "^2")
] = (-3 * beta)
exact_solution[
i, reference_polynomial.index("x" + str(i - 1) + " x" + str(i) + "^2")
] = (3 * beta)
exact_solution[
i, reference_polynomial.index("x" + str(i - 1) + "^2 x" + str(i))
] = (-3 * beta)
# Equation for the end point.
exact_solution[
number_of_oscillators - 1,
reference_polynomial.index("x" + str(number_of_oscillators - 1)),
] = -2
exact_solution[
number_of_oscillators - 1,
reference_polynomial.index("x" + str(number_of_oscillators - 2)),
] = 1
# Third order terms
exact_solution[
number_of_oscillators - 1,
reference_polynomial.index("x" + str(number_of_oscillators - 1) + "^3"),
] = (-2 * beta)
exact_solution[
number_of_oscillators - 1,
reference_polynomial.index("x" + str(number_of_oscillators - 2) + "^3"),
] = beta
exact_solution[
number_of_oscillators - 1,
reference_polynomial.index(
"x"
+ str(number_of_oscillators - 2)
+ "^2 x"
+ str(number_of_oscillators - 1)
),
] = (-3 * beta)
exact_solution[
number_of_oscillators - 1,
reference_polynomial.index(
"x"
+ str(number_of_oscillators - 2)
+ " x"
+ str(number_of_oscillators - 1)
+ "^2"
),
] = (3 * beta)
return exact_solution
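# Minimal usage sketch (not part of the original code). It assumes an sklearn PolynomialFeatures
# basis of degree 3, fitted so that get_feature_names() returns names such as "x0", "x0^2 x1",
# and "x0^3", which is exactly what the indexing above relies on.
def _demo_exact_solution_fermi_pasta_ulam(number_of_oscillators=4, beta=0.7):
    from sklearn.preprocessing import PolynomialFeatures
    polinomial = PolynomialFeatures(degree=3)
    polinomial.fit(np.zeros((1, number_of_oscillators)))   # only the number of features matters here
    exact = exact_solution_fermi_pasta_ulam(number_of_oscillators, polinomial, beta)
    return exact.shape  # (number_of_oscillators, number_of_basis_functions)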
def brusselator_time(initial_position, t_min = 0.0, t_max = 10.0, num_steps = 1000, r_coefficients = [1, 3, 1, 1]):
#% rate constants:
#r1 = 1; % 0 -> A
#r2 = 3; % A -> B
#r3 = 1; % 2A + B -> 3A
#r4 = 1; % A -> 0
r1, r2, r3, r4 = r_coefficients
def fun_exact(t, y):
return np.array([r1-r2*y[0]+r3*y[0]**2*y[1]-r4*y[0], r2*y[0]-r3*y[0]**2*y[1]])
from scipy.integrate import solve_ivp
sol_true = solve_ivp(
fun_exact,
[0, t_max],
initial_position,
t_eval=np.linspace(t_min, t_max, num_steps),
vectorized=False,
)
assert (
sol_true["status"] == 0
    ), "The integration of the initial value solver was not successful."
return sol_true, fun_exact(sol_true['t'], sol_true['y'])
# Build the exact dynamics of the Brusselator if we have polynomials of order up to three.
def exact_solution_brusselator(dimension, polinomial, r_coefficients):
r1, r2, r3, r4 = r_coefficients
reference_polynomial = polinomial.get_feature_names()
num_basis_functions = len(reference_polynomial)
# Build the exact solution at the boundary
exact_solution = np.zeros((dimension, num_basis_functions))
# Solution for the first species
exact_solution[0, reference_polynomial.index("1")] = r1
exact_solution[0, reference_polynomial.index("x0")] = - r2 - r4
    exact_solution[0, reference_polynomial.index("x0^2 x1")] = r3
    # Solution for the second species
exact_solution[1, reference_polynomial.index("x0")] = r2
exact_solution[1, reference_polynomial.index("x0^2 x1")] = -r3
return exact_solution
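# Consistency sketch (not part of the original code): it checks, at a random state, that the
# coefficient matrix built above reproduces the Brusselator right-hand side used in
# brusselator_time. It assumes an sklearn PolynomialFeatures basis of degree at least 3 so that
# the "x0^2 x1" feature exists; the rate constants are the default illustrative values.
def _check_exact_solution_brusselator(r_coefficients=[1, 3, 1, 1]):
    from sklearn.preprocessing import PolynomialFeatures
    r1, r2, r3, r4 = r_coefficients
    polinomial = PolynomialFeatures(degree=3)
    y = np.random.rand(2)
    basis_values = polinomial.fit_transform(y.reshape(1, -1)).T
    exact = exact_solution_brusselator(2, polinomial, r_coefficients)
    rhs_from_matrix = np.dot(exact, basis_values).squeeze()
    rhs_direct = np.array([r1 - r2 * y[0] + r3 * y[0] ** 2 * y[1] - r4 * y[0],
                           r2 * y[0] - r3 * y[0] ** 2 * y[1]])
    return np.allclose(rhs_from_matrix, rhs_direct)  # expected to be True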
def lutka_volterra_time(initial_position, t_min = 0.0, t_max = 10.0, num_steps = 1000, r_coefficients = [1, 1, 1, 1]):
#% rate constants:
#r1 = 1; % reproduction of prey: A -> 2A
#r2 = 1; % death of predator: B -> 0
#r3 = 1; % consumption: A + B -> B
#r4 = 1; % reproduction of predator: A + B -> A + 2B
r1, r2, r3, r4 = r_coefficients
def fun_exact(t, y):
return np.array([y[0]*(r1-r3*y[1]), -y[1]*(r2-r4*y[0])])
from scipy.integrate import solve_ivp
sol_true = solve_ivp(
fun_exact,
[0, t_max],
initial_position,
t_eval=np.linspace(t_min, t_max, num_steps),
vectorized=False,
)
assert (
sol_true["status"] == 0
    ), "The integration of the initial value solver was not successful."
return sol_true, fun_exact(sol_true['t'], sol_true['y'])
# Build the exact dynamics of the Lotka-Volterra system if we have polynomials of order up to two.
def exact_solution_lutka_volterra(dimension, polinomial, r_coefficients):
r1, r2, r3, r4 = r_coefficients
reference_polynomial = polinomial.get_feature_names()
num_basis_functions = len(reference_polynomial)
# Build the exact solution at the boundary
exact_solution = np.zeros((dimension, num_basis_functions))
# Solution for the first species
exact_solution[0, reference_polynomial.index("x0")] = r1
exact_solution[0, reference_polynomial.index("x0 x1")] = - r3
    # Solution for the second species
exact_solution[1, reference_polynomial.index("x1")] = - r2
exact_solution[1, reference_polynomial.index("x0 x1")] = r4
return exact_solution
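# Minimal usage sketch (not part of the original code): the initial condition and the time grid
# below are arbitrary illustrative values.
def _demo_lutka_volterra():
    sol, derivatives = lutka_volterra_time(initial_position=np.array([1.5, 1.0]), t_max=20.0, num_steps=500)
    # sol.y holds the prey/predator trajectories; `derivatives` holds the right-hand side
    # evaluated along the trajectory, which is the data the recovery problem is fitted to.
    return sol.y.shape, derivatives.shape  # both (2, 500)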
def michaelis_menten_time(dimension, number_of_snapshots, number_of_experiments = 10, t_min = 0.0, t_max = 10.0, coefficients = [0.01, 1, 1]):
#Expressions
#d/dt C_1 = - k_{1}*C_{1}*C_{2} + k_{-1}* C_{3}
#d/dt C_2 = - k_{1}*C_{1}*C_{2} + (k_{-1} + k_{2})* C_{3}
#d/dt C_3 = k_{1}*C_{1}*C_{2} - (k_{-1} + k_{2})* C_{3}
#d/dt C_4 = k_{2}*C_{3}
# initial_position = np.array([1.0, 0.7, 0.0, 0.0])
k_1, k_2, k_minus1 = coefficients
def fun_exact(t, y):
return np.array([-k_1*y[0]*y[1] + k_minus1*y[2], -k_1*y[0]*y[1] + (k_minus1 + k_2)*y[2], k_1*y[0]*y[1] - (k_minus1 + k_2)*y[2], k_2*y[2]])
from scipy.integrate import solve_ivp
list_snapshots = []
list_derivatives = []
list_times = []
for i in range(number_of_experiments):
initial_position = np.random.rand(4)
sol = solve_ivp(
fun_exact,
[0, t_max],
initial_position,
t_eval=np.linspace(t_min, t_max, number_of_snapshots),
vectorized=False,
)
assert (
sol["status"] == 0
        ), "The integration of the initial value solver was not successful."
snapshots = sol.y
derivatives = np.zeros([dimension, number_of_snapshots])
for i in range(number_of_snapshots):
derivatives[:, i] = fun_exact(0, snapshots[:, i])
list_derivatives.append(derivatives)
list_times.append(sol['t'])
list_snapshots.append(snapshots)
return list_snapshots, list_derivatives, list_times
def michaelis_menten_time_individual(initial_position, t_min = 0.0, t_max = 10.0, num_steps = 1000, coefficients = [0.01, 1, 1]):
#Expressions
#d/dt C_1 = - k_{1}*C_{1}*C_{2} + k_{-1}* C_{3}
#d/dt C_2 = - k_{1}*C_{1}*C_{2} + (k_{-1} + k_{2})* C_{3}
#d/dt C_3 = k_{1}*C_{1}*C_{2} - (k_{-1} + k_{2})* C_{3}
#d/dt C_4 = k_{2}*C_{3}
k_1, k_2, k_minus1 = coefficients
def fun_exact(t, y):
return np.array([-k_1*y[0]*y[1] + k_minus1*y[2], -k_1*y[0]*y[1] + (k_minus1 + k_2)*y[2], k_1*y[0]*y[1] - (k_minus1 + k_2)*y[2], k_2*y[2]])
from scipy.integrate import solve_ivp
sol_true = solve_ivp(
fun_exact,
[0, t_max],
initial_position,
t_eval=np.linspace(t_min, t_max, num_steps),
vectorized=False,
)
assert (
sol_true["status"] == 0
    ), "The integration of the initial value solver was not successful."
return sol_true, fun_exact(sol_true['t'], sol_true['y'])
# Build the exact dynamics of the Michaelis-Menten system if we have polynomials of order up to two.
def exact_solution_michaelis_menten(dimension, polinomial, coefficients):
assert dimension == 4, "The dimension of the michaelis-menten dynamic should be 4."
k_1, k_2, k_minus1 = coefficients
reference_polynomial = polinomial.get_feature_names()
num_basis_functions = len(reference_polynomial)
exact_solution = np.zeros((dimension, num_basis_functions))
#First species.
exact_solution[0, reference_polynomial.index("x0 x1")] = -k_1
exact_solution[0, reference_polynomial.index("x2")] = k_minus1
#Second species
exact_solution[1, reference_polynomial.index("x0 x1")] = -k_1
exact_solution[1, reference_polynomial.index("x2")] = k_minus1 + k_2
#Third species
exact_solution[2, reference_polynomial.index("x0 x1")] = k_1
exact_solution[2, reference_polynomial.index("x2")] = -(k_minus1 + k_2)
#Fourth species.
exact_solution[3, reference_polynomial.index("x2")] = k_2
return exact_solution
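# Consistency sketch (not part of the original code): the two conservation laws stated before
# add_constraints_michaelis_menten_easy below, d/dt (C_2 + C_3) = 0 and
# d/dt (C_1 + C_3 + C_4) = 0, imply that the corresponding rows of the exact coefficient matrix
# must cancel. The basis degree and the rate constants are illustrative assumptions.
def _check_michaelis_menten_conservation(coefficients=[0.01, 1, 1]):
    from sklearn.preprocessing import PolynomialFeatures
    polinomial = PolynomialFeatures(degree=2)
    polinomial.fit(np.zeros((1, 4)))
    exact = exact_solution_michaelis_menten(4, polinomial, coefficients)
    return (np.allclose(exact[1] + exact[2], 0.0)
            and np.allclose(exact[0] + exact[2] + exact[3], 0.0))  # expected to be True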
# Build the exact dynamics of the Michaelis-Menten system (four species) if we have polynomials of order up to two.
def exact_solution_michaelis_menten_1D(polinomial, coefficients, initial_position):
k_1, k_2, k_minus1 = coefficients
reference_polynomial = polinomial.get_feature_names()
num_basis_functions = len(reference_polynomial)
    # The Michaelis-Menten dynamics involve four species.
    dimension = 4
    exact_solution = np.zeros((dimension, num_basis_functions))
#First species.
exact_solution[0, reference_polynomial.index("x0 x1")] = -k_1
exact_solution[0, reference_polynomial.index("x2")] = k_minus1
#Second species
exact_solution[1, reference_polynomial.index("x0 x1")] = -k_1
exact_solution[1, reference_polynomial.index("x2")] = k_minus1 + k_2
#Third species
exact_solution[2, reference_polynomial.index("x0 x1")] = k_1
exact_solution[2, reference_polynomial.index("x2")] = -(k_minus1 + k_2)
#Fourth species.
exact_solution[3, reference_polynomial.index("x2")] = k_2
return exact_solution
#Add the constraints given by:
#d/dt (C_{2} + C_{3}) = 0
#d/dt (C_{1} + C_{3} + C_{4}) + 0
def add_constraints_michaelis_menten_easy(polinomial):
feature_names = polinomial.get_feature_names()
list_constraints = []
for i in range(len(feature_names)):
list_constraints.append({"x" + str(int(len(feature_names) + i)): 1.0, "x" + str(int(2.0*len(feature_names) + i)): 1.0, "constant": 0.0})
list_constraints.append({"x" + str(i): 1.0, "x" + str(int(2.0*len(feature_names) + i)): 1.0,"x" + str(int(3.0*len(feature_names) + i)): 1.0 , "constant": 0.0})
return list_constraints
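# Clarifying sketch (not part of the original code): in the dictionaries above, the variable
# "x{k}" addresses the coefficient of species k // num_basis_functions on basis function
# k % num_basis_functions, so the two families of constraints force, for every basis function,
# xi_2 + xi_3 = 0 and xi_1 + xi_3 + xi_4 = 0, i.e. the conservation laws stated above. The basis
# degree below is an illustrative assumption.
def _demo_constraints_michaelis_menten_easy():
    from sklearn.preprocessing import PolynomialFeatures
    polinomial = PolynomialFeatures(degree=2)
    polinomial.fit(np.zeros((1, 4)))
    constraints = add_constraints_michaelis_menten_easy(polinomial)
    num_basis_functions = len(polinomial.get_feature_names())
    return len(constraints), 2 * num_basis_functions  # the two counts coincide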
def add_constraints_michaelis_menten_hard(polinomial, data, normalization_factors, epsilon):
feature_names = polinomial.get_feature_names()
num_data_points = data.shape[1]
list_constraints = []
#Four constraints per datapoint
for j in range(num_data_points):
constraint_dictionary = {}
constraint_dictionary2 = {}
constraint_dictionary3 = {}
constraint_dictionary4 = {}
for i in range(len(feature_names)):
#First symmetry. One side of abs val.
constraint_dictionary["x" + str(int(len(feature_names) + i))] = data[i, j]/normalization_factors[i][0]
constraint_dictionary["x" + str(int(2.0*len(feature_names) + i))] = data[i, j]/normalization_factors[i][0]
constraint_dictionary["constant"] = epsilon
#First symmetry. Other side of abs val.
constraint_dictionary2["x" + str(int(len(feature_names) + i))] = -data[i, j]/normalization_factors[i][0]
constraint_dictionary2["x" + str(int(2.0*len(feature_names) + i))] = -data[i, j]/normalization_factors[i][0]
constraint_dictionary2["constant"] = epsilon
#Second symmetry. One side of abs val.
constraint_dictionary3["x" + str(i)] = data[i, j]/normalization_factors[i][0]
constraint_dictionary3["x" + str(int(2.0*len(feature_names) + i))] = data[i, j]/normalization_factors[i][0]
constraint_dictionary3["x" + str(int(3.0*len(feature_names) + i))] = data[i, j]/normalization_factors[i][0]
constraint_dictionary3["constant"] = epsilon
#Second symmetry. Other side of abs val.
constraint_dictionary4["x" + str(i)] = -data[i, j]/normalization_factors[i][0]
constraint_dictionary4["x" + str(int(2.0*len(feature_names) + i))] = -data[i, j]/normalization_factors[i][0]
constraint_dictionary4["x" + str(int(3.0*len(feature_names) + i))] = -data[i, j]/normalization_factors[i][0]
constraint_dictionary4["constant"] = epsilon
list_constraints.append(constraint_dictionary)
list_constraints.append(constraint_dictionary2)
list_constraints.append(constraint_dictionary3)
list_constraints.append(constraint_dictionary4)
return list_constraints
def simulate_dynamics(basis, dynamic, initial_position, t_max, num_steps = 1000):
# Plot the exact trajectory.
def fun_dynamic(t, y):
return np.dot(
dynamic, basis.fit_transform(y.reshape(1, -1)).T
).squeeze()
from scipy.integrate import solve_ivp
t_val = np.linspace(0, t_max, num_steps)
sol_true = solve_ivp(fun_dynamic,[0, t_max], initial_position, t_eval= np.linspace(0.0, t_max, num_steps), vectorized=False)
return sol_true['y'], t_val
def simulate_dynamics_kuramoto(basis, dynamic, initial_position, t_max, num_steps = 1000):
    # Integrate the dynamics to obtain the exact trajectory.
def fun_dynamic(t, y):
y_transformed = np.vstack(
(np.cos(y), np.sin(y))
)
return np.dot(
dynamic, basis.fit_transform(y_transformed.reshape(1, -1)).T
).squeeze()
    from scipy.integrate import solve_ivp
    t_val = np.linspace(0.0, t_max, num_steps)
    sol_true = solve_ivp(fun_dynamic, [0, t_max], initial_position, t_eval=t_val, vectorized=False)
    return sol_true['y'], t_val
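# Hedged usage sketch (not part of the original script): integrate a dynamic
# expressed in a PolynomialFeatures basis.  The basis degree, coefficient
# matrix and initial condition below are illustrative assumptions only.
def _example_simulate_dynamics():
    from sklearn.preprocessing import PolynomialFeatures
    basis = PolynomialFeatures(degree=2)
    basis.fit(np.zeros((1, 2)))  # two-dimensional state
    names = basis.get_feature_names()
    dynamic = np.zeros((2, len(names)))
    dynamic[0, names.index("x1")] = 1.0   # dx0/dt =  x1
    dynamic[1, names.index("x0")] = -1.0  # dx1/dt = -x0 (harmonic oscillator)
    trajectory, t_val = simulate_dynamics(basis, dynamic, np.array([1.0, 0.0]), t_max=10.0)
    return trajectory, t_val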
```
|
{
"source": "Jgeyen/devshop-bot",
"score": 3
}
|
#### File: gym_devshop/env/devshop_env.py
```python
import random
import json
import gym
import time
from gym import spaces
import pandas as pd
import numpy as np
from client import DevshopClient
import logging
logger = logging.getLogger(__name__)
MAX_BANK = 2147483647
MAX_NUM_SHARES = 2147483647
MAX_SHARE_PRICE = 5000
MAX_OPEN_POSITIONS = 5
MAX_STEPS = 2000
INITIAL_ACCOUNT_BALANCE = 10000
N_DISCRETE_ACTIONS = 5
class DevshopEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self):
self.reward_range = (-100, 100)
# Define action and observation space
# They must be gym.spaces objects
# Example when using discrete actions:
# 0 - Add Project, 1 - FounderDoBaWork, 2 - FounderDoDevWork, 3 - FounderDoTestWork
self.action_space = spaces.Discrete(N_DISCRETE_ACTIONS)
# Example for using image as input:
# self.observation_space = spaces.Box(low=0, high=255, shape=
# (HEIGHT, WIDTH, N_CHANNELS), dtype=np.uint8)
self.observation_space = spaces.Box(
low=-1, high=1, shape=(7,), dtype=np.float32)
self.lastBanks = []
self.lastInboxStoryCount = 0
def _take_action(self, action):
# print(f'Step: {self.current_step}')
# call the api to get the action to occur
client = DevshopClient()
response = client.doAction(action)
return response == b'true'
def step(self, action):
# Execute one time step within the environment
self.actionReward = 0
self.current_step += 1
delay_modifier = (self.current_step / MAX_STEPS)
self.didAction = False
client = DevshopClient()
state = client.getState()
if action == 0:
self.founderWasBusy = not state.founderFree
time.sleep(2)
self.didAction = True
else:
self.didAction = self._take_action(action)
state = client.getState()
self.bank = state.bank
self.inboxStoryCount = state.inboxStoryCount
self.backlogStoryCount = state.backlogStoryCount
self.devStoryCount = state.devStoryCount
self.testStoryCount = state.testStoryCount
self.doneStoryCount = state.doneStoryCount
self.founderFree = state.founderFree
self.newProjectCost = state.newProjectCost
if self.didAction:
if action == 0:
if self.founderWasBusy:
self.actionReward = 0
else:
self.actionReward = -1 - 99 * delay_modifier
if action == 1:
if state.inboxStoryCount < 3:
self.actionReward = 5
else:
tooMany = -10 -10*state.inboxStoryCount* delay_modifier
if tooMany < -100:
tooMany = -100
self.actionReward = tooMany
if action == 2:
self.actionReward = 10
if action == 3:
self.actionReward = 30
if action == 4:
self.actionReward = 35 + 70 * delay_modifier
else:
self.actionReward = 0
# self.lastBanks.append(self.bank)
reward = self.actionReward
done = self.bank + 2000 <= 0
obs = np.array((self.inboxStoryCount/100,
self.backlogStoryCount/100,
self.testStoryCount/100,
self.doneStoryCount / 100,
self.founderFree,
self.newProjectCost/150,
self.bank / 2000))
print(
f"step: {self.current_step}; Done Stories: {self.doneStoryCount}; Reward: {reward} for action: {action}")
return obs, reward, done, {}
    def reset(self):
        self.current_step = 0
        self.lastInboxStoryCount = 0
        client = DevshopClient()
        client.doReset()
        state = client.getState()
        # Gym expects reset() to return the initial observation.
        return np.array((state.inboxStoryCount / 100, state.backlogStoryCount / 100,
                         state.testStoryCount / 100, state.doneStoryCount / 100,
                         state.founderFree, state.newProjectCost / 150, state.bank / 2000))
def render(self, mode='human', close=False):
# Render the environment to the screen
print(f'Step: {self.current_step}')
print(f'bank: {self.bank}')
print(
f'Story Counts: {self.inboxStoryCount}|{self.backlogStoryCount}|{self.devStoryCount}|{self.testStoryCount}')
print(self.founderFree)
print(f'New project cost: {self.newProjectCost}')
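# Hedged usage sketch (assumes a Devshop backend is reachable through
# DevshopClient); it is not part of the original environment module.
def _example_random_rollout(episodes=1):
    env = DevshopEnv()
    for _ in range(episodes):
        obs = env.reset()
        for _ in range(MAX_STEPS):
            action = env.action_space.sample()   # random policy
            obs, reward, done, info = env.step(action)
            if done:
                break
    return env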
```
|
{
"source": "JGFahey/Autohood",
"score": 2
}
|
#### File: RobinhoodScripts/autohood/login.py
```python
import robin_stocks as robinhood
def login(username, password):
    # Return the session payload so callers can confirm the login succeeded.
    return robinhood.login(username, password)
```
|
{
"source": "jg-fisher/indeed-bot",
"score": 3
}
|
#### File: jg-fisher/indeed-bot/main.py
```python
import os
import sys
import time
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
class IndeedBot:
def __init__(self):
"""
        Initializes the Chrome webdriver.
Sets the job search query string
self.driver:selenium.webdriver.Chrome
self.query_string:str
self.jobs:arr
self.express_apply_jobs:arr
"""
self.driver = webdriver.Chrome('./chromedriver.exe')
self.query_string = "https://www.indeed.com/jobs?q={job}&l={city}%2C+{state}"
self.jobs = []
self.express_apply_jobs = []
def nav(self, url):
"""
Navigates to a given url
Args:
url:str url chromedriver Chrome instance navigates to.
"""
self.driver.get(url)
time.sleep(3) # wait for page load
def __convert_query(self, job, city, state):
"""
Reformats the query for expected syntax of the search
Args:
job:str: Job type to search for.
city:str: City location of the job.
state:str State location of the job.
Returns:
job:str
city:str
state:str
"""
job = '+'.join(job.split(" "))
city = city.lower()
# State must be valid two letter code
if len(state) != 2:
raise Exception("State must be valid two letter code.")
state = state.upper()
return job, city, state
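    # Worked example (added note): __convert_query('python developer', 'Dallas', 'tx')
    # returns ('python+developer', 'dallas', 'TX'), so query() navigates to
    # https://www.indeed.com/jobs?q=python+developer&l=dallas%2C+TX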
def query(self, job, city, state):
"""
Searches indeed for a job in given city and state
Args:
job:str: Job type to search for.
city:str: City location of the job.
state:str State location of the job.
"""
job, city, state = self.__convert_query(job, city, state)
query = self.query_string.format(job=job, city=city, state=state)
self.nav(query)
def find_express_jobs(self):
""""
Called after chromedriver Chrome instance navigates to job search results.
Fills list with express jobs in search results.
"""
self.jobs = self.driver.find_elements_by_class_name("jobsearch-SerpJobCard")
print(f'Number of jobs {len(self.jobs)}')
for job in self.jobs:
try: # Express apply indicator
job.find_element_by_class_name('jobCardShelfContainer')
self.express_apply_jobs.append(job)
except: # Job is not express apply
pass
def apply_to_express_jobs(self, profile):
"""
Extracts jobs with express apply.
Args:
profile:dict
"""
print(f'Number of express jobs {len(self.express_apply_jobs)}')
for job in self.express_apply_jobs:
self.__process_job(job)
self.__process_apply_button()
self.__fill_applicant_form(profile)
# self.driver.find_element_by_id('form-action-continue').click()
def __process_apply_button(self):
apply_button = self.driver.find_element_by_id('indeedApplyButtonContainer')
apply_button.click()
time.sleep(4)
def __process_job(self, job):
"""
Refines url of job posting and navigates to it
Args:
job:Selenium.Webdriver.Chrome.WebElement
"""
job_a_tag = job.find_element_by_tag_name('a')
job_href = job_a_tag.get_attribute('href')
# Removing all extraneous indeed url query string parameters
job_href = job_href.split('&from')[0]
self.nav(job_href)
def __fill_applicant_form(self, profile):
"""
Finds elements on the applicant form
Args:
profile:dict
"""
actions = ActionChains(self.driver)
actions.send_keys(profile['name'] + Keys.TAB + \
profile['email'] + Keys.TAB + \
profile['phone_number'] + Keys.TAB)
actions.perform()
if __name__ == '__main__':
profile = {
'name': "<NAME>",
'email': "<EMAIL>",
'phone_number': '860-364-3249',
'resume': os.getcwd() + '\\resume.txt'
}
id_bot = IndeedBot()
# keywords, city, state
id_bot.query('python developer', 'dallas', 'tx')
id_bot.find_express_jobs()
id_bot.apply_to_express_jobs(profile)
```
|
{
"source": "jgfoster/GciForPython",
"score": 2
}
|
#### File: jgfoster/GciForPython/GciClasses.py
```python
from ctypes import *
from typing import Type
GciSession: Type[c_void_p] = c_void_p
OopType: Type[c_longlong] = c_int64
GCI_ERR_STR_SIZE = 1024
GCI_ERR_reasonSize = GCI_ERR_STR_SIZE
GCI_MAX_ERR_ARGS = 10
OOP_ILLEGAL = 1
OOP_NIL = 20
class GciErrSType(Structure):
"""
see $GEMSTONE/include/gci.ht
"""
_fields_ = [
('category', OopType), # error dictionary
('context', OopType), # a GsProcess
('exceptionObj', OopType), # an AbstractException or nil
('args', OopType * GCI_MAX_ERR_ARGS), # arguments
('number', c_int), # GemStone error number
('argCount', c_int), # num of arg in the args[]
('fatal', c_ubyte), # nonzero if err is fatal
('message', c_char * (GCI_ERR_STR_SIZE + 1)), # null-terminated Utf8
('reason', c_char * (GCI_ERR_reasonSize + 1)) # null-terminated Utf8
]
def __repr__(self):
return 'aGciErrSType'
def __str__(self):
return 'GciErrSType(category=' + hex(self.category) + \
', context=' + hex(self.context) + \
', exceptionObj=' + hex(self.exceptionObj) + \
', args=' + str(list(map(hex, self.args))[0:self.argCount]) + \
', number=' + str(self.number) + \
', argCount=' + str(self.argCount) + \
', fatal=' + str(self.fatal) + \
', message=' + str(self.message) + \
', reason=' + str(self.reason) + ')'
class Error(Exception):
"""Base class for other exceptions"""
pass
class InvalidArgumentError(Error):
"""Invalid argument for GCI function"""
pass
class GciException(Error):
def __init__(self, ex: GciErrSType):
super().__init__(str(ex.message))
self.ex = ex
def number(self):
return self.ex.number
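# Hedged usage sketch (not part of the original module): surfacing a GemStone
# error carried in a GciErrSType as a Python exception.  Treating OOP_NIL in
# the exceptionObj slot as "no error pending" is an assumption.
def _example_raise_if_error(err: GciErrSType):
    if err.exceptionObj != OOP_NIL:
        raise GciException(err)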
```
|
{
"source": "jggatc/interphase",
"score": 2
}
|
#### File: interphase/interphase/__init__.py
```python
try:
import pygame as engine
except ImportError:
engine = None
initialized = False
def get_init():
"""
Check if module is initialized.
"""
return initialized
def _init(engine):
global Interface, EVENT, Control, FunctionControl, Label, Textbox, Text, load_image, __version__, initialized
from interphase import env
env.engine = engine
from interphase.interface import Interface, EVENT
from interphase.control import Control, FunctionControl, Label, Textbox
from interphase.util import Text, load_image
from interphase.version import __version__
initialized = True
def init(engine):
"""
Initialize module. Argument engine is the multimedia framework object.
Engine can be Pygame, PyJ2D, or Pyjsdl.
Module initialized if Pygame can be imported.
"""
if initialized:
return
_init(engine)
if engine.__name__ == 'pyjsdl':
from interphase.image import _load_default_images
_load_default_images()
if engine:
_init(engine)
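# Hedged usage sketch (not executed on import; assumes the pyj2d package is
# importable): initializing the module with an explicitly chosen framework
# when Pygame is unavailable.
def _example_init_with_pyj2d():
    import pyj2d
    init(pyj2d)
    return get_init()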
```
|
{
"source": "jggatc/pyj2d",
"score": 3
}
|
#### File: jggatc/pyj2d/numeric.py
```python
numeric = None
if not numeric:
try:
from com.github.tbekolay.jnumeric import JNumeric as numeric
except ImportError:
try:
import Numeric as numeric
except ImportError:
pass
def set_numeric_module(module):
"""
Set numeric module if imported a different numeric package.
"""
global numeric
numeric = module
def get_numeric_module():
"""
Get numeric module. Return None if module not present.
"""
return numeric
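# Hedged usage sketch (not part of the module): pointing pyj2d at an
# alternative numeric implementation.  The module passed in is assumed to
# expose a JNumeric/Numeric-style API (zeros, reshape, transpose, ...).
def _example_swap_numeric(module):
    set_numeric_module(module)
    return get_numeric_module()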
```
#### File: jggatc/pyj2d/surface.py
```python
from java.awt.image import BufferedImage, RasterFormatException
from java.lang import ArrayIndexOutOfBoundsException
from java.util import Hashtable
from pyj2d.rect import Rect
from pyj2d.color import Color
from pyj2d import locals as Const
__docformat__ = 'restructuredtext'
class Surface(BufferedImage):
"""
**pyj2d.Surface**
* Surface.get_size
* Surface.get_width
* Surface.get_height
* Surface.get_rect
* Surface.copy
* Surface.subsurface
* Surface.blit
* Surface.set_colorkey
* Surface.get_colorkey
* Surface.replace_color
* Surface.get_at
* Surface.set_at
* Surface.fill
* Surface.get_parent
* Surface.get_offset
"""
def __init__(self, *arg):
"""
Return Surface that is subclassed from java.awt.image.BufferedImage.
Alternative arguments:
* Size (w,h) of surface, optional second argument of flags (SRCALPHA)
* Bufferedimage to convert to Surface
Module initialization places pyj2d.Surface in module's namespace.
"""
try:
width, height = arg[0]
try:
if arg[1] & (BufferedImage.TYPE_INT_ARGB | Const.SRCALPHA):
BufferedImage.__init__(self, width, height, BufferedImage.TYPE_INT_ARGB)
else:
BufferedImage.__init__(self, width, height, BufferedImage.TYPE_INT_RGB)
except IndexError:
BufferedImage.__init__(self, width, height, BufferedImage.TYPE_INT_ARGB)
graphics2D = self.createGraphics()
graphics2D.setColor(Color(0,0,0))
graphics2D.fillRect(0, 0, width, height)
graphics2D.dispose()
except TypeError:
try:
cm = arg[0].getColorModel()
raster = arg[0].getRaster()
isRasterPremultiplied = arg[0].isAlphaPremultiplied()
properties = Hashtable()
keys = arg[0].getPropertyNames()
if keys != None:
for key in keys:
properties.put(key,arg[0].getProperty(key))
except AttributeError:
cm, raster, isRasterPremultiplied, properties = arg
BufferedImage.__init__(self, cm, raster, isRasterPremultiplied, properties)
self._display = None #display surface
self._super_surface = None
self._offset = (0,0)
self._colorkey = None
self._nonimplemented_methods()
def __str__(self):
s = '<%s(%dx%d)>'
return s % (self.__class__.__name__, self.width, self.height)
def __repr__(self):
return self.__str__()
def get_size(self):
"""
Return width and height of surface.
"""
return (self.width, self.height)
def get_width(self):
"""
Return width of surface.
"""
return self.width
def get_height(self):
"""
Return height of surface.
"""
return self.height
def get_rect(self, **attr):
"""
Return rect of the surface.
An optional keyword argument of the rect position.
"""
rect = Rect(0, 0, self.width, self.height)
for key in attr:
getattr(rect, '_set_'+key)(attr[key])
return rect
def copy(self):
"""
Return Surface that is a copy of this surface.
"""
if not self._super_surface:
img_properties = Hashtable()
keys = self.getPropertyNames()
if keys != None:
for key in keys:
img_properties.put(key,self.getProperty(key))
surface = Surface(
self.getColorModel(),
self.getData(),
self.isAlphaPremultiplied(),
img_properties
)
surface._colorkey = self._colorkey
else:
surface = Surface((self.width,self.height), BufferedImage.TYPE_INT_ARGB)
g2d = surface.createGraphics()
g2d.drawImage(self, 0, 0, None)
g2d.dispose()
surface._colorkey = self._colorkey
return surface
def subsurface(self, rect):
"""
Return Surface that represents a subsurface that shares data with this surface.
The rect argument is the area of the subsurface.
"""
try:
try:
subsurf = self.getSubimage(rect.x, rect.y, rect.width, rect.height)
except AttributeError:
rect = Rect(rect)
subsurf = self.getSubimage(rect.x, rect.y, rect.width, rect.height)
except RasterFormatException:
try:
rect = self.get_rect().intersection(rect)
subsurf = self.getSubimage(rect.x, rect.y, rect.width, rect.height)
except:
raise ValueError('subsurface outside surface area')
surface = Surface(subsurf)
surface._super_surface = self
surface._offset = (rect.x,rect.y)
surface._colorkey = self._colorkey
return surface
def blit(self, surface, position, area=None):
"""
Draw given surface on this surface at position.
Optional area delimitates the region of given surface to draw.
"""
g2d = self.createGraphics()
if not area:
rect = Rect(position[0],position[1],surface.width,surface.height)
g2d.drawImage(surface, rect.x, rect.y, None)
else:
rect = Rect(position[0],position[1],area[2],area[3])
g2d.drawImage(surface, rect.x,rect.y,rect.x+area[2],rect.y+area[3],
area[0],area[1],area[0]+area[2],area[1]+area[3], None)
g2d.dispose()
return self.get_rect().clip(rect)
def _blits(self, surfaces):
g2d = self.createGraphics()
for surface, rect in surfaces:
g2d.drawImage(surface, rect.x, rect.y, None)
g2d.dispose()
def _blit_clear(self, surface, rect_list):
g2d = self.createGraphics()
for r in rect_list:
g2d.drawImage(surface, r.x,r.y,r.x+r.width,r.y+r.height,
r.x,r.y,r.x+r.width,r.y+r.height, None)
g2d.dispose()
def set_colorkey(self, color, flags=None):
"""
Set surface colorkey.
"""
if self._colorkey:
r,g,b = self._colorkey.r,self._colorkey.g,self._colorkey.b
self.replace_color((r,g,b,0),self._colorkey)
self._colorkey = None
if color:
self._colorkey = Color(color)
self.replace_color(self._colorkey)
return None
def get_colorkey(self):
"""
Return surface colorkey.
"""
if self._colorkey:
return ( self._colorkey.r,
self._colorkey.g,
self._colorkey.b,
self._colorkey.a )
else:
return None
def replace_color(self, color, new_color=None):
"""
        Replace color with new_color, or with alpha if new_color is not given.
"""
pixels = self.getRGB(0,0,self.width,self.height,None,0,self.width)
if hasattr(color, 'a'):
color1 = color
else:
color1 = Color(color)
if new_color is None:
color2 = Color(color1.r,color1.g,color1.b,0)
else:
if hasattr(new_color, 'a'):
color2 = new_color
else:
color2 = Color(new_color)
for i, pixel in enumerate(pixels):
if pixel == color1.getRGB():
pixels[i] = color2.getRGB()
self.setRGB(0,0,self.width,self.height,pixels,0,self.width)
return None
def get_at(self, pos):
"""
Return color of a surface pixel.
The pos argument represents x,y position of pixel.
"""
try:
return Color(self.getRGB(pos[0],pos[1]))
except ArrayIndexOutOfBoundsException:
raise IndexError('pixel index out of range')
def set_at(self, pos, color):
"""
Set color of a surface pixel.
The arguments represent position x,y and color of pixel.
"""
color = Color(color)
try:
self.setRGB(pos[0],pos[1],color.getRGB())
except ArrayIndexOutOfBoundsException:
raise IndexError('pixel index out of range')
return None
def fill(self, color=(0,0,0), rect=None):
"""
Fill surface with color.
"""
g2d = self.createGraphics()
color = Color(color)
g2d.setColor(color)
if not rect:
rect = Rect(0, 0, self.width, self.height)
else:
rect = Rect(rect)
g2d.fillRect(rect.x, rect.y, rect.width, rect.height)
g2d.dispose()
return rect
def get_parent(self):
"""
Return parent Surface of subsurface.
"""
return self._super_surface #if delete, delete subsurface...
def get_offset(self):
"""
Return offset of subsurface in surface.
"""
return self._offset
def _nonimplemented_methods(self):
self.convert = lambda *arg: self
self.convert_alpha = lambda *arg: self
self.set_alpha = lambda *arg: None
self.get_alpha = lambda *arg: None
self.lock = lambda *arg: None
self.unlock = lambda *arg: None
self.mustlock = lambda *arg: False
self.get_locked = lambda *arg: False
self.get_locks = lambda *arg: ()
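# Hedged usage sketch (assumes a Jython runtime with java.awt available; not
# part of the original module): compose two surfaces and read a pixel back.
def _example_compose():
    background = Surface((64, 48))
    background.fill((32, 32, 32))
    sprite = Surface((16, 16), Const.SRCALPHA)
    sprite.fill((255, 0, 0))
    sprite.set_colorkey((255, 0, 0))      # red pixels become transparent
    dirty_rect = background.blit(sprite, (10, 10))
    return background.get_at((0, 0)), dirty_rect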
```
#### File: jggatc/pyj2d/surfarray.py
```python
from java.awt.image import BufferedImage
from pyj2d.surface import Surface
__docformat__ = 'restructuredtext'
__doc__ = 'Surface pixel manipulation'
_initialized = False
def _init():
"""
Initialize surfarray module.
"""
global numeric, _initialized
from pyj2d.numeric import numeric
if not numeric:
raise ImportError("JNumeric module is required.")
_initialized = True
def array2d(surface):
"""
Return data array of the Surface argument.
Array consists of pixel data arranged by [x,y] in integer color format.
JNumeric required as specified in numeric module.
"""
if not _initialized:
_init()
data = numeric.zeros((surface.width*surface.height), 'i')
data = surface.getRGB(0, 0, surface.width, surface.height, data, 0, surface.width)
array = numeric.reshape(data, (surface.width,surface.height))
return array
def array3d(surface):
"""
Return data array of the Surface argument.
Array consists of pixel data arranged by [x,y] in RGB format.
JNumeric required as specified in numeric module.
"""
if not _initialized:
_init()
data = surface.getRGB(0, 0, surface.width, surface.height, None, 0, surface.width)
data = numeric.array([(dat>>16 & 0xff, dat>>8 & 0xff, dat & 0xff) for dat in data])
array = numeric.reshape(data, (surface.width,surface.height,3))
return array
def array_alpha(surface):
"""
Return data array of the Surface argument.
Array consists of pixel data arranged by [x,y] of pixel alpha value.
JNumeric required as specified in numeric module.
"""
if not _initialized:
_init()
data = surface.getRGB(0, 0, surface.width, surface.height, None, 0, surface.width)
data = numeric.array([dat>>24 & 0xff for dat in data], numeric.Int8)
array = numeric.reshape(data, (surface.width,surface.height))
return array
def make_surface(array):
"""
Generates image pixels from array data.
Argument array containing image data.
Return Surface generated from array.
JNumeric required as specified in numeric module.
"""
if not _initialized:
_init()
surface = Surface((array.shape[0],array.shape[1]))
blit_array(surface, array)
return surface
def blit_array(surface, array):
"""
Generates image pixels from a JNumeric array.
Arguments include destination Surface and array of integer colors.
JNumeric required as specified in numeric module.
"""
if not _initialized:
_init()
if len(array.shape) == 2:
data = numeric.transpose(array, (1,0))
data = numeric.ravel(data)
else:
data = array[:,:,0]*0x10000 | array[:,:,1]*0x100 | array[:,:,2]
data = numeric.transpose(data, (1,0))
data = numeric.ravel(data)
if not surface.getColorModel().hasAlpha():
surface.setRGB(0, 0, surface.width, surface.height, data, 0, surface.width)
else:
surf = Surface((surface.width,surface.height), BufferedImage.TYPE_INT_RGB)
surf.setRGB(0, 0, surface.width, surface.height, data, 0, surface.width)
g2d = surface.createGraphics()
g2d.drawImage(surf, 0, 0, None)
g2d.dispose()
return None
use_arraytype = lambda *arg: None
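# Hedged usage sketch (requires JNumeric under Jython; not part of the
# original module): round-trip a surface through an integer pixel array.
def _example_round_trip(surface):
    pixels = array2d(surface)      # [x, y] array of integer colors
    return make_surface(pixels)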
```
#### File: jggatc/pyj2d/transform.py
```python
from math import pi as _pi, fabs as _fabs, sin as _sin, cos as _cos, ceil as _ceil
from java.awt.image import BufferedImage, AffineTransformOp
from java.awt import RenderingHints
from java.awt.geom import AffineTransform
from pyj2d.surface import Surface
__docformat__ = 'restructuredtext'
__doc__ = 'Surface transformation'
_deg_rad = _pi/180.0
def rotate(surface, angle):
"""
Return Surface rotated by the given angle.
"""
if not angle:
return surface.copy()
theta = angle*_deg_rad
width_i = surface.getWidth()
height_i = surface.getHeight()
cos_theta = _fabs( _cos(theta) )
sin_theta = _fabs( _sin(theta) )
width_f = int( (width_i*cos_theta)+(height_i*sin_theta) )
height_f = int( (width_i*sin_theta)+(height_i*cos_theta) )
surf = Surface((width_f,height_f), BufferedImage.TYPE_INT_ARGB)
at = AffineTransform()
at.translate(width_f/2.0, height_f/2.0)
at.rotate(-theta)
g2d = surf.createGraphics()
ot = g2d.getTransform()
g2d.setTransform(at)
g2d.setRenderingHint(RenderingHints.KEY_INTERPOLATION, RenderingHints.VALUE_INTERPOLATION_BILINEAR)
g2d.drawImage(surface, -width_i//2, -height_i//2, None)
g2d.setTransform(ot)
g2d.dispose()
return surf
def rotozoom(surface, angle, size):
"""
Return Surface rotated and resized by the given angle and size.
"""
if not angle:
width = int(surface.getWidth()*size)
height = int(surface.getHeight()*size)
return scale(surface, (width, height))
theta = angle*_deg_rad
width_i = int(surface.getWidth()*size)
height_i = int(surface.getHeight()*size)
cos_theta = _fabs( _cos(theta) )
sin_theta = _fabs( _sin(theta) )
width_f = int( _ceil((width_i*cos_theta)+(height_i*sin_theta)) )
if width_f % 2:
width_f += 1
height_f = int( _ceil((width_i*sin_theta)+(height_i*cos_theta)) )
if height_f % 2:
height_f += 1
surf = Surface((width_f,height_f), BufferedImage.TYPE_INT_ARGB)
at = AffineTransform()
at.translate(width_f/2.0, height_f/2.0)
at.rotate(-theta)
g2d = surf.createGraphics()
ot = g2d.getTransform()
g2d.setTransform(at)
g2d.setRenderingHint(RenderingHints.KEY_INTERPOLATION, RenderingHints.VALUE_INTERPOLATION_BILINEAR)
g2d.drawImage(surface, -width_i//2, -height_i//2, width_i, height_i, None)
g2d.setTransform(ot)
g2d.dispose()
return surf
def scale(surface, size, dest=None):
"""
Return Surface resized by the given size.
An optional destination surface can be provided.
"""
if not dest:
surf = Surface(size, BufferedImage.TYPE_INT_ARGB)
else:
surf = dest
g2d = surf.createGraphics()
g2d.setRenderingHint(RenderingHints.KEY_INTERPOLATION, RenderingHints.VALUE_INTERPOLATION_BILINEAR)
g2d.drawImage(surface, 0, 0, size[0], size[1], None)
g2d.dispose()
return surf
def smoothscale(surface, size, dest=None):
"""
Return Surface resized by the given size.
An optional destination surface can be provided.
"""
return scale(surface, size, dest)
def scale2x(surface, dest=None):
"""
Return Surface resized to twice its size.
An optional destination surface can be provided.
"""
return scale(surface, (surface.getWidth()*2,surface.getHeight()*2), dest)
def flip(surface, xbool=True, ybool=False):
"""
Return Surface that is flipped horizontally, vertically, or both.
"""
if xbool and ybool:
at = AffineTransform.getScaleInstance(-1, -1)
        at.translate(-surface.getWidth(), -surface.getHeight())
elif xbool:
at = AffineTransform.getScaleInstance(-1, 1)
at.translate(-surface.getWidth(), 0)
elif ybool:
at = AffineTransform.getScaleInstance(1, -1)
at.translate(0, -surface.getHeight())
else:
return surface
op = AffineTransformOp(at, AffineTransformOp.TYPE_BILINEAR)
bimage = op.filter(surface, None)
surf = Surface(bimage)
return surf
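# Hedged usage sketch (not part of the original module): chaining transforms.
def _example_transform(surface):
    rotated = rotate(surface, 45)              # rotate by 45 degrees
    doubled = scale2x(rotated)                 # double the size
    return flip(doubled, xbool=True, ybool=False)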
```
|
{
"source": "jggatc/pyjsbitset",
"score": 2
}
|
#### File: jggatc/pyjsbitset/pyjsarray.py
```python
from __pyjamas__ import JS
class PyTypedArray:
"""
PyTypedArray is the base class that wraps the JavaScript TypedArray objects.
The derived objects provides an interface to the JavaScript array objects:
PyUint8ClampedArray [Uint8ClampedArray]
PyUint8Array [Uint8Array]
PyUint16Array [Uint16Array]
PyUint32Array [Uint32Array]
PyInt8Array [Int8Array]
PyInt16Array [Int16Array]
PyInt32Array [Int32Array]
PyFloat32Array [Float32Array]
PyFloat64Array [Float64Array]
The PyTypedArray interface include index syntax, iteration, and math operations.
The module contains an Ndarray class to instantiate N-dimensional arrays, and PyImageData and PyImageMatrix classes that provide an interface to canvas ImageData.
"""
def __init__(self, data=None, offset=None, length=None, typedarray=None):
"""
The PyTypedArray is instantiated with either the array size, an array of the TypedArray or Python type, or an existing ArrayBuffer to view, which creates a new TypedArray of size and included data as the specified type. Optional arguments include offset index at which ArrayBuffer data is inserted and length of an ArrayBuffer.
"""
if data:
if isinstance(data, int):
if pyjs_mode.optimized:
self.__data = JS("""new @{{typedarray}}(@{{data}})""")
else:
self.__data = JS("""new @{{typedarray}}(@{{data}}['valueOf']())""")
elif isinstance(data, (list,tuple)):
if pyjs_mode.optimized:
self.__data = JS("""new @{{typedarray}}(@{{data}}['getArray']())""")
else:
data = [dat.valueOf() for dat in data]
self.__data = JS("""new @{{typedarray}}(@{{data}}['getArray']())""")
elif isinstance(data, PyTypedArray):
self.__data = JS("""new @{{typedarray}}(@{{data}}['__data'])""")
else: #TypedArray or ArrayBuffer
if offset is None and length is None:
self.__data = JS("""new @{{typedarray}}(@{{data}})""")
else:
if offset is None:
offset = 0
if length is None:
self.__data = JS("""new @{{typedarray}}(@{{data}}, @{{offset}})""")
else:
self.__data = JS("""new @{{typedarray}}(@{{data}}, @{{offset}}, @{{length}})""")
else:
self.__data = None
def __str__(self):
"""
Return string representation of PyTypedArray object.
"""
return self.__data.toString()
def __iter__(self):
"""
Iterate over PyTypedArray object.
"""
index = 0
while index < self.__data.length:
yield self[index]
index += 1
def __getitem__(self, index):
"""
Get TypedArray element by index.
"""
return JS("""@{{int}}(@{{self}}['__data'][@{{index}}]);""")
def __setitem__(self, index, value):
"""
Set TypedArray element by index.
"""
if pyjs_mode.optimized:
JS("""@{{self}}['__data'][@{{index}}]=@{{value}};""")
else:
value = value.valueOf()
JS("""@{{self}}['__data'][@{{index}}]=@{{value}};""")
return None
def __len__(self):
"""
Get TypedArray array length.
"""
return self.__data.length
def set(self, data, offset=0):
"""
Set data to the array. Arguments: data is a list of either the TypedArray or Python type, offset is the start index where data will be set (defaults to 0).
"""
if isinstance(data, (list,tuple)):
if pyjs_mode.optimized:
self.__data.set(data.getArray(), offset)
else:
data = [dat.valueOf() for dat in data]
self.__data.set(data.getArray(), offset)
elif isinstance(data, PyTypedArray):
self.__data.set(data.__data, offset)
def subarray(self, begin, end=None):
"""
Retrieve a subarray of the array. The subarray is a TypedArray and is a view of the derived array. Arguments begin and optional end (defaults to array end) are the index spanning the subarray.
"""
if end is None:
end = self.__data.length
array = self.__data.subarray(begin, end)
pytypedarray = self.__class__()
pytypedarray.__data = array
return pytypedarray
def getLength(self):
"""
Return array.length attribute.
"""
return self.__data.length
def getByteLength(self):
"""
Return array.byteLength attribute.
"""
return self.__data.byteLength
def getBuffer(self):
"""
Return array.buffer attribute.
"""
return self.__data.buffer
def getByteOffset(self):
"""
Return array.byteOffset attribute.
"""
return self.__data.byteOffset
def getBytesPerElement(self):
"""
Return array.BYTES_PER_ELEMENT attribute.
"""
return self.__data.BYTES_PER_ELEMENT
def getArray(self):
"""
Return JavaScript TypedArray.
"""
return self.__data
def setArray(self, array):
"""
Set JavaScript TypedArray.
"""
self.__data = array
return None
class PyUint8ClampedArray(PyTypedArray):
"""
Create a PyTypedArray interface to Uint8ClampedArray.
"""
def __init__(self, data=None, offset=None, length=None):
try:
PyTypedArray.__init__(self, data, offset, length, typedarray=Uint8ClampedArray)
except (TypeError, AttributeError): #-O/-S:TypeError/AttributeError
if isUndefined(typedarray):
raise NotImplementedError("TypedArray data type not implemented")
else:
raise
class PyUint8Array(PyTypedArray):
"""
Create a PyTypedArray interface to Uint8Array.
"""
def __init__(self, data=None, offset=None, length=None):
try:
PyTypedArray.__init__(self, data, offset, length, typedarray=Uint8Array)
except (TypeError, AttributeError):
if isUndefined(typedarray):
raise NotImplementedError("TypedArray data type not implemented")
else:
raise
class PyUint16Array(PyTypedArray):
"""
Create a PyTypedArray interface to Uint16Array.
"""
def __init__(self, data=None, offset=None, length=None):
try:
PyTypedArray.__init__(self, data, offset, length, typedarray=Uint16Array)
except (TypeError, AttributeError):
if isUndefined(typedarray):
raise NotImplementedError("TypedArray data type not implemented")
else:
raise
class PyUint32Array(PyTypedArray):
"""
Create a PyTypedArray interface to Uint32Array.
"""
def __init__(self, data=None, offset=None, length=None):
try:
PyTypedArray.__init__(self, data, offset, length, typedarray=Uint32Array)
except (TypeError, AttributeError):
if isUndefined(typedarray):
raise NotImplementedError("TypedArray data type not implemented")
else:
raise
class PyInt8Array(PyTypedArray):
"""
Create a PyTypedArray interface to Int8Array.
"""
def __init__(self, data=None, offset=None, length=None):
try:
PyTypedArray.__init__(self, data, offset, length, typedarray=Int8Array)
except (TypeError, AttributeError):
if isUndefined(typedarray):
raise NotImplementedError("TypedArray data type not implemented")
else:
raise
class PyInt16Array(PyTypedArray):
"""
Create a PyTypedArray interface to Int16Array.
"""
def __init__(self, data=None, offset=None, length=None):
try:
PyTypedArray.__init__(self, data, offset, length, typedarray=Int16Array)
except (TypeError, AttributeError):
if isUndefined(typedarray):
raise NotImplementedError("TypedArray data type not implemented")
else:
raise
class PyInt32Array(PyTypedArray):
"""
Create a PyTypedArray interface to Int32Array.
"""
def __init__(self, data=None, offset=None, length=None):
try:
PyTypedArray.__init__(self, data, offset, length, typedarray=Int32Array)
except (TypeError, AttributeError):
if isUndefined(typedarray):
raise NotImplementedError("TypedArray data type not implemented")
else:
raise
class PyFloat32Array(PyTypedArray):
"""
Create a PyTypedArray interface to Float32Array.
"""
def __init__(self, data=None, offset=None, length=None):
try:
PyTypedArray.__init__(self, data, offset, length, typedarray=Float32Array)
except (TypeError, AttributeError):
if isUndefined(typedarray):
raise NotImplementedError("TypedArray data type not implemented")
else:
raise
def __getitem__(self, index):
"""
Get TypedArray element by index.
"""
return JS("""@{{self}}['__data'][@{{index}}];""")
class PyFloat64Array(PyTypedArray):
"""
Create a PyTypedArray interface to Float64Array.
"""
def __init__(self, data=None, offset=None, length=None):
try:
PyTypedArray.__init__(self, data, offset, length, typedarray=Float64Array)
except (TypeError, AttributeError):
if isUndefined(typedarray):
raise NotImplementedError("TypedArray data type not implemented")
else:
raise
def __getitem__(self, index):
"""
Get TypedArray element by index.
"""
return JS("""@{{self}}['__data'][@{{index}}];""")
class PyCanvasPixelArray(PyTypedArray):
"""
Create a PyTypedArray interface to CanvasPixelArray.
"""
def __init__(self, data=None, offset=None, length=None):
PyTypedArray.__init__(self, data, offset, length)
self._superArray = None
self._superIndex = (0,0)
def __iter__(self):
"""
Iterate over PyTypedArray object.
"""
if not self._superArray:
PyTypedArray.__iter__(self)
else:
index = self._superIndex[0]
while index < self._superIndex[1]:
yield self._superArray[index]
index += 1
def __getitem__(self, index):
"""
Get TypedArray element by index.
"""
if not self._superArray:
return PyTypedArray.__getitem__(self, index)
else:
return self._superArray.__getitem__(index+self._superIndex[0])
def __setitem__(self, index, value):
"""
Set TypedArray element by index.
"""
if not self._superArray:
PyTypedArray.__setitem__(self, index, value)
else:
self._superArray.__setitem__(index+self._superIndex[0], value)
return None
def set(self, data, offset=0):
"""
Set data to the array. Arguments: data is a list of either the TypedArray or Python type, offset is the start index where data will be set (defaults to 0).
"""
if not self._superArray:
for index in xrange(len(data)):
self[index+offset] = data[index]
else:
self._superArray.set(data, offset+self._superIndex[0])
def subarray(self, begin, end=None):
"""
Retrieve a subarray of the array. The subarray is a view of the derived array. Arguments begin and optional end (defaults to array end) are the index spanning the subarray.
"""
if end is None:
end = self.__data.length
array = self.__class__()
array.__data = self.__data
array._superArray = self
array._superIndex = (begin,end)
return array
class Ndarray:
__typedarray = {0: PyUint8ClampedArray,
1: PyUint8Array,
2: PyUint16Array,
3: PyUint32Array,
4: PyInt8Array,
5: PyInt16Array,
6: PyInt32Array,
7: PyFloat32Array,
8: PyFloat64Array}
def __init__(self, dim, dtype=8):
"""
Generate an N-dimensional array of TypedArray data.
Argument can be size (int or tuple) or data (list or TypedArray).
Optional argument dtype (default:8) specifies TypedArray data type:
0: Uint8ClampedArray
1: Uint8Array
2: Uint16Array
3: Uint32Array
4: Int8Array
5: Int16Array
6: Int32Array
7: Float32Array
8: Float64Array
"""
self._dtype = dtype
if isinstance(dim, tuple):
size = 1
for i in dim:
size *= i
self.__data = Ndarray.__typedarray[dtype](size)
self._shape = dim
indices = []
for i in self._shape:
size /= i
indices.append(size)
self._indices = tuple(indices)
elif isinstance(dim, int):
self.__data = Ndarray.__typedarray[dtype](dim)
self._shape = (dim,)
self._indices = (self._shape[0],)
elif isinstance(dim, list):
self.__data = Ndarray.__typedarray[dtype](dim)
self._shape = (len(dim),)
self._indices = (self._shape[0],)
else:
self.__data = dim
self._shape = (len(dim),)
self._indices = (self._shape[0],)
@property
def shape(self): #not implemented in pyjs -O
"""
Return array shape.
Ndarray.shape accessible with compilation in --strict mode.
"""
return self._shape
@shape.setter
def shape(self, dim): #not implemented in pyjs -O
"""
Set shape of array.
Argument is new shape.
Raises TypeError if shape is not appropriate.
Ndarray.shape accessible with compilation in --strict mode.
"""
self.setshape(dim)
return None
def __getitem__(self, index):
try:
indexLn, shapeLn = index.__len__(), len(self._shape) #len(0) no exception
if indexLn == shapeLn:
return self.__data[sum([index[i]*self._indices[i] for i in range(indexLn)])]
else:
begin = sum([index[i]*self._indices[i] for i in range(indexLn)])
end = begin + self._indices[indexLn-1]
subarray = self.__data.subarray(begin, end)
array = Ndarray(subarray, self._dtype)
array._shape = self._shape[indexLn:]
array._indices = self._indices[indexLn:]
return array
except (TypeError,AttributeError): #index[i] if index is int raises both AttributeError -S and TypeError -O. No exception for len(index) if index is int.
if len(self._shape) == 1:
return self.__data[index]
else:
begin = index * self._indices[0]
end = begin + self._indices[0]
subarray = self.__data.subarray(begin, end)
array = Ndarray(subarray, self._dtype)
array._shape = self._shape[1:]
array._indices = self._indices[1:]
return array
def __setitem__(self, index, value):
def unpack(obj, lst=None):
if lst is None:
lst = []
for element in obj:
if isinstance(element, (list,tuple)):
unpack(element, lst)
else:
lst.append(element)
return lst
try:
indexLn, shapeLn = index.__len__(), len(self._shape)
if indexLn == shapeLn:
self.__data[sum([index[i]*self._indices[i] for i in range(indexLn)])] = value
else:
begin = sum([index[i]*self._indices[i] for i in range(indexLn)])
end = begin + self._indices[indexLn-1]
subarray = self.__data.subarray(begin, end)
if isinstance(value, Ndarray):
value = value.__data
else:
if isinstance(value[0], (list,tuple)):
value = unpack(value)
subarray.set(value)
except (TypeError,AttributeError):
if len(self._shape) == 1:
self.__data[index] = value
else:
begin = index * self._indices[0]
end = begin + self._indices[0]
subarray = self.__data.subarray(begin, end)
if isinstance(value, Ndarray):
value = value.__data
else:
if isinstance(value[0], (list,tuple)):
value = unpack(value)
subarray.set(value)
return None
def __getslice__(self, lower, upper):
subarray = self.__data.subarray(lower, upper)
return Ndarray(subarray, self._dtype)
def __setslice__(self, lower, upper, data):
subarray = self.__data.subarray(lower, upper)
subarray.set(data)
return None
def __iter__(self):
if len(self._shape) > 1:
index = 0
while index < self._shape[0]:
begin = index * self._indices[0]
end = begin + self._indices[0]
subarray = self.__data.subarray(begin, end)
array = Ndarray(subarray, self._dtype)
array._shape = self._shape[1:]
array._indices = self._indices[1:]
yield array
index += 1
else:
index = 0
while index < self._shape[0]:
yield self.__data[index]
index += 1
def __str__(self):
def array_str(array, width, strval):
alst = []
if len(array._shape) == 1:
alst.append('[')
alst.extend([strval % (width,val) for val in array])
# alst.extend(["{0:>{1}} ".format(val,width) for val in array])
#pyjs-O {0:>{1}} width > NaN?
alst[-1] = alst[-1].rstrip()
alst.append(']')
else:
alst.append('[')
for a in array:
alst.extend( array_str(a,width,strval) )
alst.append(']')
return alst
if self._dtype < 7:
alst = array_str(self, len(str( max([i for i in self.__data]) )), "%*d ")
else:
alst = array_str(self, len(str( max([i for i in self.__data]) ))+7, "%*f ")
tab = len(self._shape)
i = tab
while True:
try:
i = alst.index('[', i)
except ValueError:
break
count = 0
while True:
if alst[i+count] == '[':
count += 1
continue
else:
if count == 1: #pyjs-O ' '*n > NaN
alst[i] = '\n'+''.join([' ' for x in range(tab-count)])+alst[i]
else:
alst[i] = '\n\n'+''.join([' ' for x in range(tab-count)])+alst[i]
i += count
break
return ''.join(alst)
def __len__(self):
return self._shape[0]
def __add__(self, other):
array = Ndarray.__typedarray[self._dtype](self.__data)
ndarray = Ndarray(array, self._dtype)
ndarray._shape = self._shape
ndarray._indices = self._indices
try:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] + other
except (TypeError, ValueError): #pys -S TypeError, -O ValueError
if ndarray._shape != other._shape:
raise TypeError("array shapes are not compatible")
else:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] + other.__data[i]
return ndarray
def __sub__(self, other):
array = Ndarray.__typedarray[self._dtype](self.__data)
ndarray = Ndarray(array, self._dtype)
ndarray._shape = self._shape
ndarray._indices = self._indices
try:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] - other
except (TypeError, ValueError):
if ndarray._shape != other._shape:
raise TypeError("array shapes are not compatible")
else:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] - other.__data[i]
return ndarray
def __mul__(self, other):
array = Ndarray.__typedarray[self._dtype](self.__data)
ndarray = Ndarray(array, self._dtype)
ndarray._shape = self._shape
ndarray._indices = self._indices
try:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] * other
except (TypeError, ValueError):
if ndarray._shape != other._shape:
raise TypeError("array shapes are not compatible")
else:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] * other.__data[i]
return ndarray
def __div__(self, other):
array = Ndarray.__typedarray[self._dtype](self.__data)
ndarray = Ndarray(array, self._dtype)
ndarray._shape = self._shape
ndarray._indices = self._indices
try:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] / other
except (TypeError, ValueError):
if ndarray._shape != other._shape:
raise TypeError("array shapes are not compatible")
else:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] / other.__data[i]
return ndarray
def add(self, other):
"""
Add across array elements.
Argument is a numeral or another array.
Return new array.
"""
array = Ndarray.__typedarray[self._dtype](self.__data)
ndarray = Ndarray(array, self._dtype)
ndarray._shape = self._shape
ndarray._indices = self._indices
try:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] + other
except (TypeError, ValueError):
if ndarray._shape != other._shape:
raise TypeError("array shapes are not compatible")
else:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] + other.__data[i]
return ndarray
def sub(self, other):
"""
Subtract across array elements.
Argument is a numeral or another array.
Return new array.
"""
array = Ndarray.__typedarray[self._dtype](self.__data)
ndarray = Ndarray(array, self._dtype)
ndarray._shape = self._shape
ndarray._indices = self._indices
try:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] - other
except (TypeError, ValueError):
if ndarray._shape != other._shape:
raise TypeError("array shapes are not compatible")
else:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] - other.__data[i]
return ndarray
def mul(self, other):
"""
Multiply across array elements.
Argument is a numeral or another array.
Return new array.
"""
array = Ndarray.__typedarray[self._dtype](self.__data)
ndarray = Ndarray(array, self._dtype)
ndarray._shape = self._shape
ndarray._indices = self._indices
try:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] * other
except (TypeError, ValueError):
if ndarray._shape != other._shape:
raise TypeError("array shapes are not compatible")
else:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] * other.__data[i]
return ndarray
def div(self, other):
"""
Divide across array elements.
Argument is a numeral or another array.
Return new array.
"""
array = Ndarray.__typedarray[self._dtype](self.__data)
ndarray = Ndarray(array, self._dtype)
ndarray._shape = self._shape
ndarray._indices = self._indices
try:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] / other
except (TypeError, ValueError):
if ndarray._shape != other._shape:
raise TypeError("array shapes are not compatible")
else:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] / other.__data[i]
return ndarray
def iadd(self, other):
"""
Add across array elements in-place.
Argument is a numeral or another array.
"""
try:
for i in xrange(len(self.__data)):
self.__data[i] = self.__data[i] + other
except (TypeError, ValueError):
if self._shape != other._shape:
raise TypeError("array shapes are not compatible")
else:
for i in xrange(len(self.__data)):
self.__data[i] = self.__data[i] + other.__data[i]
return None
def isub(self, other):
"""
Subtract across array elements in-place.
Argument is a numeral or another array.
"""
try:
for i in xrange(len(self.__data)):
self.__data[i] = self.__data[i] - other
except (TypeError, ValueError):
if self._shape != other._shape:
raise TypeError("array shapes are not compatible")
else:
for i in xrange(len(self.__data)):
self.__data[i] = self.__data[i] - other.__data[i]
return None
def imul(self, other):
"""
Multiply across array elements in-place.
Argument is a numeral or another array.
"""
try:
for i in xrange(len(self.__data)):
self.__data[i] = self.__data[i] * other
except (TypeError, ValueError):
if self._shape != other._shape:
raise TypeError("array shapes are not compatible")
else:
for i in xrange(len(self.__data)):
self.__data[i] = self.__data[i] * other.__data[i]
return None
def idiv(self, other):
"""
Divide across array elements in-place.
Argument is a numeral or another array.
"""
try:
for i in xrange(len(self.__data)):
self.__data[i] = self.__data[i] / other
except (TypeError, ValueError):
if self._shape != other._shape:
raise TypeError("array shapes are not compatible")
else:
for i in xrange(len(self.__data)):
self.__data[i] = self.__data[i] / other.__data[i]
return None
def bitwise_and(self, other):
"""
Bitwise AND across array elements.
Argument is a numeral or another array.
Return new array.
"""
array = Ndarray.__typedarray[self._dtype](self.__data)
ndarray = Ndarray(array, self._dtype)
ndarray._shape = self._shape
ndarray._indices = self._indices
try:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] & other
except (TypeError, ValueError):
if ndarray._shape != other._shape:
raise TypeError("array shapes are not compatible")
else:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] & other.__data[i]
return ndarray
def bitwise_or(self, other):
"""
Bitwise OR across array elements.
Argument is a numeral or another array.
Return new array.
"""
array = Ndarray.__typedarray[self._dtype](self.__data)
ndarray = Ndarray(array, self._dtype)
ndarray._shape = self._shape
ndarray._indices = self._indices
try:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] | other
except (TypeError, ValueError):
if ndarray._shape != other._shape:
raise TypeError("array shapes are not compatible")
else:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] | other.__data[i]
return ndarray
def bitwise_xor(self, other):
"""
Bitwise XOR across array elements.
Argument is a numeral or another array.
Return new array.
"""
array = Ndarray.__typedarray[self._dtype](self.__data)
ndarray = Ndarray(array, self._dtype)
ndarray._shape = self._shape
ndarray._indices = self._indices
try:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] ^ other
except (TypeError, ValueError):
if ndarray._shape != other._shape:
raise TypeError("array shapes are not compatible")
else:
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ndarray.__data[i] ^ other.__data[i]
return ndarray
def bitwise_iand(self, other):
"""
Bitwise AND across array elements in-place.
Argument is a numeral or another array.
"""
try:
for i in xrange(len(self.__data)):
self.__data[i] = self.__data[i] & other
except (TypeError, ValueError):
if self._shape != other._shape:
raise TypeError("array shapes are not compatible")
else:
for i in xrange(len(self.__data)):
self.__data[i] = self.__data[i] & other.__data[i]
return None
def bitwise_ior(self, other):
"""
Bitwise OR across array elements in-place.
Argument is a numeral or another array.
"""
try:
for i in xrange(len(self.__data)):
self.__data[i] = self.__data[i] | other
except (TypeError, ValueError):
if self._shape != other._shape:
raise TypeError("array shapes are not compatible")
else:
for i in xrange(len(self.__data)):
self.__data[i] = self.__data[i] | other.__data[i]
return None
def bitwise_ixor(self, other):
"""
Bitwise XOR across array elements in-place.
Argument is a numeral or another array.
"""
try:
for i in xrange(len(self.__data)):
self.__data[i] = self.__data[i] ^ other
except (TypeError, ValueError):
if self._shape != other._shape:
raise TypeError("array shapes are not compatible")
else:
for i in xrange(len(self.__data)):
self.__data[i] = self.__data[i] ^ other.__data[i]
return None
def bitwise_not(self):
"""
Bitwise NOT across array elements.
Return new array.
"""
array = Ndarray.__typedarray[self._dtype](self.__data)
ndarray = Ndarray(array, self._dtype)
ndarray._shape = self._shape
ndarray._indices = self._indices
for i in xrange(len(ndarray.__data)):
ndarray.__data[i] = ~self.__data[i]
return ndarray
def setshape(self, *dim):
"""
Set shape of array.
Argument is new shape.
Raises TypeError if shape is not appropriate.
Ndarray.shape accessible with compilation in --strict mode.
"""
if isinstance(dim[0], tuple):
dim = dim[0]
size = 1
for i in dim:
size *= i
array_size = 1
for i in self._shape:
array_size *= i
if size != array_size:
raise TypeError("array size cannot change")
self._shape = dim
indices = []
for i in self._shape:
size /= i
indices.append(size)
self._indices = tuple(indices)
return None
def getshape(self):
"""
Return array shape.
Ndarray.shape accessible with compilation in --strict mode.
"""
return self._shape
def reshape(self, dim):
"""
Return view of array with new shape.
Argument is new shape.
Raises TypeError if shape is not appropriate.
"""
size = 1
for i in dim:
size *= i
array_size = 1
for i in self._shape:
array_size *= i
if size != array_size:
raise TypeError("array size cannot change")
subarray = self.__data.subarray(0)
array = Ndarray(subarray)
array._shape = dim
indices = []
for i in array._shape:
size /= i
indices.append(size)
array._indices = tuple(indices)
return array
def set(self, data):
"""
Set array elements.
        Data argument can be a 1d/2d array or a number used to set Ndarray elements; data is used repetitively if it has fewer elements than the Ndarray.
"""
if isinstance(data, (list,tuple)):
if pyjs_mode.optimized:
if isinstance(data[0], (list,tuple,PyTypedArray)):
data = [value for dat in data for value in dat]
else:
if not isinstance(data[0], (list,tuple,PyTypedArray)):
data = [dat.valueOf() for dat in data]
else:
data = [value.valueOf() for dat in data for value in dat]
dataLn = len(data)
data = data.getArray()
elif isinstance(data, (Ndarray,PyTypedArray)):
data = data.getArray()
dataLn = data.length
else:
if pyjs_mode.optimized:
for index in xrange(self.__data.__data.length):
JS("""@{{self}}['__data']['__data'][@{{index}}]=@{{data}};""")
else:
data = data.valueOf()
for index in xrange(self.__data.__data.length):
JS("""@{{self}}['__data']['__data'][@{{index}}]=@{{data}};""")
return None
if dataLn == self.__data.__data.length:
for index in xrange(self.__data.__data.length):
JS("""@{{self}}['__data']['__data'][@{{index}}]=@{{data}}[@{{index}}];""")
else:
for index in xrange(self.__data.__data.length):
JS("""@{{self}}['__data']['__data'][@{{index}}]=@{{data}}[@{{index}}%@{{dataLn}}];""")
return None
def fill(self, value):
"""
Set array elements to value argument.
"""
if pyjs_mode.optimized:
for index in xrange(self.__data.__data.length):
JS("""@{{self}}['__data']['__data'][@{{index}}]=@{{value}};""")
else:
value = value.valueOf()
for index in xrange(self.__data.__data.length):
JS("""@{{self}}['__data']['__data'][@{{index}}]=@{{value}};""")
return None
def copy(self):
"""
Return copy of array.
"""
array = Ndarray.__typedarray[self._dtype](self.__data)
ndarray = Ndarray(array, self._dtype)
ndarray._shape = self._shape
ndarray._indices = self._indices
return ndarray
def astype(self, dtype):
"""
Return copy of array.
Argument dtype is TypedArray data type.
"""
array = Ndarray.__typedarray[dtype](self.__data)
ndarray = Ndarray(array, dtype)
ndarray._shape = self._shape
ndarray._indices = self._indices
return ndarray
def view(self):
"""
Return view of array.
"""
subarray = self.__data.subarray(0)
array = Ndarray(subarray)
array._shape = self._shape
array._indices = self._indices
return array
def swapaxes(self, axis1, axis2):
"""
Swap axes of array.
Arguments are the axis to swap.
Return view of array with axes changed.
"""
array = Ndarray(self.__data, self._dtype)
shape = list(self._shape)
shape[axis1], shape[axis2] = shape[axis2], shape[axis1]
array._shape = tuple(shape)
indices = list(self._indices)
indices[axis1], indices[axis2] = indices[axis2], indices[axis1]
array._indices = tuple(indices)
return array
def getArray(self):
"""
Return JavaScript TypedArray.
"""
return self.__data.getArray()
class NP:
def zeros(self, size, dtype):
if dtype == 'i':
dtype = 3
return Ndarray(size, dtype)
def swapaxes(self, array, axis1, axis2):
return array.swapaxes(axis1, axis2)
def append(self, array, values):
if isinstance(values[0], (list,tuple,PyTypedArray)):
values = [value for dat in values for value in dat]
newarray = Ndarray(len(array)+len(values), array._dtype)
newarray.__data.set(array.__data)
newarray.__data.set(values, len(array))
return newarray
np = NP()
class PyImageData:
def __init__(self, imagedata):
"""
Provides an interface to canvas ImageData.
The argument required is the ImageData instance to be accessed.
"""
self.__imagedata = imagedata
if not isUndefined(Uint8ClampedArray):
self.data = PyUint8ClampedArray()
else:
self.data = PyCanvasPixelArray()
self.data.__data = imagedata.data
self.width = imagedata.width
self.height = imagedata.height
def getImageData(self):
"""
Return JavaScript ImageData instance.
"""
return self.__imagedata
class PyImageMatrix(Ndarray):
def __init__(self, imagedata):
"""
Provides an interface to canvas ImageData as an Ndarray array.
The argument required is the ImageData instance to be accessed.
"""
self.__imagedata = PyImageData(imagedata)
if isinstance(self.__imagedata.data, PyUint8ClampedArray):
Ndarray.__init__(self, self.__imagedata.data, 0)
else: #ie10 supports typedarray, not uint8clampedarray
Ndarray.__init__(self, self.__imagedata.data, 1)
self.setshape(self.__imagedata.height,self.__imagedata.width,4)
def getWidth(self):
"""
Return ImageData width.
"""
return self.__imagedata.width
def getHeight(self):
"""
Return ImageData height.
"""
return self.__imagedata.height
def getPixel(self, index):
"""
Get pixel RGBA.
        The index argument references the 2D array element.
"""
i = (index[0]*self._indices[0]) + (index[1]*4)
return (self.__imagedata.data[i], self.__imagedata.data[i+1], self.__imagedata.data[i+2], self.__imagedata.data[i+3])
def setPixel(self, index, value):
"""
Set pixel RGBA.
        The arguments are index, which references the 2D array element, and value, the pixel RGBA.
"""
i = (index[0]*self._indices[0]) + (index[1]*4)
self.__imagedata.data[i], self.__imagedata.data[i+1], self.__imagedata.data[i+2], self.__imagedata.data[i+3] = value[0], value[1], value[2], value[3]
return None
def getPixelRGB(self, index):
"""
Get pixel RGB.
        The index argument references the 2D array element.
"""
i = (index[0]*self._indices[0]) + (index[1]*4)
return (self.__imagedata.data[i], self.__imagedata.data[i+1], self.__imagedata.data[i+2])
def setPixelRGB(self, index, value):
"""
Set pixel RGB.
        The arguments are index, which references the 2D array element, and value, the pixel RGB.
"""
i = (index[0]*self._indices[0]) + (index[1]*4)
self.__imagedata.data[i], self.__imagedata.data[i+1], self.__imagedata.data[i+2] = value[0], value[1], value[2]
return None
def getPixelAlpha(self, index):
"""
Get pixel alpha.
        The index argument references the 2D array element.
"""
i = (index[0]*self._indices[0]) + (index[1]*4)
return self.__imagedata.data[i+3]
def setPixelAlpha(self, index, value):
"""
Set pixel alpha.
        The arguments are index, which references the 2D array element, and value, the pixel alpha.
"""
i = (index[0]*self._indices[0]) + (index[1]*4)
self.__imagedata.data[i+3] = value
return None
def getPixelInteger(self, index):
"""
Get pixel integer color.
        The index argument references the 2D array element.
"""
i = (index[0]*self._indices[0]) + (index[1]*4)
        return self.__imagedata.data[i]<<16 | self.__imagedata.data[i+1]<<8 | self.__imagedata.data[i+2] | self.__imagedata.data[i+3]<<24
def setPixelInteger(self, index, value):
"""
Set pixel integer color.
        The arguments are index, which references the 2D array element, and value, the pixel integer color.
"""
i = (index[0]*self._indices[0]) + (index[1]*4)
self.__imagedata.data[i], self.__imagedata.data[i+1], self.__imagedata.data[i+2], self.__imagedata.data[i+3] = value>>16 & 0xff, value>>8 & 0xff, value & 0xff, value>>24 & 0xff
return None
def getImageData(self):
"""
Return JavaScript ImageData instance.
"""
return self.__imagedata.getImageData()
class Pyjs_Mode:
def __init__(self):
self.strict, self.optimized = self._setmode()
def __getattr__(self, attr):
if attr == '__strict_mode':
return True
def _setmode(self):
if self.__strict_mode == True:
return True, False
else:
return False, True
pyjs_mode = Pyjs_Mode()
```
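A brief illustration of the PyImageMatrix wrapper above: it exposes canvas ImageData through (row, column) indexing. This is a hedged sketch only; `ctx`, `width` and `height` are hypothetical names for a canvas 2D context and its dimensions, and the code assumes a pyjs/browser runtime rather than plain CPython.
```python
# Hedged sketch: 2D pixel access over canvas ImageData (browser runtime assumed).
# 'ctx', 'width' and 'height' are hypothetical; obtain them from an HTML5 canvas.
imagedata = ctx.getImageData(0, 0, width, height)
pixels = PyImageMatrix(imagedata)
r, g, b, a = pixels.getPixel((10, 20))         # pixel at row 10, column 20
pixels.setPixelRGB((10, 20), (255, 0, 0))      # set RGB, leave alpha unchanged
ctx.putImageData(pixels.getImageData(), 0, 0)  # write the modified pixels back
```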
#### File: jggatc/pyjsbitset/pyjsbitset.py
```python
import math
from pyjsarray import PyUint8Array, PyUint16Array, PyUint32Array
class BitSet:
"""
BitSet provides a bitset object to use in a Python-to-JavaScript application. It uses the PyUint8Array implementation of the JavaScript Uint8Array 8-bit typedarray. BitSet16 and BitSet32 store data in PyUint16Array (16-bit) and PyUint32Array (32-bit), which implement the Uint16Array and Uint32Array typedarrays. The BitSet will dynamically expand to hold the bits required; an optional width argument defines the number of bits the BitSet instance will initially hold.
"""
__bit = 8
__bitmask = None
__typedarray = PyUint8Array
def __init__(self, width=None):
if not self.__class__.__bitmask:
self.__class__.__bitmask = dict([(self.__class__.__bit-i-1,1<<i) for i in range(self.__class__.__bit-1,-1,-1)])
self.__class__.__bitmask[self.__class__.__bit-1] = int(self.__class__.__bitmask[self.__class__.__bit-1]) #pyjs [1<<0] = 1L
if width:
self.__width = abs(width)
else:
self.__width = self.__bit
self.__data = self.__typedarray( math.ceil(self.__width/(self.__bit*1.0)) )
def __str__(self):
"""
Return string representation of BitSet object.
"""
return "%s" % self.__class__
def __repr__(self):
"""
Return string of the indexes of the set bits.
"""
setBit = []
for index in xrange(self.__width):
if self.get(index):
setBit.append(str(index))
return "{" + ", ".join(setBit) + "}"
def __getitem__(self, index):
"""
Get bit by index.
"""
return self.get(index)
def __setitem__(self, index, value):
"""
Set bit by index.
"""
self.set(index, value)
def __len__(self):
"""
Get bit length.
"""
for index in xrange(self.__width-1, -1, -1):
if self.get(index):
break
return index+1
def __iter__(self):
"""
Iterate over bits.
"""
index = 0
while index < self.__width:
yield self.get(index)
index += 1
def get(self, index, toIndex=None):
"""
Get bit by index.
Arguments include the index of the bit, and an optional toIndex to return a slice as a BitSet.
"""
if index > self.__width-1:
if not toIndex:
return False
else:
size = toIndex-index
if size > 0:
return self.__class__(size)
else: #use exception
return None
if toIndex is None:
return bool( self.__data[ int(index/self.__bit) ] & self.__bitmask[ index%self.__bit ] )
else:
size = toIndex-index
if size > 0:
bitset = self.__class__(size)
ix = 0
if toIndex > self.__width:
toIndex = self.__width
for i in xrange(index, toIndex):
bitset.set(ix, bool( self.__data[ int(i/self.__bit) ] & self.__bitmask[ i%self.__bit ] ))
ix += 1
return bitset
else: #use exception
return None
def set(self, index, value=1):
"""
Set bit by index.
Optional argument value is the bit state of 1(True) or 0(False). Default:1
"""
if index > self.__width-1:
if value:
self.resize(index+1)
else:
return
if value:
self.__data[ int(index/self.__bit) ] = self.__data[ int(index/self.__bit) ] | self.__bitmask[ index%self.__bit ]
# self.__data[ int(index/self.__bit) ] |= self.__bitmask[ index%self.__bit ] #pyjs -O: |= not processed
else:
self.__data[ int(index/self.__bit) ] = self.__data[ int(index/self.__bit) ] & ~(self.__bitmask[ index%self.__bit ])
# self.__data[ int(index/self.__bit) ] &= ~(self.__bitmask[ index%self.__bit ]) #pyjs -O: &= not processed
return None
def fill(self, index=None, toIndex=None):
"""
Set the bit. If no argument is provided, all bits are set.
Optional argument index is bit index to set, and toIndex to set a range of bits.
"""
if index is None and toIndex is None:
for i in xrange(0, self.__width):
self.set(i, 1)
else:
if toIndex is None:
self.set(index, 1)
else:
for i in xrange(index, toIndex):
self.set(i, 1)
def clear(self, index=None, toIndex=None):
"""
Clear the bit. If no argument is provided, all bits are cleared.
Optional argument index is bit index to clear, and toIndex to clear a range of bits.
"""
if index is None:
for i in xrange(len(self.__data)):
self.__data[i] = 0
else:
if toIndex is None:
self.set(index, 0)
else:
if index == 0 and toIndex == self.__width:
for dat in xrange(len(self.__data)):
self.__data[dat] = 0
else:
for i in xrange(index, toIndex):
self.set(i, 0)
def flip(self, index, toIndex=None):
"""
Flip the state of the bit.
Argument index is the bit index to flip, and toIndex to flip a range of bits.
"""
if toIndex is None:
self.set(index, not self.get(index))
else:
if toIndex > self.__width:
self.resize(toIndex)
toIndex = self.__width
if index == 0 and toIndex == self.__width:
for dat in xrange(len(self.__data)):
self.__data[dat] = ~self.__data[dat]
else:
for i in xrange(index, toIndex):
self.set(i, not self.get(i))
def cardinality(self):
"""
Return the count of set bits.
"""
count = 0
for bit in xrange(self.__width):
if self.get(bit):
count += 1
return count
def intersects(self, bitset):
"""
Check if set bits in this BitSet are also set in the bitset argument.
Return True if bitsets intersect, otherwise return False.
"""
for dat in xrange(len(bitset.__data)):
if bitset.__data[dat] & self.__data[dat]:
return True
return False
def andSet(self, bitset):
"""
BitSet and BitSet.
"""
data = min(len(self.__data), len(bitset.__data))
for dat in xrange(data):
self.__data[dat] = self.__data[dat] & bitset.__data[dat]
# self.__data[dat] &= bitset.__data[dat] #pyjs -O: &= not processed
# pyjs -S: &= calls __and__ instead of __iand__, -O: no call to operator methods
def orSet(self, bitset):
"""
BitSet or BitSet.
"""
data = min(len(self.__data), len(bitset.__data))
for dat in xrange(data):
self.__data[dat] = self.__data[dat] | bitset.__data[dat]
# self.__data[dat] |= bitset.__data[dat] #pyjs -O: |= not processed
def xorSet(self, bitset):
"""
BitSet xor BitSet.
"""
data = min(len(self.__data), len(bitset.__data))
for dat in xrange(data):
self.__data[dat] = self.__data[dat] ^ bitset.__data[dat]
# self.__data[dat] ^= bitset.__data[dat] #pyjs -O: ^= not processed
def resize(self, width):
"""
Resize the BitSet to width argument.
"""
if width > self.__width:
self.__width = width
if self.__width > len(self.__data) * self.__bit:
array = self.__typedarray( math.ceil(self.__width/(self.__bit*1.0)) )
array.set(self.__data)
self.__data = array
elif width < self.__width:
if width < len(self):
width = len(self)
self.__width = width
if self.__width <= len(self.__data) * self.__bit - self.__bit:
array = self.__typedarray( math.ceil(self.__width/(self.__bit*1.0)) )
array.set(self.__data.subarray(0,math.ceil(self.__width/(self.__bit*1.0))))
self.__data = array
def size(self):
"""
Return bits used by BitSet storage array.
"""
return len(self.__data) * self.__bit
def isEmpty(self):
"""
Check whether any bit is set.
Return True if none set, otherwise return False.
"""
for data in self.__data:
if data:
return False
return True
def clone(self):
"""
Return a copy of the BitSet.
"""
new_bitset = self.__class__(1)
data = self.__typedarray(self.__data)
new_bitset.__data = data
new_bitset.__width = self.__width
return new_bitset
class BitSet16(BitSet):
"""
BitSet using PyUint16Array.
"""
__bit = 16
__bitmask = None
__typedarray = PyUint16Array
def __init__(self, width=None):
BitSet.__init__(self, width)
class BitSet32(BitSet):
"""
BitSet using PyUint32Array.
"""
__bit = 32
__bitmask = None
__typedarray = PyUint32Array
def __init__(self, width=None):
BitSet.__init__(self, width)
```
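To make the BitSet API in the class docstring concrete, here is a minimal usage sketch. It assumes a pyjs-compiled environment where pyjsarray's typed arrays are backed by JavaScript typedarrays, and a flat module layout where the class imports as shown, so it is illustrative rather than something to run under plain CPython.
```python
# Minimal BitSet usage sketch (pyjs environment and module layout assumed).
from pyjsbitset import BitSet

bits = BitSet(16)          # initially holds 16 bits
bits.set(3)                # set bit 3
bits.set(10, 1)            # set bit 10
bits.set(3, 0)             # clear bit 3 again
print(bits.get(10))        # True
print(bits.cardinality())  # 1 set bit remaining
print(repr(bits))          # {10}
```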
|
{
"source": "jggatc/pyjsdl",
"score": 3
}
|
#### File: jggatc/pyjsdl/cursors.py
```python
from pyjsdl.surface import Surface
from pyjsdl.color import Color
from pyjsdl import locals as Const
#cursors not implemented
arrow = diamond = broken_x = tri_left = tri_right = ()
def compile(strings, black='X', white='.', xor='o'):
"""
Compile binary data from cursor string.
Arguments are the cursor string and optional symbols representing colors.
Data represents black and white pixels, xor color defaulting to black.
Data should be a string list of width divisible by 8.
Return cursor data and mask, which can be used with mouse.set_cursor.
"""
data = []
mask = []
dbit = {black:1, white:0, xor:1}
mbit = {black:1, white:1, xor:0}
string = ''.join(strings)
for i in range(0, len(string), 8):
s = string[i:i+8]
db = mb = 0
if s != ' ':
for j in range(8):
c = s[j]
if c == ' ':
continue
if dbit[c]:
db |= 0x01<<7-j
if mbit[c]:
mb |= 0x01<<7-j
data.append(int(db))
mask.append(int(mb))
return tuple(data), tuple(mask)
def create_cursor(size, data, mask):
"""
Create cursor image from binary data.
Arguments are the cursor size and its binary data and mask.
Return a surface, which can be used with mouse.set_cursor.
"""
surface = Surface(size, Const.SRCALPHA)
black = Color(0,0,0,255)
white = Color(255,255,255,255)
x = y = 0
for i in range(len(data)):
if data[i] or mask[i]:
for j in range(8):
if data[i] & 0x01<<7-j:
surface.setFillStyle(black)
surface.fillRect(x+j, y, 1, 1)
elif mask[i] & 0x01<<7-j:
surface.setFillStyle(white)
surface.fillRect(x+j, y, 1, 1)
x += 8
if x >= size[0]:
x = 0
y += 1
return surface
def get_cursor_types():
#https://developer.mozilla.org/en-US/docs/Web/CSS/cursor
"""
Return list of cursor types from CSS Cursor API.
"""
types = ['default', 'auto', 'none', 'context-menu', 'help', 'pointer', 'progress', 'wait', 'cell', 'crosshair', 'text', 'vertical-text', 'alias', 'copy', 'move', 'no-drop', 'not-allowed', 'e-resize', 'n-resize', 'ne-resize', 'nw-resize', 's-resize', 'se-resize', 'sw-resize', 'w-resize', 'ew-resize', 'ns-resize', 'nesw-resize', 'nwse-resize', 'col-resize', 'row-resize', 'all-scroll', 'zoom-in', 'zoom-out', 'grab', 'grabbing']
return types
```
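A hedged sketch of how compile and create_cursor might be used together. The 8x8 pattern below is made up for illustration, and Surface requires the pyjsdl browser runtime, so this is not runnable under plain CPython.
```python
# Hypothetical 8x8 cursor pattern; each row's width is divisible by 8.
from pyjsdl import cursors

cursor_strings = ('XXXXXXXX',
                  'X......X',
                  'X..oo..X',
                  'X..oo..X',
                  'X......X',
                  'X......X',
                  'X......X',
                  'XXXXXXXX')
data, mask = cursors.compile(cursor_strings)
cursor_surface = cursors.create_cursor((8, 8), data, mask)
# data/mask or cursor_surface can then be passed to mouse.set_cursor.
```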
#### File: jggatc/pyjsdl/key.py
```python
from pyjsdl import env
from pyjsdl import locals as Const
__docformat__ = 'restructuredtext'
class Key(object):
"""
**pyjsdl.key**
* pyjsdl.key.name
* pyjsdl.key.get_mods
"""
def __init__(self):
"""
Provides methods to access the key function.
Module initialization creates pyjsdl.key instance.
"""
self.keyPress = env.event.keyPress
self.keyMod = env.event.keyMod
self.alt = Const.K_ALT
self.ctrl = Const.K_CTRL
self.shift = Const.K_SHIFT
self._keys = {}
self._nonimplemented_methods()
def name(self, keycode):
"""
Return the name of the key for a keycode.
"""
if not self._keys:
for keyname in dir(Const):
if keyname.startswith('K_'):
self._keys[getattr(Const, keyname)] = keyname.split('_')[-1].lower()
self._keys[0] = 'unknown key'
if keycode not in self._keys:
keycode = 0
return self._keys[keycode]
def get_mods(self):
"""
Return int modifier keys alt|ctrl|shift.
"""
return self.keyMod[self.alt][self.keyPress[self.alt]] | self.keyMod[self.ctrl][self.keyPress[self.ctrl]] | self.keyMod[self.shift][self.keyPress[self.shift]]
def _nonimplemented_methods(self):
self.get_focused = lambda *arg: None
self.get_pressed = lambda *arg: None
self.set_mods = lambda *arg: None
self.set_repeat = lambda *arg: None
self.get_repeat = lambda *arg: True
```
|
{
"source": "jggatc/pyjsdl-ts",
"score": 3
}
|
#### File: pyjsdl-ts/pyjsdl/mask.py
```python
from pyjsdl.pyjsarray import BitSet
from pyjsdl.color import Color
# __pragma__ ('skip')
import sys
if sys.version_info < (3,):
from pyjsdl.util import _range as range
# __pragma__ ('noskip')
__docformat__ = 'restructuredtext'
def from_surface(surface, threshold=127):
"""
**pyjsdl.mask.from_surface**
Return Mask derived from surface using alpha transparency.
Optional argument to set alpha threshold.
"""
mask = Mask((surface.width, surface.height))
if not mask.bit:
return None
pixels = surface.getImageData(0, 0, surface.width, surface.height)
width, height = surface.width*4, surface.height
for y in range(0, height):
xpix = 0
i = (y*width)+3
for x in range(0, width, 4):
if surface._getPixel(pixels, i+x) > threshold:
mask.set_at((xpix,y))
xpix += 1
return mask
def from_threshold(surface, color, threshold=(0,0,0,255)):
"""
**pyjsdl.mask.from_threshold**
Return Mask from surface using a given color.
Optional threshold argument to set color range and alpha threshold.
"""
mask = Mask((surface.width, surface.height))
if not mask.bit:
return None
pixels = surface.getImageData(0, 0, surface.width, surface.height)
if threshold == (0,0,0,255):
color = Color(color)
width, height = surface.width*4, surface.height
for y in range(0, height):
xpix = 0
i = y*width
for x in range(0, width, 4):
ix = i+x
if surface._getPixel(pixels, ix) == color.r and surface._getPixel(pixels, ix+1) == color.g and surface._getPixel(pixels, ix+2) == color.b and surface._getPixel(pixels, ix+3) >= threshold[3]:
mask.set_at((xpix,y))
xpix += 1
else:
color = Color(color)
col = {}
col['r1'] = color.r - threshold[0] - 1
col['r2'] = color.r + threshold[0] + 1
col['g1'] = color.g - threshold[1] - 1
col['g2'] = color.g + threshold[1] + 1
col['b1'] = color.b - threshold[2] - 1
col['b2'] = color.b + threshold[2] + 1
col['a'] = threshold[3] - 1
width, height = surface.width*4, surface.height
for y in range(0, height):
xpix = 0
i = y*width
for x in range(0, width, 4):
ix = i+x
if (col['r1'] < surface._getPixel(pixels, ix) < col['r2']) and (col['g1'] < surface._getPixel(pixels, ix+1) < col['g2']) and (col['b1'] < surface._getPixel(pixels, ix+2) < col['b2']) and (surface._getPixel(pixels, ix+3) > col['a']):
mask.set_at((xpix,y))
xpix += 1
return mask
class Mask(object):
"""
**pyjsdl.mask.Mask**
* Mask.get_size
* Mask.get_at
* Mask.set_at
* Mask.fill
* Mask.clear
* Mask.invert
* Mask.count
* Mask.overlap
* Mask.toString
"""
def __init__(self, size):
"""
Return a Mask object.
The size argument is (width, height) of the mask.
The mask is represented by a list of BitSet objects.
"""
self.width = int(size[0])
self.height = int(size[1])
self.bit = []
for bitset in range(self.height):
self.bit.append(BitSet(self.width))
def __str__(self):
return self.toString()
def __repr__(self):
return '{}({})'.format(self.__class__, repr(self.__dict__))
def get_size(self):
"""
Return width, height of mask.
"""
return (self.width, self.height)
def get_at(self, pos):
"""
Return bit setting for given pos.
"""
return self.bit[pos[1]].get(pos[0])
def set_at(self, pos, value=1):
"""
Set bit for given pos.
Optional value to set bit, either 1 or 0, defaults to 1.
"""
self.bit[pos[1]].set(pos[0], value)
return None
def fill(self):
"""
Fill mask.
"""
for bitset in self.bit:
bitset.fill()
return None
def clear(self):
"""
Clear mask.
"""
for bitset in self.bit:
bitset.clear()
return None
def invert(self):
"""
Invert bit value in mask.
"""
for bitset in self.bit:
bitset.flip(0,self.width)
return None
def count(self):
"""
Return count of true bits in mask.
"""
true_bits = 0
for bitset in self.bit:
true_bits += bitset.cardinality()
return true_bits
def overlap(self, mask, offset):
"""
Return True if the mask at the offset position overlaps with this mask.
"""
if offset[0] > 0:
x1 = offset[0]
x2 = 0
else:
x1 = 0
x2 = -offset[0]
if offset[1] > 0:
y1 = offset[1]
y2 = 0
else:
y1 = 0
y2 = -offset[1]
w = min(self.width-x1, mask.width-x2)
h = min(self.height-y1, mask.height-y2)
if w > 0 and h > 0:
for y in range(h):
if self.bit[y1+y].get(x1, x1+w).intersects(mask.bit[y2+y].get(x2, x2+w)):
return True
return None
def toString(self, bit=('1','0')):
"""
Return string representation of mask.
Optional bit argument specifies the characters used for set and unset bits.
"""
cbit = {True:bit[0], False:bit[1]}
cbitset = []
for bitset in self.bit:
cbitset.append('\n')
cbitset.extend([cbit[bitset.get(i)] for i in range(self.width)])
bitstr = ''.join(cbitset)
return bitstr
```
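A minimal sketch of the Mask API above. It assumes the Transcrypt/browser runtime that pyjsdl.pyjsarray.BitSet requires, and the coordinates are arbitrary illustration values.
```python
# Hedged Mask usage sketch (Transcrypt/browser runtime assumed).
from pyjsdl.mask import Mask

a = Mask((4, 4))
a.set_at((1, 1))
a.set_at((2, 2))
b = Mask((4, 4))
b.set_at((0, 0))
print(a.count())             # 2 set bits
print(a.overlap(b, (1, 1)))  # True: b's (0,0) bit lands on a's (1,1) bit
print(a.toString(('#', '.')))
```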
#### File: pyjsdl-ts/pyjsdl/pyjsobj.py
```python
class Element:
def __init__(self, element=None):
if element is not None:
self._element = element
else:
self._element = None
@property
def style(self):
    return self._element.style
def getElement(self):
return self._element
def setElement(self, element):
self._element = element
def setID(self, id):
self._element.id = id
def getID(self):
return self._element.id
def setSize(self, width, height):
self.setWidth(width)
self.setHeight(height)
def setWidth(self, width):
if isinstance(width, str):
self._element.style.width = width
else:
self._element.style.width = str(int(width)) + 'px'
def setHeight(self, height):
if isinstance(height, str):
self._element.style['min-height'] = height
else:
self._element.style['min-height'] = str(int(height)) + 'px'
def getAttributes(self):
return self._element.attributes
def getClientHeight(self):
return self._element.clientHeight
def getClientLeft(self):
return self._element.clientLeft
def getClientTop(self):
return self._element.clientTop
def getClientWidth(self):
return self._element.clientWidth
def getScrollHeight(self):
return self._element.scrollHeight
def getScrollLeft(self):
return self._element.scrollLeft
def getScrollTop(self):
return self._element.scrollTop
def getScrollWidth(self):
return self._element.scrollWidth
def addEventListener(self, type, listener, useCapture):
self._element.addEventListener(type, listener, useCapture)
def removeEventListener(self, type, listener, useCapture):
self._element.removeEventListener(type, listener, useCapture)
def getMouseWheelEventType(self):
if self._element is not None:
element = self._element
else:
element = document.createElement('div')
if hasattr(element, 'onwheel'):
event_type = 'wheel'
elif hasattr(element, 'onmousewheel'):
event_type = 'mousewheel'
else:
event_type = 'DOMMouseScroll'
return event_type
def getAttribute(self):
return self._element.getAttribute()
def setAttribute(self, name, value):
self._element.setAttribute(name, value)
def getBoundingClientRect(self):
return self._element.getBoundingClientRect()
def appendChild(self, el):
self._element.appendChild(el)
def removeChild(self, el):
self._element.removeChild(el)
def getStyle(self):
return self._element.style
def getTitle(self):
return self._element.title
def setTitle(self, text):
self._element.title = text
def focus(self):
self._element.focus()
def blur(self):
self._element.blur()
def click(self):
self._element.click()
class FocusElement(Element):
_event_type = None
def __init__(self):
Element.__init__(self)
self._sink_events = None
def addMouseListener(self, obj):
element = obj.getElement()
element.addEventListener('mousemove', self.onMouseMove)
element.addEventListener('mousedown', self.onMouseDown)
element.addEventListener('mouseup', self.onMouseUp)
element.addEventListener('mouseenter', self.onMouseEnter)
element.addEventListener('mouseleave', self.onMouseLeave)
if hasattr(element, 'onwheel'):
element.addEventListener('wheel', self.onMouseWheel)
elif hasattr(element, 'onmousewheel'):
element.addEventListener('mousewheel', self.onMouseWheel)
else:
element.addEventListener('DOMMouseScroll', self.onMouseWheel)
def addKeyboardListener(self, obj):
element = obj.getElement()
element.setAttribute('tabindex','0')
element.addEventListener('keydown', self.onKeyDown)
element.addEventListener('keyup', self.onKeyUp)
element.addEventListener('keypress', self.onKeyPress)
def _addKeyboardListener(self, obj):
element = obj.getElement()
element.setAttribute('tabindex','0')
element.addEventListener('keydown', self._onKeyDown)
element.addEventListener('keyup', self._onKeyUp)
element.addEventListener('keypress', self._onKeyPress)
def addKeyEventListener(self, obj):
element = obj.getElement()
element.setAttribute('tabindex','0')
listener = lambda event: self.onKeyEvent(event)
_listener[self.__name__] = listener
element.addEventListener('keydown', listener)
def removeKeyEventListener(self, obj):
element = obj.getElement()
listener = _listener[self.__name__]
element.removeEventListener('keydown', listener)
del _listener[self.__name__]
def addFocusListener(self, obj):
element = obj.getElement()
element.setAttribute('tabindex','0')
element.addEventListener('focus', self.onFocus)
element.addEventListener('blur', self.onBlur)
def sinkEvents(self, events):
self._sink_events = events
def onMouseMove(self, event):
pass
def onMouseDown(self, event):
pass
def onMouseUp(self, event):
pass
def onMouseEnter(self, event):
pass
def onMouseLeave(self, event):
pass
def onMouseWheel(self, event):
pass
def onKeyDown(self, event):
pass
def onKeyUp(self, event):
pass
def onKeyPress(self, event):
pass
def onTouchInitiate(self, event):
pass
def onTouchStart(self, event):
pass
def onTouchEnd(self, event):
pass
def onTouchMove(self, event):
pass
def onTouchCancel(self, event):
pass
def onFocus(self, event):
pass
def onBlur(self, event):
pass
def focus(self):
self._element.focus()
def blur(self):
self._element.blur()
_listener = {}
class HTML5Canvas(FocusElement):
_identity = 0
def __init__(self, width, height):
FocusElement.__init__(self)
self._id = HTML5Canvas._identity
HTML5Canvas._identity += 1
self._canvas = document.createElement('canvas')
self._element = self._canvas
self._element.id = str(self._id)
self._element.width = width
self._element.height = height
self.width = width
self.height = height
self._element.style.margin = '0px'
self._element.style.padding = '0px'
self._element.style['vertical-align'] = 'bottom'
self._element.style.id = str(self._id)
self.canvas = self._element
self._ctx = self._element.getContext('2d')
self.impl = CanvasImpl(self._ctx)
def resize(self, width, height):
self.width = width
self.height = height
def drawImage(self, image, *args):
ln = len(args)
if ln == 2:
self._ctx.drawImage(image,args[0],args[1])
elif ln == 4:
self._ctx.drawImage(image,args[0],args[1],args[2],args[3])
elif ln == 8:
self._ctx.drawImage(image,args[0],args[1],args[2],args[3],
args[4],args[5],args[6],args[7])
def fill(self):
self._ctx.fill()
def setFillStyle(self, style):
self._ctx.fillStyle = str(style)
def fillRect(self, x, y, width, height):
self._ctx.fillRect(x, y, width, height)
def clear(self):
self._ctx.clear()
def setLineWidth(self, width):
self._ctx.lineWidth = width
def setStrokeStyle(self, style):
self._ctx.strokeStyle = str(style)
def strokeRect(self, x, y, width, height):
self._ctx.strokeRect(x, y, width, height)
def saveContext(self):
self._ctx.save()
def restoreContext(self):
self._ctx.restore()
def translate(self, x, y):
self._ctx.translate(x,y)
def scale(self, x, y):
self._ctx.scale(x,y)
def rotate(self, angle):
self._ctx.rotate(angle)
def transform(self, m11, m12, m21, m22, dx, dy):
self._ctx.transform(m11, m12, m21, m22, dx, dy)
def arc(self, x, y, r, sAngle, eAngle, counterclockwise):
self._ctx.arc(x, y, r, sAngle, eAngle, counterclockwise)
def beginPath(self):
self._ctx.beginPath()
def closePath(self):
self._ctx.closePath()
def moveTo(self, x, y):
self._ctx.moveTo(x, y)
def lineTo(self, x, y):
self._ctx.lineTo(x, y)
def stroke(self):
self._ctx.stroke()
def setFont(self, font):
self._ctx.font = font
def setTextAlign(self, align):
self._ctx.textAlign = align
def setTextBaseline(self, baseline):
self._ctx.textBaseline = baseline
def fillText(self, text, x, y):
self._ctx.fillText(text, x, y)
def strokeText(self, text, x, y):
self._ctx.strokeText(text, x, y)
def measureText(self, text):
return self._ctx.measureText(text).width
def getImageData(self, x, y, width, height):
return self._ctx.getImageData(x, y, width, height)
def putImageData(self, *args):
if len(args) == 3:
self._ctx.putImageData(args[0], args[1], args[2])
else:
self._ctx.putImageData(args[0], args[1], args[2], args[3], args[4], args[5], args[6])
def getContext(self, ctx_type='2d', ctx_attr=None):
if ctx_attr is None:
return self._element.getContext(ctx_type)
else:
return self._element.getContext(ctx_type, ctx_attr)
def toDataURL(self, img_type='image/png', enc_options=0.92):
return self._element.toDataURL(img_type, enc_options)
def toBlob(self, callback, img_type='image/png', quality=0.92):
return self._element.toBlob(callback, img_type, quality)
def getElement(self):
return self._element
class CanvasImpl:
def __init__(self, ctx):
self.canvasContext = ctx
class Panel(Element):
def __init__(self):
self._element = document.createElement('div')
def setID(self, id):
self._element.id = id
def getID(self):
return self._element.id
def appendChild(self, element):
self._element.appendChild(element._element)
def removeChild(self, element):
self._element.removeChild(element._element)
def append(self, element):
self._element.appendChild(element._element)
def add(self, element):
self.append(element)
def remove(self, element):
self._element.removeChild(element._element)
class RootPanel(Panel):
_id = None
def __init__(self):
if self._id is None:
self._id = '__panel__'
self._element = document.getElementById(self._id)
@classmethod
def _set_root_panel(cls, id):
if cls._id is None:
cls._id = id
def setId(self, id):
self._id = id
def getId(self):
return self._id
def add(self, element):
if isinstance(element, Element):
self._element.appendChild(element.getElement())
else:
self._element.appendChild(element)
class FocusPanel(Panel):
pass
class VerticalPanel(Panel):
def __init__(self):
Panel.__init__(self)
self._element.style.display = 'flex'
self._element.style['flex-direction'] = 'column'
def append(self, element):
el = element._element
el.display = 'inline-block'
el.style.flex = '1'
el.style.width = '100%'
self._element.appendChild(el)
class TextBox(Element):
_type = 'input'
def __init__(self):
Element.__init__(self)
self._element = document.createElement(self._type)
self._element.style.display = 'inline-block'
self._element.style.flex = '1'
self._element.style.border = '1px solid rgb(118, 118, 118)'
self._element.style.margin = '0px'
self._element.style.padding = '0px'
@property
def value(self):
return self._element.value
@value.setter
def value(self, text):
self._element.value = text
def setVisible(self, visible):
if visible:
self._element.style.display = 'inline-block'
else:
self._element.style.display = 'none'
def getVisible(self):
if self._element.style.display != 'none':
return True
else:
return False
def getText(self):
return self._element.value
def setText(self, text):
self._element.value = text
class TextArea(TextBox):
_type = 'textarea'
def __init__(self):
TextBox.__init__(self)
self._element.style.resize = 'vertical'
class ImageLoader:
def __init__(self, imagelist, callback):
self.imagelist = imagelist
self.callback = callback
self.images = []
self.image_toload = len(self.imagelist)
for image in self.imagelist:
self.load(image)
def load(self, imageurl):
image = __new__(Image())
self.images.append(image)
image.addEventListener('load', self.loaded, False)
image.src = imageurl
def loaded(self):
self.image_toload -= 1
if not self.image_toload:
self.callback.onImagesLoaded(self.images)
def loadImages(imagelist, callback):
ImageLoader(imagelist, callback)
class Color:
def __init__(self):
pass
class Audio:
def __init__(self, sound_file):
self.element = document.createElement("AUDIO")
self.element.src = sound_file
def play(self):
self.element.play()
def pause(self):
self.element.pause()
def getCurrentTime(self):
return self.element.currentTime
def setCurrentTime(self, time):
self.element.currentTime = time
def isPaused(self):
return self.element.paused
def getSrc(self):
return self.element.src
def getVolume(self):
return self.element.volume
def setVolume(self, volume):
self.element.volume = volume
def getDuration(self):
return self.element.duration
class DOM:
@staticmethod
def eventGetCurrentEvent():
return Event()
@staticmethod
def setStyleAttribute(element, attr, val):
element.style[attr] = val
class Event:
pass
def doc():
return document
def get_main_frame():
return document
def wnd():
return window
def requestAnimationFrameInit():
requestAnimationFramePolyfill()
return wnd()
def performanceNowInit():
performanceNowPolyfill()
return wnd()
def requestAnimationFramePolyfill():
__pragma__('js', {},
"""
// http://paulirish.com/2011/requestanimationframe-for-smart-animating/
// http://my.opera.com/emoller/blog/2011/12/20/requestanimationframe-for-smart-er-animating
// requestAnimationFrame polyfill by <NAME>. fixes from Paul Irish and <NAME>
// MIT license
(function() {
var lastTime = 0;
var vendors = ['ms', 'moz', 'webkit', 'o'];
for(var x = 0; x < vendors.length && !window.requestAnimationFrame; ++x) {
window.requestAnimationFrame = window[vendors[x]+'RequestAnimationFrame'];
window.cancelAnimationFrame = window[vendors[x]+'CancelAnimationFrame']
|| window[vendors[x]+'CancelRequestAnimationFrame'];
}
if (!window.requestAnimationFrame)
window.requestAnimationFrame = function(callback, element) {
var currTime = new Date().getTime();
var timeToCall = Math.max(0, 16 - (currTime - lastTime));
var id = window.setTimeout(function() { callback(currTime + timeToCall); },
timeToCall);
lastTime = currTime + timeToCall;
return id;
};
if (!window.cancelAnimationFrame)
window.cancelAnimationFrame = function(id) {
clearTimeout(id);
};
}());
""")
def performanceNowPolyfill():
__pragma__('js', {},
"""
// @license http://opensource.org/licenses/MIT
// copyright <NAME> 2015
// Date.now() is supported everywhere except IE8. For IE8 we use the Date.now polyfill
// github.com/Financial-Times/polyfill-service/blob/master/polyfills/Date.now/polyfill.js
// as Safari 6 doesn't have support for NavigationTiming, we use a Date.now() timestamp for relative values
// if you want values similar to what you'd get with real perf.now, place this towards the head of the page
// but in reality, you're just getting the delta between now() calls, so it's not terribly important where it's placed
(function(){
if ("performance" in window == false) {
window.performance = {};
}
Date.now = (Date.now || function () { // thanks IE8
return new Date().getTime();
});
if ("now" in window.performance == false){
var nowOffset = Date.now();
if (performance.timing && performance.timing.navigationStart){
nowOffset = performance.timing.navigationStart
}
window.performance.now = function now(){
return Date.now() - nowOffset;
}
}
})();
""")
fabs = Math.abs
```
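The HTML5Canvas wrapper above is a thin layer over the canvas 2D context. The sketch below shows typical use; it only works after Transcrypt compilation in a browser, and it assumes the page contains the '__panel__' element that RootPanel looks up by default.
```python
# Sketch (Transcrypt/browser only): draw on a canvas and attach it to the page.
from pyjsdl.pyjsobj import HTML5Canvas, RootPanel

canvas = HTML5Canvas(200, 100)
canvas.setFillStyle('#336699')
canvas.fillRect(10, 10, 80, 40)
canvas.setFont('12px sans-serif')
canvas.fillText('hello', 10, 70)
RootPanel().add(canvas)
```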
#### File: pyjsdl-ts/pyjsdl/pylib.py
```python
def join(a, *p):
"""Join two or more pathname components, inserting '/' as needed.
If any component is an absolute path, all previous path components
will be discarded. An empty last part will result in a path that
ends with a separator."""
sep = _get_sep(a)
path = a
try:
if not p:
path[:0] + sep
for b in p:
if b.startswith(sep):
path = b
elif not path or path.endswith(sep):
path += b
else:
path += sep + b
except (TypeError, AttributeError, BytesWarning):
_check_arg_types('join', a, *p) #from genericpath
raise
return path
#os.path.normpath - py3.5.2
def normpath(path): ###
"""Normalize path, eliminating double slashes, etc."""
if isinstance(path, bytes):
sep = b'/'
empty = b''
dot = b'.'
dotdot = b'..'
else:
sep = '/'
empty = ''
dot = '.'
dotdot = '..'
if path == empty:
return dot
initial_slashes = path.startswith(sep)
# POSIX allows one or two initial slashes, but treats three or more
# as single slash.
if (initial_slashes and
path.startswith(sep*2) and not path.startswith(sep*3)):
initial_slashes = 2
comps = path.split(sep)
new_comps = []
for comp in comps:
if comp in (empty, dot):
continue
if (comp != dotdot or (not initial_slashes and not new_comps) or
(new_comps and new_comps[-1] == dotdot)):
new_comps.append(comp)
elif new_comps:
new_comps.pop()
comps = new_comps
path = sep.join(comps)
if initial_slashes:
path = sep*initial_slashes + path
return path or dot
#os - py3.5.2
def _get_sep(path):
if isinstance(path, bytes):
return b'/'
else:
return '/'
#genericpath - py3.5.2
def _check_arg_types(funcname, *args):
hasstr = hasbytes = False
for s in args:
if isinstance(s, str):
hasstr = True
elif isinstance(s, bytes):
hasbytes = True
else:
raise TypeError('%s() argument must be str or bytes, not %r' %
(funcname, s.__class__.__name__)) from None
if hasstr and hasbytes:
raise TypeError("Can't mix strings and bytes in path components") from None
class Mod: pass
os = Mod()
path = Mod()
path.join = join
path.normpath = normpath
os.path = path
```
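The join and normpath helpers above are plain-Python ports of posixpath behavior, so they can be exercised directly; the sketch below assumes the module is importable on its own outside the Transcrypt build.
```python
# Quick check of the path helpers defined above (pure Python).
from pyjsdl.pylib import join, normpath

print(join('data', 'images', 'sprite.png'))  # data/images/sprite.png
print(join('data', '/abs', 'file.txt'))      # /abs/file.txt (absolute part wins)
print(normpath('a//b/../c/./d'))             # a/c/d
```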
#### File: pyjsdl-ts/test/draw_test.py
```python
env = None
pg = None
surface = None
# __pragma__ ('opov')
def init(environ):
global env, pg, surface
env = environ
pg = env['pg']
surface = env['surface']
tests = [test_draw_rect,
test_draw_circle,
test_draw_ellipse,
test_draw_arc,
test_draw_polygon,
test_draw_line,
test_draw_lines]
return tests
def test_draw_rect():
data = [((10,6),0), ((10,8),1), ((10,10),1)], (5,8,10,5)
surface.fill((0,0,0))
rect = pg.draw.rect(surface, (255,0,0), (5,8,10,5))
for pos in data[0]:
c = surface.get_at(pos[0])
c = {True:1,False:0}[c.r>0]
assert c == pos[1]
assert (rect.x,rect.y,rect.width,rect.height) == data[1]
data = [((10,6),0), ((10,8),1), ((10,10),0)], (5,8,10,5)
surface.fill((0,0,0))
rect = pg.draw.rect(surface, (255,0,0,255), pg.Rect((5,8,10,5)), 1)
for pos in data[0]:
c = surface.get_at(pos[0])
c = {True:1,False:0}[c.r>0]
assert c == pos[1]
assert (rect.x,rect.y,rect.width,rect.height) == data[1]
data = [((3,0),1), ((3,2),1), ((3,4),0)], (0,0,5,3)
surface.fill((0,0,0))
rect = pg.draw.rect(surface, (255,0,0), (-5,-2,10,5))
for pos in data[0]:
c = surface.get_at(pos[0])
c = {True:1,False:0}[c.r>0]
assert c == pos[1]
assert (rect.x,rect.y,rect.width,rect.height) == data[1]
def test_draw_circle():
data = [((10,0),0), ((10,5),1), ((10,10),1)], (5,5,10,10)
surface.fill((0,0,0))
rect = pg.draw.circle(surface, (255,0,0), (10,10), 5)
data = [((10,0),0), ((10,5),1), ((10,10),0)], (5,5,10,10)
surface.fill((0,0,0))
rect = pg.draw.circle(surface, (255,0,0,255), (10,10), 5, 1)
for pos in data[0]:
c = surface.get_at(pos[0])
c = {True:1,False:0}[c.r>0]
assert c == pos[1]
assert (rect.x,rect.y,rect.width,rect.height) == data[1]
def test_draw_ellipse():
data = [((10,6),1), ((10,8),1), ((10,10),0)], (5,5,10,5)
surface.fill((0,0,0))
rect = pg.draw.ellipse(surface, (255,0,0), (5,5,10,5))
for pos in data[0]:
c = surface.get_at(pos[0])
c = {True:1,False:0}[c.r>0]
assert c == pos[1]
assert (rect.x,rect.y,rect.width,rect.height) == data[1]
def test_draw_arc():
data = [((10,0),0), ((10,5),1), ((10,10),0)], (5,5,11,6)
surface.fill((0,0,0))
rect = pg.draw.arc(surface, (255,0,0), (5,5,10,10), 0, 3.14)
for pos in data[0]:
c = surface.get_at(pos[0])
c = {True:1,False:0}[c.r>0]
assert c == pos[1]
if env['platform'] not in ('jvm','js'):
try:
assert (rect.x,rect.y,rect.width,rect.height) == data[1]
except AssertionError: #pg1.9.6
assert (rect.x,rect.y,rect.width,rect.height) == (5,5,10,10)
else: #update to new boundary process
assert (rect.x,rect.y,rect.width,rect.height) == (5,5,10,10)
def test_draw_polygon():
data = [((10,4),0), ((10,6),1), ((10,8),1)], (5,5,11,11)
surface.fill((0,0,0))
rect = pg.draw.polygon(surface, (255,0,0), ((10,5),(15,15),(5,15)))
for pos in data[0]:
c = surface.get_at(pos[0])
c = {True:1,False:0}[c.r>0]
assert c == pos[1]
assert (rect.x,rect.y,rect.width,rect.height) == data[1]
def test_draw_line():
data = [((10,6),0), ((10,8),1), ((10,10),0)], (5,8,11,1)
surface.fill((0,0,0))
rect = pg.draw.line(surface, (255,0,0), (5,8), (15,8), 1)
for pos in data[0]:
c = surface.get_at(pos[0])
c = {True:1,False:0}[c.r>0]
assert c == pos[1]
assert (rect.x,rect.y,rect.width,rect.height) == data[1]
def test_draw_lines():
data = [((10,6),0), ((10,8),1), ((10,10),0)], (5,8,11,1)
surface.fill((0,0,0))
rect = pg.draw.lines(surface, (255,0,0), True, ((7,8),(5,8),(15,8)))
for pos in data[0]:
c = surface.get_at(pos[0])
c = {True:1,False:0}[c.r>0]
assert c == pos[1]
assert (rect.x,rect.y,rect.width,rect.height) == data[1]
```
#### File: pyjsdl-ts/test/surface_test.py
```python
env = None
pg = None
surface = None
width = None
height = None
# __pragma__ ('opov')
def init(environ):
global env, pg, surface, width, height
env = environ
pg = env['pg']
surface = env['surface']
width = env['width']
height = env['height']
tests = [test_surface_get_size,
test_surface_get_rect,
test_surface_copy,
test_surface_blit,
test_surface_fill,
test_surface_set_colorkey,
test_surface_get_colorkey,
test_surface_set_at,
test_surface_get_at]
return tests
def _color_convert(color):
if isinstance(color,tuple):
if len(color) == 4:
r,g,b,a = color[0],color[1],color[2],color[3]
else:
r,g,b,a = color[0],color[1],color[2],255
else:
r,g,b,a = int((color>>16) & 0xff), int((color>>8) & 0xff), int(color & 0xff), int((color>>24) & 0xff)
return r,g,b,a
def test_surface_get_size():
assert surface.get_size() == (width,height)
assert surface.get_width() == width
assert surface.get_height() == height
def test_surface_get_rect():
rect = surface.get_rect()
if env['platform'] != 'js': #pyjs compares rect==tuple not __eq__
assert rect == (0,0,width,height)
assert (rect.x,rect.y,rect.width,rect.height) == (0,0,width,height)
rect = surface.get_rect(center=(15,15))
assert (rect.x,rect.y,rect.width,rect.height) == (5,5,width,height)
def test_surface_copy():
new_surface = surface.copy()
assert surface == surface
assert surface != new_surface
assert surface.get_size() == new_surface.get_size()
def test_surface_blit():
new_surface = pg.Surface((5,5))
surface.fill((0,0,0))
new_surface.fill((100,100,100))
rect = surface.blit(new_surface, (1,0))
if env['executor'] != 'pyjs':
assert surface.get_at((0,0)) == (0,0,0,255)
assert surface.get_at((1,0)) == (100,100,100,255)
else:
if not env['pyjs_opt']: #pyjs -s compares color==tuple not __eq__
assert surface.get_at((0,0)) == pg.Color(0,0,0,255)
assert surface.get_at((1,0)) == pg.Color(100,100,100,255)
else: #pyjs -O __eq__ ignored
c = surface.get_at((0,0))
assert (c.r,c.g,c.b,c.a) == (0,0,0,255)
c = surface.get_at((1,0))
assert (c.r,c.g,c.b,c.a) == (100,100,100,255)
assert (rect.x,rect.y,rect.width,rect.height) == (1,0,5,5)
def test_surface_fill():
color = (255,0,0), (0,255,0,255), (0xff<<24)+255
for c in color:
surface.fill((0,0,0))
surface.fill(c)
if env['executor'] != 'pyjs':
assert surface.get_at((0,0)) == _color_convert(c)
else:
cc = surface.get_at((0,0))
assert (cc[0],cc[1],cc[2],cc[3]) == _color_convert(c)
surface.fill((0,0,0))
surface.fill((255,0,0,255), (0,0,2,2))
if env['executor'] != 'pyjs':
assert surface.get_at((0,0)) == (255,0,0,255)
assert surface.get_at((2,2)) != (255,0,0,255)
else:
c = surface.get_at((0,0))
assert (c.r,c.g,c.b,c.a) == (255,0,0,255)
c = surface.get_at((2,2))
assert (c.r,c.g,c.b,c.a) != (255,0,0,255)
def test_surface_set_colorkey():
color = (255,0,0), (0,255,0,255), None
#color = (255,0,0), (0,255,0,255), (0xff<<24)+255, None #pg.error?
for c in color:
surface.set_colorkey(c)
if surface.get_colorkey():
if not env['pyjs_opt']:
assert pg.Color(*surface.get_colorkey()) == pg.Color(*c)
else: #pyjs -O no __eq__ call
r,g,b,a = pg.Color(*surface.get_colorkey())
cr,cg,cb,ca = pg.Color(*c)
assert r==cr and g==cg and b==cb and a==ca
def test_surface_get_colorkey():
surface.fill((0,0,0))
surface.set_colorkey((0,0,0))
assert surface.get_colorkey() == (0,0,0,255)
surface.set_colorkey(None)
assert surface.get_colorkey() is None
def test_surface_set_at():
color = (255,0,0), (0,255,0,255), (0xff<<24)+255
for c in color:
surface.fill((0,0,0))
surface.set_at((0,0), c)
if env['executor'] != 'pyjs':
assert surface.get_at((0,0)) == _color_convert(c)
else: #pyjs compares color==tuple not __eq__
cc = surface.get_at((0,0))
assert (cc.r,cc.g,cc.b,cc.a) == _color_convert(c)
def test_surface_get_at():
color = (0,0,255,255)
surface.fill((0,0,0))
surface.set_at((0,0), (0,0,255,255))
if env['executor'] != 'pyjs':
assert surface.get_at((0,0)) == (0,0,255,255)
assert surface.get_at((0,0)) == (0,0,255)
else: #pyjs compares color==tuple not __eq__
cc = surface.get_at((0,0))
assert (cc.r,cc.g,cc.b,cc.a) == (0,0,255,255)
```
#### File: pyjsdl-ts/test/time_test.py
```python
env = None
pg = None
wait = 0
# __pragma__ ('opov')
def init(environ):
global env, pg
env = environ
pg = env['pg']
tests = [test_time_delay,
test_time_wait,
test_time_timer]
return tests
def test_time_delay():
_time = 30
t = pg.time.get_ticks()
pg.time.delay(_time)
assert (pg.time.get_ticks()-t) >= _time
def test_time_wait():
global wait
_time = 30
if env['platform'] != 'js':
t = pg.time.get_ticks()
pg.time.wait(_time)
assert (pg.time.get_ticks()-t) >= _time
else:
if not wait:
wait = pg.time.get_ticks()
pg.time.wait(_time)
return True
else:
assert (pg.time.get_ticks()-wait) >= _time
wait = 0
return False
def test_time_timer():
global wait
_time = 30
event = pg.USEREVENT
if env['platform'] != 'js':
t = pg.time.get_ticks()
pg.event.clear()
pg.time.set_timer(event, _time)
evt = pg.event.wait()
pg.time.set_timer(event, 0)
assert evt.type == event
assert (pg.time.get_ticks()-t) >= _time
else:
if not wait:
wait = pg.time.get_ticks()
pg.event.clear()
pg.time.set_timer(event, _time)
pg.time.wait(_time)
return True
else:
evt = pg.event.get()[0]
pg.time.set_timer(event, 0)
assert evt.type == event
assert (pg.time.get_ticks()-wait) >= _time
wait = 0
return False
```
|
{
"source": "jggatter/cumulus",
"score": 3
}
|
#### File: demultiplexing/demuxlet/generate_zarr.py
```python
import argparse
import pegasusio as pio
import pandas as pd
parser = argparse.ArgumentParser(description='Merge demuxlet result with gene-count matrix.')
parser.add_argument('demux_res', metavar = 'demux_result.best', help = 'Demuxlet demultiplexing results.')
parser.add_argument('raw_mat', metavar = 'raw_feature_bc_matrix.h5', help = 'Raw gene count matrix in 10x format.')
parser.add_argument('out_file', metavar = 'output_result.zarr', help = 'Output zarr file.')
args = parser.parse_args()
demux_type_dict = {'SNG': 'singlet', 'DBL': 'doublet', 'AMB': 'unknown'}
def write_output(assignment_file: str, input_mat_file: str, output_zarr_file: str) -> None:
df = pd.read_csv(assignment_file, sep = '\t', header = 0, index_col = 'BARCODE')
df.index = pd.Index([x[:-2] for x in df.index])
df['demux_type'] = df['DROPLET.TYPE'].apply(lambda s: demux_type_dict[s])
df['assignment'] = ''
df.loc[df['demux_type'] == 'singlet', 'assignment'] = df.loc[df['demux_type'] == 'singlet', 'SNG.BEST.GUESS']
df.loc[df['demux_type'] == 'doublet', 'assignment'] = df.loc[df['demux_type'] == 'doublet', 'DBL.BEST.GUESS'].apply(lambda s: ','.join(s.split(',')[:-1]))
data = pio.read_input(input_mat_file)
data.obs['demux_type'] = ''
data.obs['assignment'] = ''
idx = data.obs_names.isin(df.index)
barcodes = data.obs_names[idx]
df_valid = df.loc[barcodes, ['demux_type', 'assignment']]
data.obs.loc[idx, 'demux_type'] = df_valid['demux_type'].values
data.obs.loc[idx, 'assignment'] = df_valid['assignment'].values
pio.write_output(data, output_zarr_file, zarr_zipstore = True)
if __name__ == '__main__':
write_output(args.demux_res, args.raw_mat, args.out_file)
```
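Since the script parses its three positional arguments at import time, the simplest way to illustrate the merge step is to call write_output directly; the file names below are placeholders, not outputs shipped with the workflow.
```python
# Hypothetical direct call; the three file names are placeholders only.
write_output(
    assignment_file='demuxlet_out.best',
    input_mat_file='raw_feature_bc_matrix.h5',
    output_zarr_file='demux_result.zarr',
)
```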
|
{
"source": "jggautier/dataverse-automation",
"score": 3
}
|
#### File: dataverse-automation/other_scripts/get_oaipmh_records.py
```python
import csv
import os
import time
import requests
from tkinter import filedialog
from tkinter import ttk
from tkinter import *
import xmltodict
####################################################################################
# Create GUI for getting user input
window = Tk()
window.title('Get record IDs in OAI-PMH feed')
window.geometry('625x450') # width x height
# Function called when Browse button is pressed
def retrieve_directory():
global directory
# Call the OS's file directory window and store selected object path as a global variable
directory = filedialog.askdirectory()
# Show user which directory she chose
label_showChosenDirectory = Label(window, text='You chose: ' + directory, anchor='w', foreground='green', wraplength=500, justify='left')
label_showChosenDirectory.grid(sticky='w', column=0, row=13, padx=20)
# Function called when Start button is pressed
def retrieve_input():
global baseUrl
global oaiSet
# Store what's entered in the OAI-PMH base URL text box as a global variable
baseUrl = entry_baseUrl.get().strip()
# Store what's entered in the OAI set text box as a global variable
oaiSet = entry_oaiSet.get().strip()
if baseUrl:
window.destroy()
# If no baseUrl is entered, display message that one is required
else:
print('An OAI-PMH base URL is required')
label_baseUrlRequired = Label(window, text='The repository\'s OAI-PMH URL is required.', foreground='red', anchor='w')
label_baseUrlRequired.grid(sticky='w', column=0, row=3, padx=20)
# Create label for BaseUrl field
label_baseUrl = Label(window, text='OAI-PMH Base URL:', anchor='w')
label_baseUrl.grid(sticky='w', column=0, row=0, padx=20)
# Create Base URL field
dataverseUrl = str()
entry_baseUrl = Entry(window, width=50, textvariable=dataverseUrl)
entry_baseUrl.grid(sticky='w', column=0, row=1, pady=2, padx=20)
# Create help text for BaseUrl field
label_dataverseUrlHelpText = Label(window, text='Example: https://demo.dataverse.org/oai', foreground='grey', anchor='w')
label_dataverseUrlHelpText.grid(sticky='w', column=0, row=2, padx=20)
# Create empty row in grid to improve spacing between the two fields
window.grid_rowconfigure(4, minsize=25)
# Create label for oaiSet key field
label_oaiSet = Label(window, text='OAI set name:', anchor='w')
label_oaiSet.grid(sticky='w', column=0, row=8, padx=20)
# Create oaiSet field
oaiSet = str()
entry_oaiSet = Entry(window, width=50, textvariable=oaiSet)
entry_oaiSet.grid(sticky='w', column=0, row=9, pady=2, padx=20)
# Create help text for oaiSet field
label_oaiSetHelpText = Label(window, text='If no OAI Set is entered, all records in the repository\'s OAI-PMH feed will be retrieved', foreground='grey', anchor='w')
label_oaiSetHelpText.grid(sticky='w', column=0, row=10, padx=20)
# Create empty row in grid to improve spacing between the two fields
window.grid_rowconfigure(11, minsize=25)
# Create label for Browse directory button
label_browseDirectory = Label(window, text='Choose folder to store CSV file with identifiers and statuses of harvested records:', anchor='w')
label_browseDirectory.grid(sticky='w', column=0, row=12, pady=2, padx=20)
# Create Browse directory button
button_browseDirectory = ttk.Button(window, text='Browse', command=lambda: retrieve_directory())
button_browseDirectory.grid(sticky='w', column=0, row=13, padx=20)
# Create start button
button_Submit = ttk.Button(window, text='Start', command=lambda: retrieve_input())
button_Submit.grid(sticky='w', column=0, row=15, pady=40, padx=20)
# Keep window open until it's closed
mainloop()
def improved_get(_dict, path, default=None):
for key in path.split('.'):
try:
_dict = _dict[key]
except KeyError:
return default
return str(_dict)
currentTime = time.strftime('%Y.%m.%d_%H.%M.%S')
metadataPrefix = 'oai_dc'
if oaiSet:
oaiUrl = '%s?verb=ListIdentifiers&set=%s&metadataPrefix=%s' % (baseUrl, oaiSet, metadataPrefix)
else:
oaiSet = 'no_set'
oaiUrl = '%s?verb=ListIdentifiers&metadataPrefix=%s' % (baseUrl, metadataPrefix)
csvFile = 'harvested_records_%s_%s.csv' % (oaiSet, currentTime)
csvFilePath = os.path.join(directory, csvFile)
print('Counting current and deleted records:')
response = requests.get(oaiUrl)
dictData = xmltodict.parse(response.content)
recordCount = 0
deletedRecordCount = 0
with open(csvFilePath, mode='w', encoding='utf-8', newline='') as f:
f = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
f.writerow(['record_identifier', 'record_status'])
if 'resumptionToken' not in dictData['OAI-PMH']['ListIdentifiers']:
for record in dictData['OAI-PMH']['ListIdentifiers']['header']:
recordIdentifier = record['identifier']
recordStatus = record.get('@status')
if recordStatus != 'deleted':
recordStatus = 'present'
recordCount += 1
elif recordStatus == 'deleted':
deletedRecordCount +=1
f.writerow([recordIdentifier, recordStatus])
print('Record count in %s set: %s' % (oaiSet, recordCount))
print('Count of deleted records: %s' % (deletedRecordCount))
elif 'resumptionToken' in dictData['OAI-PMH']['ListIdentifiers']:
pageCount = 1
print('Counting records in page %s' % (pageCount), end='\r', flush=True)
resumptionToken = improved_get(dictData, 'OAI-PMH.ListIdentifiers.resumptionToken.#text')
for record in dictData['OAI-PMH']['ListIdentifiers']['header']:
recordIdentifier = record['identifier']
recordStatus = record.get('@status')
if recordStatus != 'deleted':
recordStatus = 'present'
recordCount += 1
elif recordStatus == 'deleted':
deletedRecordCount +=1
f.writerow([recordIdentifier, recordStatus])
resumptionToken = improved_get(dictData, 'OAI-PMH.ListIdentifiers.resumptionToken.#text')
while resumptionToken is not None:
pageCount += 1
print('Counting records in page %s' % (pageCount), end='\r', flush=True)
oaiUrlResume = '%s?verb=ListIdentifiers&resumptionToken=%s' % (baseUrl, resumptionToken)
response = requests.get(oaiUrlResume)
dictData = xmltodict.parse(response.content)
for record in dictData['OAI-PMH']['ListIdentifiers']['header']:
recordIdentifier = record['identifier']
recordStatus = record.get('@status')
if recordStatus != 'deleted':
recordStatus = 'present'
recordCount += 1
elif recordStatus == 'deleted':
deletedRecordCount +=1
f.writerow([recordIdentifier, recordStatus])
resumptionToken = improved_get(dictData, 'OAI-PMH.ListIdentifiers.resumptionToken.#text')
print('\nRecord count in %s set: %s' % (oaiSet, recordCount))
print('Count of deleted records: %s' % (deletedRecordCount))
print('Record identifiers saved to %s' % (csvFilePath))
```
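The improved_get helper walks a dotted key path through the dictionary produced by xmltodict and falls back to a default when any key is missing. A small, self-contained check, assuming the helper is copied into a plain Python session:
```python
# Runnable check of the improved_get helper defined above.
record = {'OAI-PMH': {'ListIdentifiers': {'resumptionToken': {'#text': 'abc123'}}}}
print(improved_get(record, 'OAI-PMH.ListIdentifiers.resumptionToken.#text'))  # 'abc123'
print(improved_get(record, 'OAI-PMH.ListRecords.resumptionToken.#text'))      # None
```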
|
{
"source": "jggautier/dataverse_scripts",
"score": 4
}
|
#### File: jggautier/dataverse_scripts/combine_tables.py
```python
from functools import reduce
import glob
import os
import pandas as pd
from tkinter import filedialog
from tkinter import ttk
from tkinter import *
# Create GUI for getting user input
# Create, title and size the window
window = Tk()
window.title('Join CSV files')
window.geometry('550x250') # width x height
# Function called when Browse button is pressed
def retrieve_csvdirectory():
global csvDirectory
# Call the OS's file directory window and store selected object path as a global variable
csvDirectory = filedialog.askdirectory()
# Show user which directory she chose
label_showChosenDirectory = Label(window, text='You chose: ' + csvDirectory, anchor='w', foreground='green', wraplength=500, justify='left')
label_showChosenDirectory.grid(sticky='w', column=0, row=2)
# Function called when Browse button is pressed
def retrieve_mergedfiledirectory():
global mergedFileDirectory
# Call the OS's file directory window and store selected object path as a global variable
mergedFileDirectory = filedialog.askdirectory()
# Show user which directory she chose
label_showChosenDirectory = Label(window, text='You chose: ' + mergedFileDirectory, anchor='w', foreground='green', wraplength=500, justify='left')
label_showChosenDirectory.grid(sticky='w', column=0, row=6)
# Function called when Start button is pressed
def start():
window.destroy()
# Create label for button to browse for directory containing JSON files
label_getCsvFiles = Label(window, text='Choose folder containing the CSV files to join:', anchor='w')
label_getCsvFiles.grid(sticky='w', column=0, row=0, pady=2)
# Create button to browse for directory containing the CSV files to join
button_getCsvFiles = ttk.Button(window, text='Browse', command=lambda: retrieve_csvdirectory())
button_getCsvFiles.grid(sticky='w', column=0, row=1)
# Create empty row in grid to improve spacing between the two fields
window.grid_rowconfigure(3, minsize=25)
# Create label for button to browse for directory to store the merged CSV file in
label_mergedFileDirectory = Label(window, text='Choose folder to store the CSV file:', anchor='w')
label_mergedFileDirectory.grid(sticky='w', column=0, row=4, pady=2)
# Create button to browse for directory to store the merged CSV file in
button_mergedFileDirectory = ttk.Button(window, text='Browse', command=lambda: retrieve_mergedfiledirectory())
button_mergedFileDirectory.grid(sticky='w', column=0, row=5)
# Create start button
button_Start = ttk.Button(window, text='Start', command=lambda: start())
button_Start.grid(sticky='w', column=0, row=7, pady=40)
# Keep window open until it's closed
mainloop()
directory_name = csvDirectory.split('/')[-1]
# Create CSV file in the directory that the user selected
filename = os.path.join(mergedFileDirectory, '%s_merged.csv' % (directory_name))
# Save directory paths to each CSV file as a list and save in 'all_tables' variable
all_tables = glob.glob(os.path.join(csvDirectory, '*.csv'))
print('Creating a dataframe for each CSV file...')
# Create a dataframe of each CSV file in the 'all-tables' list
dataframes = [pd.read_csv(table, sep=',') for table in all_tables]
# For each dataframe, set the indexes (or the common columns across the dataframes to join on)
for dataframe in dataframes:
dataframe.set_index(['datasetVersionId', 'persistentUrl', 'persistent_id'], inplace=True)
print('Joining dataframes into one dataframe...')
# Merge all dataframes and save to the 'merged' variable
merged = reduce(lambda left, right: left.join(right, how='outer'), dataframes)
print('Exporting joined dataframe to a CSV file...')
# Export merged dataframe to a CSV file
merged.to_csv(filename)
print('Joined dataframe exported to %s' % (filename))
```
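The core of the script is the reduce/join pattern over dataframes that share index columns. A small illustration with made-up rows and a single shared index column:
```python
# Self-contained illustration of the outer-join pattern used above.
from functools import reduce
import pandas as pd

left = pd.DataFrame({'persistent_id': ['doi:10.5072/A', 'doi:10.5072/B'],
                     'title': ['Dataset A', 'Dataset B']})
right = pd.DataFrame({'persistent_id': ['doi:10.5072/A'],
                      'keyword': ['ocean']})
for df in (left, right):
    df.set_index('persistent_id', inplace=True)
merged = reduce(lambda l, r: l.join(r, how='outer'), [left, right])
print(merged)  # 'keyword' is NaN for doi:10.5072/B after the outer join
```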
#### File: jggautier/dataverse_scripts/get_dataset_lock_info.py
```python
import csv
from csv import DictReader
from datetime import datetime
from dateutil import tz
import os
import requests
import time
repositoryURL = ''
inputFile = '' # Path to .txt or .csv file with persistent IDs of datasets whose lock info will be retrieved
directory = '' # Path to directory where CSV file containing lock info will be created
# Function for converting given timestamp string into datetime object with local timezone
def convert_to_local_tz(timestamp):
# Save local timezone to localTimezone variable
localTimezone = tz.tzlocal()
# Convert string to datetime object
timestamp = datetime.strptime(timestamp, '%a %b %d %H:%M:%S %Z %Y')
# Convert from UTC to local timezone
timestamp = timestamp.astimezone(localTimezone)
return timestamp
current_time = time.strftime('%Y.%m.%d_%H.%M.%S')
datasetPIDs = []
if '.csv' in inputFile:
with open(inputFile, mode='r', encoding='utf-8') as f:
csvDictReader = DictReader(f, delimiter=',')
for row in csvDictReader:
datasetPIDs.append(row['persistent_id'].rstrip())
elif '.txt' in inputFile:
inputFile = open(inputFile)
for datasetPID in inputFile:
# Remove any trailing spaces from datasetPID
datasetPIDs.append(datasetPID.rstrip())
total = len(datasetPIDs)
count = 0
# Create CSV file
csvOutputFile = 'dataset_locked_status_%s.csv' % (current_time)
csvOutputFilePath = os.path.join(directory, csvOutputFile)
with open(csvOutputFilePath, mode='w', newline='') as f:
f = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
f.writerow(['persistent_id', 'locked', 'reason', 'locked_date', 'user_name'])
for datasetPID in datasetPIDs:
url = '%s/api/datasets/:persistentId/locks?persistentId=%s' % (repositoryURL, datasetPID)
req = requests.get(url)
data = req.json()
count += 1
if len(data['data']) > 0:
for lock in data['data']:
locked = True
reason = lock['lockType']
lockedDate = convert_to_local_tz(lock['date'])
userName = lock['user']
f.writerow([datasetPID, locked, reason, lockedDate, userName])
print('%s of %s datasets: %s' % (count, total, datasetPID))
else:
locked = False
reason = 'NA (Not locked)'
lockedDate = ''
userName = 'NA (Not locked)'
f.writerow([datasetPID, locked, reason, lockedDate, userName])
print('%s of %s datasets: %s' % (count, total, datasetPID))
```
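The convert_to_local_tz helper parses timestamps in the '%a %b %d %H:%M:%S %Z %Y' shape and localizes them with astimezone. A quick check with a made-up timestamp (requires python-dateutil):
```python
# Hypothetical timestamp string matching the strptime pattern used above.
localized = convert_to_local_tz('Mon Feb 01 15:30:00 UTC 2021')
print(localized.isoformat())
```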
|
{
"source": "jggautier/dataverse-scripts",
"score": 3
}
|
#### File: dataverse-scripts/other_scripts/get_dataset_PIDs.py
```python
import csv
import glob
import json
import os
import requests
import sys
import time
from tkinter import filedialog
from tkinter import ttk
from tkinter import *
from urllib.parse import urlparse
####################################################################################
# Create GUI for getting user input
window = Tk()
window.title('Get dataset PIDs')
window.geometry('625x450') # width x height
# Function called when Browse button is pressed
def retrieve_directory():
global directory
# Call the OS's file directory window and store selected object path as a global variable
directory = filedialog.askdirectory()
# Show user which directory she chose
label_showChosenDirectory = Label(window, text='You chose: ' + directory, anchor='w', foreground='green', wraplength=500, justify='left')
label_showChosenDirectory.grid(sticky='w', column=0, row=13)
# Function called when Start button is pressed
def retrieve_input():
global dataverseUrl
global apikey
global get_subdataverses
# Record if user wants to search in subdataverses
get_subdataverses = get_subdataverses.get()
# Store what's entered in the API key text box as a global variable
apikey = entry_apikey.get().rstrip()
# Store what's entered in dataverseUrl text box as a global variable
dataverseUrl = entry_dataverseUrl.get()
# If user enters text in dataverseUrl text box, strip any white characters
if dataverseUrl:
dataverseUrl = str(dataverseUrl)
dataverseUrl = dataverseUrl.strip()
# If user also selected a directory, close the window
if directory:
window.destroy()
# If no dataverseUrl is entered, display message that one is required
else:
print('A dataverse URL is required')
label_dataverseUrlRequired = Label(window, text='A dataverse URL is required.', foreground='red', anchor='w')
label_dataverseUrlRequired.grid(sticky='w', column=0, row=3)
# Create label for Dataverse URL field
label_dataverseUrl = Label(window, text='Dataverse URL:', anchor='w')
label_dataverseUrl.grid(sticky='w', column=0, row=0)
# Create Dataverse URL field
dataverseUrl = str()
entry_dataverseUrl = Entry(window, width=50, textvariable=dataverseUrl)
entry_dataverseUrl.grid(sticky='w', column=0, row=1, pady=2)
entry_dataverseUrl.insert(0, 'https://demo.dataverse.org/')
# Create help text for Dataverse URL field
label_dataverseUrlHelpText = Label(window, text='Example: https://demo.dataverse.org or https://demo.dataverse.org/dataverse/dataversealias', foreground='grey', anchor='w')
label_dataverseUrlHelpText.grid(sticky='w', column=0, row=2)
# Create empty row in grid to improve spacing between the two fields
window.grid_rowconfigure(4, minsize=25)
# Create "Include subdataverses" checkbox
get_subdataverses = IntVar()
c = Checkbutton(window, text="Include subdataverses", variable=get_subdataverses).grid(sticky='w', column=0, row=5)
# Create help text for "Include subdataverses" checkbox
label_apikeyHelpText = Label(
window,
text='If the URL of the "Root" Dataverse collection is entered, all datasets in the repository (in all subdataverses) will be found',
foreground='grey', anchor='w', wraplength=500, justify='left')
label_apikeyHelpText.grid(sticky='w', column=0, row=6)
# Create empty row in grid to improve spacing between the two fields
window.grid_rowconfigure(7, minsize=25)
# Create label for API key field
label_apikey = Label(window, text='API key:', anchor='w')
label_apikey.grid(sticky='w', column=0, row=8)
# Create API key field
apikey = str()
entry_apikey = Entry(window, width=50, textvariable=apikey)
entry_apikey.grid(sticky='w', column=0, row=9, pady=2)
# Create help text for API key field
label_apikeyHelpText = Label(window, text='If no API key is entered, only published datasets will be found', foreground='grey', anchor='w')
label_apikeyHelpText.grid(sticky='w', column=0, row=10)
# Create empty row in grid to improve spacing between the two fields
window.grid_rowconfigure(11, minsize=25)
# Create label for Browse directory button
label_browseDirectory = Label(window, text='Choose folder to store CSV file with info of dataset PIDs:', anchor='w')
label_browseDirectory.grid(sticky='w', column=0, row=12, pady=2)
# Create Browse directory button
button_browseDirectory = ttk.Button(window, text='Browse', command=lambda: retrieve_directory())
button_browseDirectory.grid(sticky='w', column=0, row=13)
# Create start button
button_Submit = ttk.Button(window, text='Start', command=lambda: retrieve_input())
button_Submit.grid(sticky='w', column=0, row=15, pady=40)
# Keep window open until it's closed
mainloop()
# Save current time to append it to main folder name
current_time = time.strftime('%Y.%m.%d_%H.%M.%S')
# Parse dataverseUrl to get server name and alias
parsed = urlparse(dataverseUrl)
server = parsed.scheme + '://' + parsed.netloc
# alias = parsed.path.split('/')[2]
try:
alias = parsed.path.split('/')[2]
except IndexError:
alias = ''
# Get alias of the root dataverse (assuming the root dataverse's ID is 1, which isn't the case with UVA Dataverse)
url = '%s/api/dataverses/1' % (server)
response = requests.get(url)
dataverse_data = response.json()
root_alias = dataverse_data['data']['alias']
installation_name = dataverse_data['data']['name']
####################################################################################
# If user provides no alias or the alias is the repository's root alias,
# use Search API to find PIDs of all datasets in repository
if not alias or alias == root_alias:
# Create CSV file
csv_file = 'dataset_pids_%s_%s.csv' % (installation_name.replace(' ', '_'), current_time)
csv_file_path = os.path.join(directory, csv_file)
with open(csv_file_path, mode='w', newline='') as f:
f = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
f.writerow(['persistent_id', 'persistentUrl', 'dataverse_name', 'dataverse_alias', 'publication_date'])
# Report count of datasets
if apikey:
url = '%s/api/v1/search?q=*&fq=-metadataSource:"Harvested"&type=dataset&per_page=1&start=0&sort=date&order=desc&key=%s' % (server, apikey)
response = requests.get(url)
data = response.json()
total = data['data']['total_count']
print('\nSaving %s dataset PIDs\n(Search API returns the draft and published version of a dataset. List will be de-duplicated at the end):' % (total))
else:
url = '%s/api/v1/search?q=*&fq=-metadataSource:"Harvested"&type=dataset&per_page=1&start=0&sort=date&order=desc' % (server)
response = requests.get(url)
data = response.json()
total = data['data']['total_count']
print('\nSaving %s dataset PIDs:' % (total))
# Initialization for paginating through Search API results and showing progress
start = 0
condition = True
count = 0
# Create variable for storing count of misindexed datasets
misindexed_datasets_count = 0
while condition:
try:
per_page = 10
if apikey:
url = '%s/api/v1/search?q=*&fq=-metadataSource:"Harvested"&type=dataset&per_page=%s&start=%s&sort=date&order=desc&key=%s' % (server, per_page, start, apikey)
else:
url = '%s/api/v1/search?q=*&fq=-metadataSource:"Harvested"&type=dataset&per_page=%s&start=%s&sort=date&order=desc' % (server, per_page, start)
response = requests.get(url)
data = response.json()
# For each item object...
for i in data['data']['items']:
persistent_id = i['global_id']
persistent_url = i['url']
dataverse_name = i['name_of_dataverse']
dataverse_alias = i['identifier_of_dataverse']
publicationDate = i.get('published_at', 'unpublished')
with open(csv_file_path, mode='a', encoding='utf-8', newline='') as open_csv_file:
open_csv_file = csv.writer(open_csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# Create new row with dataset and file info
open_csv_file.writerow([persistent_id, persistent_url, dataverse_name, dataverse_alias, publicationDate])
count += 1
print('%s of %s' % (count, total), end='\r', flush=True)
# Update variables to paginate through the search results
start = start + per_page
# Print error message if misindexed datasets break the Search API call, and try the next page. (See https://github.com/IQSS/dataverse/issues/4225)
except urllib.error.URLError:
try:
per_page = 1
if apikey:
url = '%s/api/v1/search?q=*&fq=-metadataSource:"Harvested"&type=dataset&per_page=%s&start=%s&sort=date&order=desc&key=%s' % (server, per_page, start, apikey)
else:
url = '%s/api/v1/search?q=*&fq=-metadataSource:"Harvested"&type=dataset&per_page=%s&start=%s&sort=date&order=desc' % (server, per_page, start)
response = requests.get(url)
data = response.json()
# For each item object...
for i in data['data']['items']:
persistent_id = i['global_id']
persistent_url = i['url']
dataverse_name = i['name_of_dataverse']
dataverse_alias = i['identifier_of_dataverse']
publicationDate = i.get('published_at', 'unpublished')
with open(csv_file_path, mode='a', encoding='utf-8', newline='') as open_csv_file:
open_csv_file = csv.writer(open_csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# Create new row with dataset and file info
open_csv_file.writerow([persistent_id, persistent_url, dataverse_name, dataverse_alias, publicationDate])
print('%s of %s' % (count, total), end='\r', flush=True)
# Update variables to paginate through the search results
start = start + per_page
except urllib.error.URLError:
misindexed_datasets_count += 1
start = start + per_page
# Stop paginating when there are no more results
condition = start < total
print('\nDataset PIDs written to the CSV file: %s' % (count))
if misindexed_datasets_count:
print('\n\nUnretrievable dataset PIDs due to misindexing: %s\n' % (misindexed_datasets_count))
####################################################################################
# If user provides an alias, and it isn't the root dataverse's alias, use "Get content" endpoints instead of Search API
else:
csv_file = 'dataset_pids_%s_%s.csv' % (alias, current_time)
csv_file_path = os.path.join(directory, csv_file)
with open(csv_file_path, mode='w', newline='') as f:
f = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
f.writerow(['persistent_id', 'persistentUrl', 'dataverse_name', 'dataverse_alias', 'publication_date'])
# Get ID of given dataverse alias
if apikey:
url = '%s/api/dataverses/%s?key=%s' % (server, alias, apikey)
else:
url = '%s/api/dataverses/%s' % (server, alias)
response = requests.get(url)
data = response.json()
parent_dataverse_id = data['data']['id']
# Create list and add ID of given dataverse
dataverse_ids = [parent_dataverse_id]
# If user wants datasets in subdataverses, search for and include IDs of subdataverses (excludes linked dataverses)
# Get each subdataverse in the given dataverse
if get_subdataverses == 1:
print('\nGetting dataverse IDs in %s:' % (alias))
for dataverse_id in dataverse_ids:
sys.stdout.write('.')
sys.stdout.flush()
if apikey:
url = '%s/api/dataverses/%s/contents?key=%s' % (server, dataverse_id, apikey)
else:
url = '%s/api/dataverses/%s/contents' % (server, dataverse_id)
response = requests.get(url)
data = response.json()
for i in data['data']:
if i['type'] == 'dataverse':
dataverse_id = i['id']
dataverse_ids.extend([dataverse_id])
print('\n\nFound 1 dataverse and %s subdataverses' % (len(dataverse_ids) - 1))
# For each dataverse in the list, add the PIDs of all datasets to a CSV file - excludes linked and harvested datasets
print('\nWriting dataset IDs to %s:' % (csv_file_path))
count = 0
with open(csv_file_path, mode='a', encoding='utf-8', newline='') as open_csv_file:
open_csv_file = csv.writer(open_csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for dataverse_id in dataverse_ids:
# Get name of dataverse
if apikey:
url = '%s/api/dataverses/%s?key=%s' % (server, dataverse_id, apikey)
else:
url = '%s/api/dataverses/%s' % (server, dataverse_id)
response = requests.get(url, timeout=10)
data = response.json()
dataverse_name = data['data']['name']
dataverse_alias = data['data']['alias']
# Get content of dataverse
if apikey:
url = '%s/api/dataverses/%s/contents?key=%s' % (server, dataverse_id, apikey)
else:
url = '%s/api/dataverses/%s/contents' % (server, dataverse_id)
response = requests.get(url)
data = response.json()
for i in data['data']:
if i['type'] == 'dataset':
protocol = i['protocol']
authority = i['authority']
identifier = i['identifier']
persistent_id = '%s:%s/%s' % (protocol, authority, identifier)
persistent_url = i['persistentUrl']
publicationDate = i.get('publicationDate', 'unpublished')
count += 1
# Create new line with dataset PID
open_csv_file.writerow([persistent_id, persistent_url, dataverse_name, dataverse_alias, publicationDate])
# As a progress indicator, print a dot each time a row is written
sys.stdout.write('.')
sys.stdout.flush()
print('\n\nDataset PIDs written to the CSV file: %s' % (count))
```
#### File: other_scripts/get-dataverse-metadata/get_dataset_json_metadata.py
```python
import csv
from csv import DictReader
import json
import os
from pathlib import Path
import requests
import time
from tkinter import *
from tkinter import filedialog
from tkinter import ttk
import sys
# Create GUI for getting user input
window = Tk()
window.title('Get dataset metadata')
window.geometry('650x600') # width x height
# Create label for Dataverse repository URL
labelRepositoryURL = Label(window, text='Enter Dataverse repository URL:', anchor='w')
labelRepositoryURL.grid(sticky='w', column=0, row=0)
# Create Dataverse repository URL text box
repositoryURL = str()
entryRepositoryURL = Entry(window, width=50, textvariable=repositoryURL)
entryRepositoryURL.grid(sticky='w', column=0, row=1, pady=2)
# Create help text for server name field
labelDataverseUrlHelpText = Label(window, text='Example: https://demo.dataverse.org/', foreground='grey', anchor='w')
labelDataverseUrlHelpText.grid(sticky='w', column=0, row=2)
# Create empty row in grid to improve spacing between the two fields
window.grid_rowconfigure(3, minsize=25)
# Create label for API key field
labelApikey = Label(window, text='API token/key:', anchor='w')
labelApikey.grid(sticky='w', column=0, row=4)
# Create API key field
apikey = str()
entryApikey = Entry(window, width=50, textvariable=apikey)
entryApikey.grid(sticky='w', column=0, row=5, pady=2)
# Create help text for API key field
labelApikeyHelpText = Label(window, text='If no API token/key is entered, only published metadata will be downloaded', foreground='grey', anchor='w')
labelApikeyHelpText.grid(sticky='w', column=0, row=6)
# Create empty row in grid to improve spacing between the two fields
window.grid_rowconfigure(7, minsize=25)
getAllVersionMetadata = IntVar()
Checkbutton(window, text="Get metadata of all dataset versions", variable=getAllVersionMetadata).grid(sticky='w', column=0, row=8)
# Create help text for all versions checkbox
labelAllVersionMetadataHelpText = Label(window, text='If unchecked, only metadata of latest dataset version will be downloaded', foreground='grey', anchor='w')
labelAllVersionMetadataHelpText.grid(sticky='w', column=0, row=9)
# Create empty row in grid to improve spacing between the two fields
window.grid_rowconfigure(10, minsize=25)
# Create label for Browse directory button
labelBrowseForFile = Label(window, text='Choose CSV or TXT file containing list of dataset PIDs:', anchor='w')
labelBrowseForFile.grid(sticky='w', column=0, row=11, pady=2)
# Create Browse directory button
buttonBrowseForFile = ttk.Button(window, text='Browse', command=lambda: retrieve_file())
buttonBrowseForFile.grid(sticky='w', column=0, row=12)
# Create empty row in grid to improve spacing between the two fields
window.grid_rowconfigure(14, minsize=25)
# Create label for Browse directory button
labelBrowseDirectory = Label(window, text='Choose folder to put the metadata files and metadatablock files folders into:', anchor='w')
labelBrowseDirectory.grid(sticky='w', column=0, row=15, pady=2)
# Create Browse directory button
buttonBrowseDirectory = ttk.Button(window, text='Browse', command=lambda: retrieve_directory())
buttonBrowseDirectory.grid(sticky='w', column=0, row=16)
# Create start button
buttonSubmit = ttk.Button(window, text='Start', command=lambda: retrieve_input())
buttonSubmit.grid(sticky='w', column=0, row=18, pady=40)
# Function called when Browse button is pressed for choosing text file with dataset PIDs
def retrieve_file():
global datasetPIDFile
# Call the OS's file directory window and store selected object path as a global variable
datasetPIDFile = filedialog.askopenfilename(filetypes=[('Text files', '*.txt'), ('CSV files', '*.csv')])
# Show user which file she chose
labelShowChosenFile = Label(window, text='You chose: ' + datasetPIDFile, anchor='w', foreground='green', wraplength=500, justify='left')
labelShowChosenFile.grid(sticky='w', column=0, row=13)
# Function called when Browse button is pressed
def retrieve_directory():
global metadataFileDirectory
# Call the OS's file directory window and store selected object path as a global variable
metadataFileDirectory = filedialog.askdirectory()
# Show user which directory she chose
labelShowChosenDirectory = Label(
window,
text='You chose: ' + metadataFileDirectory,
anchor='w', foreground='green',
wraplength=500, justify='left'
)
labelShowChosenDirectory.grid(sticky='w', column=0, row=17)
# Function called when Start button is pressed
def retrieve_input():
global repositoryURL
global apikey
global getAllVersionMetadata
# Record if user wants metadata from all dataset versions
getAllVersionMetadata = getAllVersionMetadata.get()
# Store what's entered in dataverseUrl text box as a global variable
repositoryURL = entryRepositoryURL.get()
# Store what's entered in the API key text box as a global variable
apikey = entryApikey.get().rstrip()
window.destroy()
# Keep window open until it's closed
mainloop()
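# improved_get safely walks a nested dict along a dot-separated key path (e.g. 'data.publicationDate'),
# returning the value as a string, or the default if any key along the path is missing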
def improved_get(_dict, path, default=None):
for key in path.split('.'):
try:
_dict = _dict[key]
except KeyError:
return default
return str(_dict)
# Save current time to append it to main folder name
currentTime = time.strftime('%Y.%m.%d_%H.%M.%S')
# Use the "Get Version" endpoint to get repository's Dataverse version (or set version as 'NA')
getInstallationVersionApiUrl = '%s/api/v1/info/version' % (repositoryURL)
response = requests.get(getInstallationVersionApiUrl)
getInstallationVersionApiData = response.json()
dataverseVersion = getInstallationVersionApiData['data']['version']
dataverseVersion = str(dataverseVersion.lstrip('v'))
# Create main directory name with current time
metadataFileDirectoryPath = str(Path(metadataFileDirectory)) + '/' + 'JSON_metadata_%s' % (currentTime)
# Create name for metadatablock files directory in main directory
metadatablockFileDirectoryPath = str(Path(metadataFileDirectory)) + '/' + 'metadatablocks_v%s' % (dataverseVersion)
# Create dataset metadata and metadatablock directories
os.mkdir(metadataFileDirectoryPath)
os.mkdir(metadatablockFileDirectoryPath)
# Download metadatablock JSON files
# Get list of the repository's metadatablock names
metadatablocksApi = '%s/api/v1/metadatablocks' % (repositoryURL)
metadatablocksApi = metadatablocksApi.replace('//api', '/api')
response = requests.get(metadatablocksApi)
data = response.json()
metadatablockNames = []
for i in data['data']:
name = i['name']
metadatablockNames.append(name)
print('Downloading %s metadatablock JSON file(s) into metadatablocks folder:' % ((len(metadatablockNames))))
for metadatablockName in metadatablockNames:
metadatablockApi = '%s/%s' % (metadatablocksApi, metadatablockName)
response = requests.get(metadatablockApi)
metadatablockFile = str(Path(metadatablockFileDirectoryPath)) + '/' + '%s_v%s.json' % (metadatablockName, dataverseVersion)
with open(metadatablockFile, mode='w') as f:
f.write(json.dumps(response.json(), indent=4))
sys.stdout.write('.')
sys.stdout.flush()
print('\nFinished downloading %s metadatablock JSON file(s)' % (len(metadatablockNames)))
if getAllVersionMetadata != 1:
print('\nDownloading JSON metadata of latest published dataset versions to dataset_metadata folder:')
elif getAllVersionMetadata == 1:
print('\nDownloading JSON metadata of all published dataset versions to dataset_metadata folder:')
# Initiate count for terminal progress indicator
count = 0
datasetPIDs = []
if '.csv' in datasetPIDFile:
with open(datasetPIDFile, mode='r', encoding='utf-8') as f:
csvDictReader = DictReader(f, delimiter=',')
for row in csvDictReader:
datasetPIDs.append(row['persistent_id'].rstrip())
elif '.txt' in datasetPIDFile:
datasetPIDFile = open(datasetPIDFile)
for datasetPID in datasetPIDFile:
# Remove any trailing spaces from datasetPID
datasetPIDs.append(datasetPID.rstrip())
total = len(datasetPIDs)
for datasetPID in datasetPIDs:
try:
latestVersionUrl = '%s/api/datasets/:persistentId' % (repositoryURL)
params = {'persistentId': datasetPID}
if apikey:
params['key'] = apikey
response = requests.get(
latestVersionUrl,
params=params)
latestVersionMetadata = response.json()
# If the dataset has a database ID
if 'id' in latestVersionMetadata['data']:
persistentUrl = latestVersionMetadata['data']['persistentUrl']
publisher = latestVersionMetadata['data']['publisher']
publicationDate = improved_get(latestVersionMetadata, 'data.publicationDate')
if 'id' in latestVersionMetadata['data']:
if getAllVersionMetadata != 1:
datasetVersion = {
'status': latestVersionMetadata['status'],
'data': {
'persistentUrl': persistentUrl,
'publisher': publisher,
'publicationDate': publicationDate,
'datasetVersion': latestVersionMetadata['data']['latestVersion']}}
metadataFile = '%s.json' % (datasetPID.replace(':', '_').replace('/', '_'))
with open(os.path.join(metadataFileDirectoryPath, metadataFile), mode='w') as f:
f.write(json.dumps(datasetVersion, indent=4))
else:
allVersionUrl = '%s/api/datasets/:persistentId/versions' % (repositoryURL)
params = {'persistentId': datasetPID}
if apikey:
params['key'] = apikey
response = requests.get(
allVersionUrl,
params=params)
allVersionsMetadata = response.json()
for datasetVersion in allVersionsMetadata['data']:
datasetVersion = {
'status': latestVersionMetadata['status'],
'data': {
'persistentUrl': persistentUrl,
'publisher': publisher,
'publicationDate': publicationDate,
'datasetVersion': datasetVersion}}
majorVersion = improved_get(datasetVersion, 'data.datasetVersion.versionNumber')
minorVersion = improved_get(datasetVersion, 'data.datasetVersion.versionMinorNumber')
if (majorVersion is not None) and (minorVersion is not None):
versionNumber = majorVersion + '.' + minorVersion
metadataFile = '%s_v%s.json' % (datasetPID.replace(':', '_').replace('/', '_'), versionNumber)
else:
metadataFile = '%s_vDRAFT.json' % (datasetPID.replace(':', '_').replace('/', '_'))
with open(os.path.join(metadataFileDirectoryPath, metadataFile), mode='w') as f:
f.write(json.dumps(datasetVersion, indent=4))
# Increase count variable to track progress
count += 1
# Print progress
print('%s of %s datasets' % (count, total), end='\r', flush=True)
except Exception:
print('Could not download JSON metadata of %s' % (datasetPID))
```
|
{
"source": "jggautier/dataverse_scripts",
"score": 3
}
|
#### File: jggautier/dataverse_scripts/parse_metadatablock_json_files.py
```python
import csv
import json
import os
from pathlib import Path
# Function for getting list of non-hidden directories inside of a given directory
def listdir_nohidden(path):
directories = []
for f in os.listdir(path):
if not f.startswith('.'):
directories.append(f)
return directories
# Enter path to directory that contains the folders and files created by the get_dataset_metadata_of_all_installations.py script
main_directory = ''
# Enter path to directory to store CSV file that this script will create
csvfile_folder = ''
csvfile = str(Path(csvfile_folder + '/' + 'metadatablocks.csv'))
with open(csvfile, mode='w') as data:
data = csv.writer(data, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# Create header row
data.writerow(['installation_name_(Dataverse_version)', 'metadatablock_name', 'parentfield_name', 'subfield_name'])
count = 0
total = len(listdir_nohidden(main_directory))
# for repository_file_name in os.listdir(main_directory):
for repository_file_name in listdir_nohidden(main_directory):
count += 1
# Get the repository name
size = len(repository_file_name)
repository_name = repository_file_name[:size - 20]
print('Parsing metadatablocks: %s of %s: %s' % (count, total, repository_name))
# Open each installation folder
repository_folder_path = str(Path(main_directory + '/' + repository_file_name))
if os.path.isdir(repository_folder_path):
for sub_folder in os.listdir(repository_folder_path):
if 'metadatablocks' in sub_folder:
metadatablock_folder_path = str(Path(repository_folder_path + '/' + sub_folder))
# Open each .json file
for metadatablock_file in os.listdir(metadatablock_folder_path):
# Get only the metadatablock name from the name of each metadatablock JSON file
metadatablock_name = metadatablock_file.split('_', 1)[0]
# Remove the '.json' extension explicitly (rstrip strips a character set, not a suffix)
version = metadatablock_file.split('_v', 1)[1].rsplit('.json', 1)[0]
# Get repository name and combine with version
repository_name_version = '%s_(%s)' % (repository_name, version)
metadatablock_file_path = str(Path(metadatablock_folder_path + '/' + metadatablock_file))
with open(metadatablock_file_path, 'r') as f: # Open file in read mode
metadatablock_data = f.read() # Copy content to dataset_metadata variable
metadatablock_data = json.loads(metadatablock_data) # Load content as a python dict
# Get the names of fields that have childfields
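# (assumption: the first property listed for each field is its name, which is the value recorded below)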
compoundfields = []
for parentfield in metadatablock_data['data']['fields']:
properties = metadatablock_data['data']['fields'][parentfield]
for property in properties:
if 'childFields' in properties:
field = properties[property]
compoundfields.append(field)
break
all_parent_and_child_fields = []
for parentfield in compoundfields:
if parentfield in metadatablock_data['data']['fields']:
properties = metadatablock_data['data']['fields'][parentfield]['childFields']
all_parent_and_child_fields.append(parentfield)
for subfield in properties:
all_parent_and_child_fields.append(subfield)
# Add parent and child names to the CSV file
with open(csvfile, mode='a') as data:
data = csv.writer(data, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# Write new row
data.writerow([repository_name_version, metadatablock_name, parentfield, subfield])
# Get the names of all fields
all_fields = []
for parentfield in metadatablock_data['data']['fields']:
properties = metadatablock_data['data']['fields'][parentfield]
for property in properties:
field = properties[property]
all_fields.append(field)
break
# Get names of primitive fields by removing the compound and child fields from the list of all fields
primitive_fields = list(set(all_fields) - set(all_parent_and_child_fields))
# Add the primitive field names to the CSV file
for primitive_field in primitive_fields:
# Set subfield to an empty string so that Dataverse ingests the CSV file.
# (Dataverse's ingest process doesn't seem to like it when there is nothing entered in the fourth column)
subfield = ''
with open(csvfile, mode='a') as data:
data = csv.writer(data, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# Write new row
data.writerow([repository_name_version, metadatablock_name, primitive_field, subfield])
```
|
{
"source": "jggimi/tools",
"score": 3
}
|
#### File: se/commands/semanticate.py
```python
import argparse
import se
import se.formatting
def semanticate() -> int:
"""
Entry point for `se semanticate`
"""
parser = argparse.ArgumentParser(description="Automatically add semantics to Standard Ebooks source directories.")
parser.add_argument("-v", "--verbose", action="store_true", help="increase output verbosity")
parser.add_argument("targets", metavar="TARGET", nargs="+", help="an XHTML file, or a directory containing XHTML files")
args = parser.parse_args()
for filename in se.get_target_filenames(args.targets, (".xhtml",)):
if args.verbose:
print(f"Processing {filename} ...", end="", flush=True)
try:
with open(filename, "r+", encoding="utf-8") as file:
xhtml = file.read()
processed_xhtml = se.formatting.semanticate(xhtml)
if processed_xhtml != xhtml:
file.seek(0)
file.write(processed_xhtml)
file.truncate()
except FileNotFoundError:
se.print_error(f"Not a file: {filename}")
if args.verbose:
print(" OK")
return 0
```
#### File: tools/se/images.py
```python
from pathlib import Path
import tempfile
import regex
from PIL import Image, ImageMath
import se
import se.formatting
def _color_to_alpha(image: Image, color=None) -> Image:
"""
Implements GIMP's color to alpha algorithm.
See https://stackoverflow.com/a/1617909
GPLv3: http://bazaar.launchpad.net/~stani/phatch/trunk/annotate/head:/phatch/actions/color_to_alpha.py#L50
INPUTS
image: A PIL image to work on
color: A 4-tuple (R, G, B, A) value as the color to change to alpha
OUTPUTS
A PIL Image with the given color converted to alpha
"""
image = image.convert("RGBA")
color = list(map(float, color))
img_bands = [band.convert("F") for band in image.split()]
# Find the maximum difference rate between source and color. I had to use two
# difference functions because ImageMath.eval only evaluates the expression
# once.
alpha = ImageMath.eval(
"""float(
max(
max(
max(
difference1(red_band, cred_band),
difference1(green_band, cgreen_band)
),
difference1(blue_band, cblue_band)
),
max(
max(
difference2(red_band, cred_band),
difference2(green_band, cgreen_band)
),
difference2(blue_band, cblue_band)
)
)
)""",
difference1=lambda source, color: (source - color) / (255.0 - color),
difference2=lambda source, color: (color - source) / color,
red_band=img_bands[0],
green_band=img_bands[1],
blue_band=img_bands[2],
cred_band=color[0],
cgreen_band=color[1],
cblue_band=color[2]
)
# Calculate the new image colors after the removal of the selected color
new_bands = [
ImageMath.eval(
"convert((image - color) / alpha + color, 'L')",
image=img_bands[i],
color=color[i],
alpha=alpha
)
for i in range(3)
]
# Add the new alpha band
new_bands.append(ImageMath.eval(
"convert(alpha_band * alpha, 'L')",
alpha=alpha,
alpha_band=img_bands[3]
))
new_image = Image.merge("RGBA", new_bands)
background = Image.new("RGB", new_image.size, (0, 0, 0, 0))
background.paste(new_image.convert("RGB"), mask=new_image)
# SE addition: Lastly, convert transparent pixels to rgba(0, 0, 0, 0) so that Pillow's
# crop function can detect them.
# See https://stackoverflow.com/a/14211878
pixdata = new_image.load()
width, height = new_image.size
for image_y in range(height):
for image_x in range(width):
if pixdata[image_x, image_y] == (255, 255, 255, 0):
pixdata[image_x, image_y] = (0, 0, 0, 0)
return new_image
# Note: We can't type hint driver, because we conditionally import selenium for performance reasons
def render_mathml_to_png(driver, mathml: str, output_filename: Path) -> None:
"""
Render a string of MathML into a transparent PNG file.
INPUTS
driver: A Selenium webdriver, usually initialized from se.browser.initialize_selenium_firefox_webdriver
mathml: A string of MathML
output_filename: A filename to store PNG output to
OUTPUTS
None.
"""
with tempfile.NamedTemporaryFile(mode="w+") as mathml_file:
with tempfile.NamedTemporaryFile(mode="w+", suffix=".png") as png_file:
mathml_file.write(f"<!doctype html><html><head><meta charset=\"utf-8\"><title>MathML fragment</title></head><body>{mathml}</body></html>")
mathml_file.seek(0)
driver.get(f"file://{mathml_file.name}")
# We have to take a screenshot of the html element, because otherwise we screenshot the viewport, which would result in a truncated image
driver.find_element_by_tag_name("html").screenshot(png_file.name)
image = Image.open(png_file.name)
image = _color_to_alpha(image, (255, 255, 255, 255))
image.crop(image.getbbox()).save(output_filename)
def format_inkscape_svg(filename: Path):
"""
Clean and format SVGs created by Inkscape, which have lots of useless metadata.
INPUTS
filename: A filename of an Inkscape SVG
OUTPUTS
None.
"""
with open(filename, "r+", encoding="utf-8") as file:
svg = file.read()
# Time to clean up Inkscape's mess
svg = regex.sub(r"id=\"[^\"]+?\"", "", svg)
svg = regex.sub(r"<metadata[^>]*?>.*?</metadata>", "", svg, flags=regex.DOTALL)
svg = regex.sub(r"<defs[^>]*?/>", "", svg)
svg = regex.sub(r"xmlns:(dc|cc|rdf)=\"[^\"]*?\"", "", svg)
# Inkscape includes CSS even though we've removed font information
svg = regex.sub(r" style=\".*?\"", "", svg)
svg = se.formatting.format_xhtml(svg)
file.seek(0)
file.write(svg)
file.truncate()
def remove_image_metadata(filename: Path) -> None:
"""
Remove exif metadata from an image.
INPUTS
filename: A filename of an image
OUTPUTS
None.
"""
image = Image.open(filename)
data = list(image.getdata())
image_without_exif = Image.new(image.mode, image.size)
image_without_exif.putdata(data)
image_without_exif.save(filename, subsampling="4:4:4")
```
|
{
"source": "jggomez/demo-gcp-secret-manager",
"score": 2
}
|
#### File: jggomez/demo-gcp-secret-manager/create_secret_manager_demo.py
```python
from typing import Final
from google.cloud import secretmanager
from dotenv import load_dotenv
import json
load_dotenv()
SECRET_ID: Final = "secret1"
DECODE_FORMAT: Final = "UTF-8"
PROJECT_ID = "devhack-55d18"
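# Create an empty secret with automatic replication; the secret value itself is added later as a version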
def create_secret(project_id, secret_id):
parent = f"projects/{project_id}"
client = secretmanager.SecretManagerServiceClient()
client.create_secret(
request={
"parent": parent,
"secret_id": secret_id,
"secret": {"replication": {"automatic": {}}},
}
)
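# Add a new version holding the UTF-8 encoded payload to an existing secret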
def add_secret_value(project_id, secret_id, payload):
client = secretmanager.SecretManagerServiceClient()
parent = client.secret_path(project_id, secret_id)
payload = payload.encode("UTF-8")
response = client.add_secret_version(
request={"parent": parent, "payload": {"data": payload}}
)
print("Added secret version: {}".format(response.name))
if __name__ == '__main__':
create_secret(PROJECT_ID, SECRET_ID)
add_secret_value(PROJECT_ID, SECRET_ID, json.dumps({
"name": "Juan", "lastname": "Gomez"}))
```
#### File: jggomez/demo-gcp-secret-manager/read_secret_manager_demo.py
```python
from typing import Final
from google.cloud import secretmanager
from dotenv import load_dotenv
load_dotenv()
SECRET_USER_ID: Final = "user"
SECRET_PASSWORD_ID: Final = "password"
DECODE_FORMAT: Final = "UTF-8"
PROJECT_ID = "devhack-55d18"
def get_name(project_id, secret_id, version):
return f"projects/{project_id}/secrets/{secret_id}/versions/{version}"
def get_secret(project_id, secret_id, version="latest"):
name = get_name(project_id, secret_id, version)
client = secretmanager.SecretManagerServiceClient()
response = client.access_secret_version(request={"name": name})
payload = response.payload.data.decode(DECODE_FORMAT)
return payload
if __name__ == '__main__':
print(get_secret(PROJECT_ID, SECRET_USER_ID))
print(get_secret(PROJECT_ID, SECRET_PASSWORD_ID))
print(get_secret(PROJECT_ID, SECRET_USER_ID, 1))
```
|
{
"source": "jggomez/Python-Reference-Microservice",
"score": 3
}
|
#### File: src/bp/get_type_games_by_user.py
```python
from dataclasses import dataclass
from data import GetAllTypeGamesRepository
from .usecase import UseCase
@dataclass
class Params:
user_id: str
class GetTypeGamesByUser(UseCase):
def __init__(self, type_games_repository: GetAllTypeGamesRepository):
self.type_games_repository = type_games_repository
def run(self, params):
return self.type_games_repository.get_type_games_by_user(params.user_id)
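# Hypothetical usage: GetTypeGamesByUser(repo).run(Params(user_id='some-user-id')),
# where repo is any GetAllTypeGamesRepository implementation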
```
#### File: src/endpoints/type_games_endpoint.py
```python
import logging
from bp import ParamsGetAllTypeGames
from bp import ParamsGetTypeGamesByUser
from di import providers
from fastapi import APIRouter
from fastapi import Depends
from fastapi import Response
from fastapi import status
from typing_extensions import Final
CODE: Final = "code"
MESSAGE: Final = "message"
ITEMS: Final = "items"
PAGE: Final = "page"
router = APIRouter()
@router.get("/apis/typegames/1.0.0")
def get_type_games(
userid: str,
codegame: str,
page: int,
maxitems: int,
response: Response,
get_all_type_game_uc=Depends(providers.all_type_games_use_case_module),
):
try:
logging.info(userid)
resp = get_all_type_game_uc.run(ParamsGetAllTypeGames(userid))
response_list = list([item.to_json() for item in resp])
return {ITEMS: response_list, PAGE: page}
except Exception as e:
logging.error("Error in getalltypegames", exc_info=True)
response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
return {CODE: 500, MESSAGE: str(e)}
@router.get("/apis/typegamesbyuser/1.0.0")
def get_type_games_by_user(
userid: str,
page: int,
maxitems: int,
response: Response,
get_type_game_by_user_uc=Depends(providers.type_games_by_user_use_case_module),
):
try:
logging.info(userid)
resp = get_type_game_by_user_uc.run(ParamsGetTypeGamesByUser(userid))
response_list = list([item.to_json() for item in resp])
return {ITEMS: response_list, PAGE: page}
except Exception as e:
logging.error("Fatal error in gettypegamesbyuser", exc_info=True)
response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
return {CODE: 500, MESSAGE: str(e)}
```
#### File: endpoints/steps/get_all_type_games_endpoint_steps.py
```python
from .util.util_neo4j import UtilNeo4j
CODE = "code"
NAME = "name"
LEVEL = "level"
TYPE_GAME_1_EXPECT = "type_game_1"
LEVEL_EXPECT = 50
class ShouldGetAllTypeGameSteps:
def given(self, client, user_id, type_games):
self.user_id = user_id
self.client = client
self.type_games = type_games
self.util_neo4j = UtilNeo4j()
self.util_neo4j.create_user(user_id, user_id)
self.util_neo4j.create_type_games(self.type_games)
self.util_neo4j.create_relationship_played_user_typegame(user_id, type_games)
def when(self):
self.response = self.client.get(f"/apis/typegames/1.0.0?userid={self.user_id}")
def then(self):
assert self.response is not None
type_games = self.response.get_json()
assert CODE not in type_games
count_type_game = 0
for tp in type_games:
if tp[NAME] == TYPE_GAME_1_EXPECT:
assert int(tp[LEVEL]) == LEVEL_EXPECT
count_type_game = count_type_game + 1
assert count_type_game == 1
assert len(type_games) > 0
def teardown(self):
self.util_neo4j.delete_type_games_played_data_test(self.user_id)
```
#### File: endpoints/steps/get_type_games_by_user_endpoint_steps.py
```python
from .util.util_neo4j import UtilNeo4j
# from ..util.util_neo4j import UtilNeo4j
CODE = "code"
class ShouldGetTypeGameByUserSteps:
def given(self, client, user_id, type_games):
self.client = client
self.user_id = user_id
self.type_games = type_games
self.util_neo4j = UtilNeo4j()
self.util_neo4j.create_user(user_id, user_id)
self.util_neo4j.create_type_games(type_games)
self.util_neo4j.create_relationship_user_typegame(user_id, type_games)
def when(self):
self.response = self.client.get(
f"/apis/typegamesbyuser/1.0.0?userid={self.user_id}"
)
def then(self):
assert self.response is not None
type_games_resp = self.response.get_json()
assert CODE not in type_games_resp
assert len(type_games_resp) > 0
assert len(type_games_resp) == len(self.type_games)
def teardown(self):
self.util_neo4j.delete_type_games_playing_data_test(self.user_id)
```
#### File: tests/endpoints/type_games_test.py
```python
import pytest
from .steps.get_all_type_games_endpoint_steps import ShouldGetAllTypeGameSteps
from .steps.get_type_games_by_user_endpoint_steps import ShouldGetTypeGameByUserSteps
USER_ID_DATA_TEST = "USER_TEST_TYPE_GAMES"
TYPE_GAMES_ALL_DATA_TEST = [
{
"name": "type_game_1",
"code": "PSICO",
"hasmedals": False,
"type": "ARCADE1",
"url_background": "image1.png",
"points": 20,
},
{
"name": "type_game_2",
"code": "ING",
"hasmedals": True,
"type": "ARCADE2",
"url_background": "image2.png",
"points": 30,
},
{
"name": "type_game_3",
"code": "ARQ",
"hasmedals": False,
"type": "ARCADE3",
"url_background": "image3.png",
"points": 10,
},
{
"name": "type_game_1",
"code": "PSICO",
"hasmedals": False,
"type": "ARCADE1",
"url_background": "image1.png",
"points": 50,
},
]
TYPE_GAMES_BY_USER_DATA_TEST = [
{
"name": "type_game_1",
"code": "PSICO",
"hasmedals": False,
"type": "ARCADE1",
"url_background": "image1.png",
"points": 20,
},
{
"name": "type_game_2",
"code": "ING",
"hasmedals": True,
"type": "ARCADE2",
"url_background": "image2.png",
"points": 30,
},
{
"name": "type_game_3",
"code": "ARQ",
"hasmedals": False,
"type": "ARCADE3",
"url_background": "image3.png",
"points": 10,
},
]
@pytest.mark.parametrize(
"user_id, type_games", [(USER_ID_DATA_TEST, TYPE_GAMES_ALL_DATA_TEST)]
)
def test_should_get_all_type_games_endpoint(client, user_id, type_games):
test = ShouldGetAllTypeGameSteps()
try:
test.given(client, user_id, type_games)
test.when()
test.then()
finally:
test.teardown()
@pytest.mark.parametrize(
"user_id, type_games", [(USER_ID_DATA_TEST, TYPE_GAMES_BY_USER_DATA_TEST)]
)
def test_should_get_type_games_by_user_endpoint(client, user_id, type_games):
test = ShouldGetTypeGameByUserSteps()
try:
test.given(client, user_id, type_games)
test.when()
test.then()
finally:
test.teardown()
```
|
{
"source": "jgh9094/Benchmarks",
"score": 2
}
|
#### File: Pilot3/JGH/distiller-2l.py
```python
import numpy as np
import argparse
import os
import pandas as pd
# keras python inputs
from keras.models import Model
from keras.layers import Input,Embedding,Dropout,Dense,GlobalMaxPooling1D,Conv1D,Lambda,Activation,concatenate
from keras.callbacks import EarlyStopping
from keras.utils import to_categorical
from keras.losses import categorical_crossentropy as logloss
from keras import backend as K
from keras import initializers
from keras.metrics import categorical_accuracy
from keras.layers.merge import Concatenate
from sklearn.metrics import f1_score
# summit specific imports
from loaddata6reg import loadAllTasks
from mpi4py import MPI
# global variables
COMM = MPI.COMM_WORLD
RANK = COMM.Get_rank()
SIZE = COMM.size
EPOCHS = 100
CLASS = [4,639,7,70,326]
TEMP = 0
# return configuration for the experiment
def GetModelConfig(config):
# testing configuration
if config == 0:
return {
'learning_rate': 0.01,
'batch_size': 5,
'dropout': 0.5,
'optimizer': 'adam',
'wv_len': 300,
'emb_l2': 0.001,
'in_seq_len': 1500,
'filter_sizes': [3,4,5],
'num_filters': [300,300,300],
'alpha': 0.07,
'temp': [0.1,0.2,0.5,0.7,0.9,1.0,1.1,1.2,1.5,1.7,1.9,2.0]
}
else:
print('MODEL CONFIGURATION DOES NOT EXIST')
exit(-1)
# first 1/2 are hard labels, second 1/2 are softmax outputs
# alpha: constant for hard label error (should be small according to lit review)
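# total loss = alpha * CE(hard labels, student softmax) + (1 - alpha) * CE(teacher soft targets, softmax(student logits / temp))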
def kd_loss(y_true,y_pred,alpha,temp,split):
# ground truth and teacher softmax
y_true_hl, y_true_sl = y_true[: , :split], y_true[: , split:]
# student softmax and logits/temp
s_softmax, s_logits = y_pred[: , :split], y_pred[: , split:]
# student raw logtis tranformed/temp: distallation loss
s_logits_sl = K.softmax(s_logits/temp)
# hard loss
hl = alpha * logloss(y_true_hl,s_softmax,from_logits=False)
# distillation loss
dl = (1.0 - alpha) * logloss(y_true_sl,s_logits_sl,from_logits=False)
return hl + dl
# softmax with temperature output transformer
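# dividing the raw logits by a temperature t > 1 flattens the resulting distribution,
# which is how the teacher's logits are turned into soft targets for distillation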
def softmax(x,t):
ex = np.exp(x/t)
tot = np.sum(ex)
return np.array(ex / tot)
# concatenate the data
# 1st half: hard lables
# 2nd half: teacher temperature softmax
def ConcatData(y,yv,teach, temp):
print('CONCAT DATA:', flush= True)
Y,YV = [],[]
# iterate through the number of classes in the training data
for i in range(len(CLASS)):
print(str(i))
Y.append([])
# get training dir
yt = np.load(teach + 'training-task-' + str(i) + '.npy')
# make sure same lengths
if yt.shape[0] != y[i].shape[0] or yt.shape[1] != y[i].shape[1]:
print('NOT MATCHING DIMENSIONS: TRAINING')
exit(-1)
# concatenate + transform the teacher data the output data
for j in range(yt.shape[0]):
Y[i].append(np.concatenate((y[i][j], softmax(yt[j], temp))))
# make a numpy array
Y[i] = np.array(Y[i])
# get validation dir
YV.append([])
yvt = np.load(teach + 'validating-task-' + str(i) + '.npy')
# make sure same lengths
if yvt.shape[0] != yv[i].shape[0] or yvt.shape[1] != yv[i].shape[1]:
print('NOT MATCHING DIMENSIONS: VALIDATING')
exit(-1)
# concatenate + transform the teacher data the output data
for j in range(yvt.shape[0]):
YV[i].append(np.concatenate((yv[i][j], softmax(yvt[j], temp))))
YV[i] = np.array(YV[i])
print('Training Output Data', flush= True)
i = 0
for y in Y:
print('task', i, flush= True)
print('--cases:', len(y), flush= True)
print('--classes:',len(y[0]), flush= True)
i += 1
print()
print('Validation Output Data', flush= True)
i = 0
for y in YV:
print('task', i, flush= True)
print('--cases:', len(y), flush= True)
print('--classes:',len(y[0]), flush= True)
i += 1
print()
return Y,YV
# transform y data to_categorical
# [0,1,0,1,...]
def Transform(rawY,rawYV):
print('TRANSFORM', flush= True)
# create array for each task output
# create array for each task output
yv = [[] for i in range(rawY.shape[1])]
# load data into appropiate list
for t in range(rawY.shape[1]):
y[t] = rawY[:,t]
yv[t] = rawYV[:,t]
# make to catagorical data and pack up
Y,YV = [],[]
for i in range(len(y)):
Y.append(to_categorical(y[i], num_classes=CLASS[i]))
for i in range(len(yv)):
YV.append(to_categorical(yv[i], num_classes=CLASS[i]))
print('Training Output Data', flush= True)
i = 0
for y in Y:
print('task', i, flush= True)
print('--cases:', len(y), flush= True)
print('--classes:',len(y[0]), flush= True)
i += 1
print()
print('Validation Output Data', flush= True)
i = 0
for y in YV:
print('task', i, flush= True)
print('--cases:', len(y), flush= True)
print('--classes:',len(y[0]), flush= True)
i += 1
print()
return Y,YV
# will return a mt-cnn with a certain configuration
def CreateMTCnn(num_classes,vocab_size,cfg):
# define network layers ----------------------------------------------------
input_shape = tuple([cfg['in_seq_len']])
model_input = Input(shape=input_shape, name= "Input")
# embedding lookup
emb_lookup = Embedding(vocab_size, cfg['wv_len'], input_length=cfg['in_seq_len'],
embeddings_initializer= initializers.RandomUniform( minval= 0, maxval= 0.01 ),
name="embedding")(model_input)
# convolutional layer and dropout
conv_blocks = []
for ith_filter,sz in enumerate(cfg['filter_sizes']):
conv = Conv1D(filters=cfg['num_filters'][ ith_filter ], kernel_size=sz, padding="same",
activation="relu", strides=1, name=str(ith_filter) + "_thfilter")(emb_lookup)
conv_blocks.append(GlobalMaxPooling1D()(conv))
concat = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
concat_drop = Dropout(cfg['dropout'])(concat)
# different dense layer per tasks
FC_models = []
for i in range(len(num_classes)):
dense = Dense(num_classes[i], name= "Dense"+str(i), )( concat_drop )
act = Activation('softmax', name= "Active"+str(i))(dense)
FC_models.append(act)
# the multitask model
model = Model(inputs=model_input, outputs = FC_models)
model.compile( loss= "categorical_crossentropy", optimizer= cfg['optimizer'], metrics=[ "acc" ] )
return model
def main():
print('************************************************************************************', flush= True)
# generate and get arguments
parser = argparse.ArgumentParser(description='Process arguments for model training.')
parser.add_argument('tech_dir', type=str, help='Where is the teacher data located?')
parser.add_argument('dump_dir', type=str, help='Where are we dumping the output?')
parser.add_argument('config', type=int, help='What model config are we using?')
# Parse all the arguments & set random seed
args = parser.parse_args()
# RANK = 0
seed = int(RANK)
print('Seed:', seed, end='\n\n')
np.random.seed(seed)
# check that dump directory exists
if not os.path.isdir(args.tech_dir):
print('TEACHER DIRECTORY DOES NOT EXIST')
exit(-1)
# check that dump directory exists
if not os.path.isdir(args.dump_dir):
print('DUMP DIRECTORY DOES NOT EXIST')
exit(-1)
# Step 1: Get experiment configurations
config = GetModelConfig(args.config)
print('run parameters:', config, flush= True)
global TEMP
TEMP = config['temp'][seed]
print('TEMP:', TEMP, flush= True)
# Step 2: Create training/testing data for models
X, XV, XT, Y, YV, YT = loadAllTasks(print_shapes = False)
Y,YV = Transform(Y,YV)
Y,YV = ConcatData(Y,YV, args.tech_dir, TEMP)
print('DATA LOADED AND READY TO GO', flush= True)
# Step 3: Create the studen mtcnn model
mtcnn = CreateMTCnn(CLASS, max(np.max(X),np.max(XV)) + 1,config)
print('MODEL CREATED\n', flush= True)
#Step 4: Create knowledge distilled student topology
# remove the last activation layers
for i in range(len(CLASS)):
mtcnn.layers.pop()
# add new distlled layers
new_out = []
for i in range(len(CLASS)):
logits = mtcnn.get_layer('Dense'+str(i)).output
probs = Activation('softmax', name='Logits'+str(i))(logits)
# pass the raw logits through unchanged; temperature scaling is applied later in the loss and metrics
logits_T = Lambda(lambda x: x, name='lambda'+str(i))(logits)
# probs_T = Activation('softmax', name='TLogits'+str(i))(logits_T)
# output layer: softmax from dense & raw logits
output = concatenate([probs, logits_T], name="Active"+str(i))
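# each task output is [softmax probabilities | raw logits]; kd_loss and the metrics split it at CLASS[i]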
new_out.append(output)
# mtcnn distillation model ready to go!
mtcnn = Model(mtcnn.input, new_out)
mtcnn.summary()
print('MODEL READJUSTED FOR DISTILLATION\n')
# student softmax(raw_logits) and hard labels
def acc(y_true, y_pred, split):
y_true = y_true[:, :split]
y_pred = y_pred[:, split:]
y_pred = K.softmax(y_pred)
return categorical_accuracy(y_true, y_pred)
# cross-entropy between the hard labels and the student's raw logits (softmax applied via from_logits=True)
def categorical_crossentropy(y_true, y_pred, split):
y_true = y_true[:, :split]
y_pred = y_pred[:, split:]
return logloss(y_true, y_pred, from_logits=True)
# distillation term: cross-entropy between the teacher's soft targets and the temperature-scaled student logits
def soft_logloss(y_true, y_pred, split,temp):
y_true = y_true[:, split:]
y_pred = y_pred[:, split:]
y_soft = y_pred/temp
return logloss(y_true, y_soft, from_logits=True)
# create loss dictionary for each task
losses = {}
# task 0
l0 = lambda y_true, y_pred: kd_loss(y_true,y_pred,config['alpha'], TEMP,CLASS[0])
l0.__name__ = 'kdl'+str(0)
losses['Active'+str(0)] = l0
# task 1
l1 = lambda y_true, y_pred: kd_loss(y_true,y_pred,config['alpha'], TEMP,CLASS[1])
l1.__name__ = 'kdl'+str(1)
losses['Active'+str(1)] = l1
# task 2
l2 = lambda y_true, y_pred: kd_loss(y_true,y_pred,config['alpha'], TEMP,CLASS[2])
l2.__name__ = 'kdl'+str(2)
losses['Active'+str(2)] = l2
# task 3
l3 = lambda y_true, y_pred: kd_loss(y_true,y_pred,config['alpha'], TEMP,CLASS[3])
l3.__name__ = 'kdl'+str(3)
losses['Active'+str(3)] = l3
# create metric dictionary per task
metrics = {}
# task 0
metrics['Active'+str(0)] = []
l01 = lambda y_true, y_pred: acc(y_true,y_pred,CLASS[0])
l01.__name__ = 'acc'+str(0)
metrics['Active'+str(0)].append(l01)
l02 = lambda y_true, y_pred: categorical_crossentropy(y_true,y_pred,CLASS[0])
l02.__name__ = 'cc'+str(0)
metrics['Active'+str(0)].append(l02)
l03 = lambda y_true, y_pred: soft_logloss(y_true,y_pred,CLASS[0],TEMP)
l03.__name__ = 'sl'+str(0)
metrics['Active'+str(0)].append(l03)
# task 1
metrics['Active'+str(1)] = []
l11 = lambda y_true, y_pred: acc(y_true,y_pred,CLASS[1])
l11.__name__ = 'acc'+str(1)
metrics['Active'+str(1)].append(l11)
l12 = lambda y_true, y_pred: categorical_crossentropy(y_true,y_pred,CLASS[1])
l12.__name__ = 'cc'+str(1)
metrics['Active'+str(1)].append(l12)
l13 = lambda y_true, y_pred: soft_logloss(y_true,y_pred,CLASS[1],TEMP)
l13.__name__ = 'sl'+str(1)
metrics['Active'+str(1)].append(l13)
# task 2
metrics['Active'+str(2)] = []
l21 = lambda y_true, y_pred: acc(y_true,y_pred,CLASS[2])
l21.__name__ = 'acc'+str(2)
metrics['Active'+str(2)].append(l21)
l22 = lambda y_true, y_pred: categorical_crossentropy(y_true,y_pred,CLASS[2])
l22.__name__ = 'cc'+str(2)
metrics['Active'+str(2)].append(l22)
l23 = lambda y_true, y_pred: soft_logloss(y_true,y_pred,CLASS[2],TEMP)
l23.__name__ = 'sl'+str(2)
metrics['Active'+str(2)].append(l23)
# task 3
metrics['Active'+str(3)] = []
l31 = lambda y_true, y_pred: acc(y_true,y_pred,CLASS[3])
l31.__name__ = 'acc'+str(3)
metrics['Active'+str(3)].append(l31)
l32 = lambda y_true, y_pred: categorical_crossentropy(y_true,y_pred,CLASS[3])
l32.__name__ = 'cc'+str(3)
metrics['Active'+str(3)].append(l32)
l33 = lambda y_true, y_pred: soft_logloss(y_true,y_pred,CLASS[3],TEMP)
l33.__name__ = 'sl'+str(3)
metrics['Active'+str(3)].append(l33)
print('METRICS CREATED', flush= True)
# create validation data dictionary
val_dict = {}
for i in range(len(CLASS)):
layer = 'Active' + str(i)
val_dict[layer] = YV[i]
print('VAL-DICT CREATED', flush= True)
mtcnn.compile(optimizer='adam', loss=losses, metrics=metrics)
print('MODEL COMPILED FOR DISTILLATION\n', flush= True)
hist = mtcnn.fit(X, Y,
batch_size=config['batch_size'],
epochs=EPOCHS,
verbose=2,
validation_data=({'Input': XV}, val_dict),
callbacks = [EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=0, mode='auto', restore_best_weights=True)])
# Step 5: Save everything
# create directory to dump all data related to model
fdir = args.dump_dir + 'MTDistilled2L-' + str(args.config) + '-' + str(RANK) + '/'
os.mkdir(fdir)
micMac = []
data_path = fdir + "MicMacTest_R" + str(RANK) + ".csv"
# convert the history.history dict to a pandas DataFrame:
hist_df = pd.DataFrame(hist.history)
hist_df.to_csv(path_or_buf= fdir + 'history.csv', index=False)
print('History Saved!', flush= True)
# save model
mtcnn.save(fdir + 'model.h5')
print('Model Saved!', flush= True)
# run the student model on the test set; each output is [softmax probabilities | raw logits]
predT = mtcnn.predict(XT)
# use the second half of each output vector (the raw logits) and softmax them to get predictions
pred = [[] for x in range(len(CLASS))]
# get the softmax values of our predictions from the raw logits
for i in range(len(predT)):
for j in range(len(predT[i])):
pred[i].append(softmax(predT[i][j][CLASS[i]:],1.0))
pred = [np.array(x) for x in pred]
for t in range(len(CLASS)):
preds = np.argmax(pred[t], axis=1)
micro = f1_score(YT[:,t], preds, average='micro')
macro = f1_score(YT[:,t], preds, average='macro')
micMac.append(micro)
micMac.append(macro)
data = np.zeros(shape=(1, 10))
data = np.vstack((data, micMac))
df0 = pd.DataFrame(data,
columns=['Beh_Mic', 'Beh_Mac', 'His_Mic', 'His_Mac', 'Lat_Mic', 'Lat_Mac', 'Site_Mic',
'Site_Mac', 'Subs_Mic', 'Subs_Mac'])
df0.to_csv(data_path)
print('MIC-MAC SCORES SAVED', flush= True)
# save model output
print('Saving Testing Softmax Output', flush= True)
for i in range(len(predT)):
print('task:',str(i))
print('--Number of data points: ', len(predT[i]), flush= True)
print('--Size of each data point', len(predT[i][0]), flush= True)
fname = fdir + 'testing-task-' + str(i) + '.npy'
np.save(fname, predT[i])
print()
if __name__ == '__main__':
main()
```
#### File: Pilot3/JGH/mtcnn-model.py
```python
import numpy as np
from matplotlib import pyplot as plt
import argparse
import os
import pandas as pd
# keras python inputs
from keras.models import Model
from keras.layers import Input, Embedding, Dense, Dropout
from keras.regularizers import l2
from keras.layers import GlobalMaxPooling1D, Convolution1D
from keras.utils import plot_model
from keras.callbacks import EarlyStopping
from keras.utils import to_categorical
from keras.layers.merge import Concatenate
from keras import initializers
from keras.losses import CategoricalCrossentropy
from sklearn.metrics import f1_score
from loaddata6reg import loadAllTasks
from mpi4py import MPI
# global variables
COMM = MPI.COMM_WORLD
RANK = COMM.Get_rank()
SIZE = COMM.size #Node count. size-1 = max rank.
# EXPECTED CLASSES FOR EACH TASK, MUST UPDATE
CLASS = [4,639,7,70,326]
EPOCHS = 100
# return configuration for the experiment
def GetModelConfig(config):
# testing configuration
if config == 0:
return {
'learning_rate': 0.01,
'batch_size': 256,
'dropout': 0.5,
'optimizer': 'adam',
'wv_len': 300,
'emb_l2': 0.001,
'in_seq_len': 1500,
'filter_sizes': [3,4,5],
'num_filters': [300,300,300],
}
else:
print('MODEL CONFIGURATION DOES NOT EXIST', flush= True)
exit(-1)
# softmax output transformer
def softmax(x):
ex = np.exp(x)
tot = np.sum(ex)
return ex / tot
# transform data and return number of classes
def TransformData(rawX, rawXV, rawXT, rawY, rawYV, rawYT):
# raw data descriptions
print('RAW DATA DIMS', flush= True)
print('rawX dim: ', rawX.shape, flush= True)
print('rawY dim: ', rawY.shape, flush= True)
print('rawXV dim: ', rawXV.shape, flush= True)
print('rawYV dim: ', rawYV.shape, flush= True)
print('rawXT dim: ', rawXT.shape, flush= True)
print('rawYT dim: ', rawYT.shape , end='\n\n', flush= True)
# make sure number of tasks between data sets is consistent
if rawY.shape[1] != rawYT.shape[1] or rawYT.shape[1] != rawYV.shape[1]:
print('NUMBER OF TASKS NOT THE SAME BETWEEN DATA SETS', flush= True)
exit(-1)
# create array for each task output
y = [[] for i in range(rawY.shape[1])]
yv = [[] for i in range(rawY.shape[1])]
yt = [[] for i in range(rawY.shape[1])]
# load data into appropiate list
for t in range(rawY.shape[1]):
y[t] = rawY[:,t]
yv[t] = rawYV[:,t]
yt[t] = rawYT[:,t]
# make to catagorical data and pack up
Y,YV,YT = [],[],[]
for i in range(len(y)):
Y.append(to_categorical(y[i], num_classes=CLASS[i]))
for i in range(len(yv)):
YV.append(to_categorical(yv[i], num_classes=CLASS[i]))
for i in range(len(yt)):
YT.append(to_categorical(yt[i], num_classes=CLASS[i]))
print('Training Output Data', flush= True)
i = 0
for y in Y:
print('task', i, flush= True)
print('--cases:', len(y), flush= True)
print('--classes:',len(y[0]), flush= True)
i += 1
print()
print('Validation Output Data', flush= True)
i = 0
for y in YV:
print('task', i, flush= True)
print('--cases:', len(y), flush= True)
print('--classes:',len(y[0]), flush= True)
i += 1
print()
print('Testing Output Data', flush= True)
i = 0
for y in YT:
print('task', i, flush= True)
print('--cases:', len(y), flush= True)
print('--classes:',len(y[0]), flush= True)
i += 1
print()
# number of classes per task
classes = []
for y in Y:
classes.append(len(y[0]))
return np.array(rawX),np.array(rawXV),np.array(rawXT),Y,YV,YT,classes
# will return a mt-cnn with a certain configuration
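# note: each task head is a plain Dense layer (no softmax); the loss applies softmax internally via from_logits=True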
def CreateMTCnn(num_classes,vocab_size,cfg):
# define network layers ----------------------------------------------------
input_shape = tuple([cfg['in_seq_len']])
model_input = Input(shape=input_shape, name= "Input")
# embedding lookup
emb_lookup = Embedding(vocab_size, cfg['wv_len'], input_length=cfg['in_seq_len'],
embeddings_initializer= initializers.RandomUniform( minval= 0, maxval= 0.01 ),
name="embedding")(model_input)
# convolutional layer and dropout
conv_blocks = []
for ith_filter,sz in enumerate(cfg['filter_sizes']):
conv = Convolution1D(filters=cfg['num_filters'][ ith_filter ], kernel_size=sz, padding="same",
activation="relu", strides=1, name=str(ith_filter) + "_thfilter")(emb_lookup)
conv_blocks.append(GlobalMaxPooling1D()(conv))
concat = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
concat_drop = Dropout(cfg['dropout'])(concat)
# different dense layer per tasks
FC_models = []
for i in range(len(num_classes)):
outlayer = Dense(num_classes[i], name= "Dense"+str(i))( concat_drop )
FC_models.append(outlayer)
# the multitask model
model = Model(inputs=model_input, outputs = FC_models)
model.compile( loss= CategoricalCrossentropy(from_logits=True), optimizer= cfg['optimizer'], metrics=[ "acc" ] )
return model
def main():
# generate and get arguments
parser = argparse.ArgumentParser(description='Process arguments for model training.')
parser.add_argument('dump_dir', type=str, help='Where are we dumping the output?')
parser.add_argument('config', type=int, help='What model config are we using?')
parser.add_argument('prop', type=float, help='proportion of testcases being used')
    # parse all the arguments
args = parser.parse_args()
# set the seed to the rank
seed = int(RANK)
print('Seed:', seed)
np.random.seed(seed)
# Step 1: Get experiment configurations
print('Config Using:', args.config)
config = GetModelConfig(args.config)
print('run parameters:', config, end='\n\n', flush= True)
# save the dump directory
dump_dir = args.dump_dir
# check that dump directory exists
# we expect the dump dir to be made already
print('DUMP Directory:', dump_dir)
if not os.path.isdir(dump_dir):
print('DUMP DIRECTORY DOES NOT EXIST', flush= True)
exit(-1)
# Step 2: Create training/testing data for models
X, XV, XT, Y, YV, YT= loadAllTasks(print_shapes = False)
# check that prop is between (0,1]
if args.prop <= 0 or args.prop > 1:
print('PROP NOT IN CORRECT RANGE')
exit(-1)
# Take the proportion of test cases
print('PROP:', args.prop)
propX = int(args.prop * len(X))
propXV = int(args.prop * len(XV))
propXT = int(args.prop * len(XT))
propY = int(args.prop * len(Y))
propYV = int(args.prop * len(YV))
propYT = int(args.prop * len(YT))
# subset the data set
X = X[0:propX]
XV = XV[0:propXV]
XT = XT[0:propXT]
Y = Y[0:propY]
YV = YV[0:propYV]
YT = YT[0:propYT]
X, XV, XT, Y, YV, YT, classes = TransformData(X, XV, XT, Y, YV, YT)
# Step 3: Create the mtcnn model
mtcnn = CreateMTCnn(classes, max(np.max(X),np.max(XV)) + 1,config)
print( mtcnn.summary() )
# Step 4: Train mtcnn model
# create validation data dictionary
val_dict = {}
for i in range(len(YV)):
layer = 'Dense' + str(i)
val_dict[layer] = YV[i]
hist = mtcnn.fit(x= X, y= Y, batch_size= config['batch_size'],
epochs= EPOCHS, verbose= 2, validation_data= ({'Input': XV}, val_dict),
callbacks = [EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto', restore_best_weights=True)]
)
# create directory to dump all data related to model
fdir = dump_dir + 'MTModel-' + str(args.config) + "_Rank-" + str(RANK) +'/'
if not os.path.exists(fdir):
os.makedirs(fdir)
# save predictions from all data inputs
pred = mtcnn.predict(X)
predV = mtcnn.predict(XV)
predT = mtcnn.predict(XT)
print('Saving Training Softmax Output', flush= True)
for i in range(len(pred)):
print('task:',str(i))
print('--Number of data points: ', len(pred[i]), flush= True)
print('--Size of each data point', len(pred[i][0]), flush= True)
fname = fdir + 'training-task-' + str(i) + '.npy'
np.save(fname, pred[i])
print()
print('Saving Validation Softmax Output', flush= True)
for i in range(len(predV)):
print('task:',str(i), flush= True)
print('--Number of data points: ', len(predV[i]), flush= True)
print('--Size of each data point', len(predV[i][0]), flush= True)
fname = fdir + 'validating-task-' + str(i) + '.npy'
np.save(fname, predV[i])
print()
print('Saving Testing Softmax Output', flush= True)
for i in range(len(predT)):
print('task:',str(i))
print('--Number of data points: ', len(predT[i]), flush= True)
print('--Size of each data point', len(predT[i][0]), flush= True)
fname = fdir + 'testing-task-' + str(i) + '.npy'
np.save(fname, predT[i])
print()
#predT has this shape: [numTasks, numSamples, numLabelsInTask]
'''
Save final micro/macro:
'''
micMac = []
data_path = fdir + "MicMacTest_R" + str(RANK) + ".csv"
X, XV, XT, Y, YV, YT= loadAllTasks(print_shapes = False)
# subset the data set
X = X[0:propX]
XV = XV[0:propXV]
XT = XT[0:propXT]
Y = Y[0:propY]
YV = YV[0:propYV]
YT = YT[0:propYT]
    # get the softmax values of our predictions from raw logits
for i in range(len(predT)):
for j in range(len(predT[i])):
predT[i][j] = softmax(predT[i][j])
for t in range(5):
preds = np.argmax(predT[t], axis=1)
micro = f1_score(YT[:,t], preds, average='micro')
macro = f1_score(YT[:,t], preds, average='macro')
micMac.append(micro)
micMac.append(macro)
data = np.zeros(shape=(1, 10))
data = np.vstack((data, micMac))
df0 = pd.DataFrame(data,
columns=['Beh_Mic', 'Beh_Mac', 'His_Mic', 'His_Mac', 'Lat_Mic', 'Lat_Mac', 'Site_Mic',
'Site_Mac', 'Subs_Mic', 'Subs_Mac'])
df0.to_csv(data_path)
# convert the history.history dict to a pandas DataFrame:
hist_df = pd.DataFrame(hist.history)
hist_df.to_csv(path_or_buf= fdir + 'history.csv', index=False)
print('History Saved!', flush= True)
# save model
mtcnn.save(fdir + 'model.h5')
print('Model Saved!', flush= True)
if __name__ == '__main__':
main()
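# Example invocation (a sketch; the script name and paths are placeholders):
#   python mt_cnn_trainer.py /path/to/dump_dir/ 0 1.0
# The dump directory must already exist, the second argument selects the model
# config, and the third is the proportion of cases to use (in (0, 1]).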
```
|
{
"source": "jgh9094/Knowlege-Distillation-P3B3",
"score": 3
}
|
#### File: CompressSM/Aggregation/stack-splits.py
```python
import numpy as np
import argparse
import pickle as pk
import psutil
import os
# Get total number of files there should be for each data type
def GetTotal(c):
# training data
if c == 0:
return 160
# testing data
elif c == 1:
return 20
# validating data
elif c == 2:
return 18
else:
print('UNKNOWN TOTAL')
exit(-1)
# Get file naming for each data type
def GetDataType(c,t):
# training data
if c == 0:
return 'training-task-' + str(t) + '-rank-'
# testing data
elif c == 1:
return 'testing-task-' + str(t) + '-rank-'
# validating data
elif c == 2:
return 'validating-task-' + str(t) + '-rank-'
else:
        print('UNKNOWN DATA TYPE')
exit(-1)
# build the list of per-rank files to stack for the given data type
def GetDataFiles(dir,fn,n):
dirs = [dir + fn + str(i) + '.npy' for i in range(n)]
print('DIRS EXPLORING:', flush= True)
for d in dirs:
print(d, flush= True)
print(flush= True)
return dirs
# will return string associated with data type
def GetData(c):
# training data
if c == 0:
return 'training'
# testing data
elif c == 1:
return 'testing'
# validating data
elif c == 2:
return 'validating'
else:
        print('UNKNOWN DATA TYPE')
exit(-1)
# will look through all files and concatenate their data (testing, training, validating)
def ConcatData(files,dump,data,task):
print('PROCESSING FILES', flush=True)
# will hold all data to stack
hold = []
for f in files:
print('processing:', f, flush= True)
X = np.load(file=f)
hold.append(X)
mat = np.concatenate(hold)
print('mat.shape', mat.shape)
np.save(dump + data + '-task-' + str(task) +'.npy', mat)
print('finished saving:', dump + data + '-task-' + str(task) +'.npy', flush= True)
print(flush= True)
def main():
# generate and get arguments
parser = argparse.ArgumentParser(description='Process arguments for model training.')
parser.add_argument('data_dir', type=str, help='Where are we dumping the output?')
parser.add_argument('dump_dir', type=str, help='Where are we dumping the output?')
parser.add_argument('task', type=int, help='What task are we looking for')
parser.add_argument('data_type', type=int, help='0: training, 1: testing, 2: validating')
    # parse all the arguments
args = parser.parse_args()
# what are the inputs
print('data_dir:', args.data_dir, flush= True)
print('dump_dir:', args.dump_dir, flush= True)
print('task:', args.task, flush= True)
print('data_type:', args.data_type, flush= True)
# Step 1: Get data directories we are exploring
fn = GetDataType(args.data_type, args.task)
n = GetTotal(args.data_type)
files = GetDataFiles(args.data_dir.strip(),fn,n)
# Step 2: Concatenate the data
data = GetData(args.data_type)
ConcatData(files, args.dump_dir, data,args.task)
if __name__ == '__main__':
main()
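# Example invocation (a sketch; directory paths are placeholders):
#   python stack-splits.py /path/to/per-rank-outputs/ /path/to/stacked/ 0 1
# which stacks the 'testing-task-0-rank-*.npy' files (data_type 1 = testing)
# into a single 'testing-task-0.npy' in the dump directory.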
```
#### File: CompressSM/MLP/mlp-sm.py
```python
import numpy as np
import argparse
import pickle as pk
import psutil
import os
# keras python inputs
from keras.models import Model
from keras.layers import Input, Dense
from keras.regularizers import l2
from keras.callbacks import EarlyStopping
from keras.utils import to_categorical
from keras.losses import CategoricalCrossentropy
# OLCF imports
from mpi4py import MPI
# global variables
COMM = MPI.COMM_WORLD
RANK = COMM.Get_rank()
SIZE = COMM.size #Node count. size-1 = max rank.
EPOCHS = 100
YLAB = '/gpfs/alpine/world-shared/med106/yoonh/storageFolder/HardLabels/'
CLASS = [4,639,7,70,326]
# will look at all directories in data dir and sample a set of them
def GetDataDirs(dir,p):
    # store the directories we are looking in and dimensions of softmax
dirs = filter(os.path.isdir, [os.path.join(dir, o) for o in os.listdir(dir)])
dirs = [dir + '/' for dir in dirs]
sub = int(p * len(dirs))
dirs = np.sort(np.random.choice(dirs, sub, replace=False))
print('NUMBER OF DIRS:', len(dirs), flush=True)
print('DIRS EXPLORING:', flush= True)
for d in dirs:
print(d, flush= True)
print('', flush= True)
return dirs
# concatenate all data into one single matrix
def AggregateData(dirs,task,data):
# get training data
print('COLLECTING',data.upper(),'DATA...', flush= True)
    # check that dimensions are the same
x,y = [],[]
# go through all files and check the dimensions
print('CHECKING DATA DIMENSIONS...', flush= True)
for dir in dirs:
X = np.load(file=dir + data +'-task-' + str(task) + '.npy')
# store dimensions
x.append(X.shape[0])
y.append(X.shape[1])
del X
# make sure that dimensions match for all data
if 1 < len(set(x)) or 1 < len(set(y)):
        print('DATA DIMENSIONS NOT EQUAL', flush= True)
exit(-1)
else:
print('DATA DIMENSIONS MATCH!', flush= True)
    # matrix that will hold the concatenated vectors for each data point
    mat = [ np.array([]) for i in range(x[0])]
    del x, y
for dir in dirs:
# go through all the dirs
print('processing:', dir + data +'-task-' + str(task) + '.npy', flush= True)
X = np.load(file=dir + data +'-task-' + str(task) + '.npy')
# go through all the data points and create a new data matrix
for i in range(len(X)):
mat[i] = np.concatenate((mat[i], X[i]), axis=None)
del X
print('FINISHED GOING THROUGH ALL DIRS', flush= True)
mat = np.array(mat)
print('mat.shape:',mat.shape, flush= True)
# memory checks
print('memory:',psutil.virtual_memory(), flush= True)
print('', flush= True)
return mat
# get a specific column of y labels for the given task
def GetYLabs(dir,task,name):
print('GETTING Y LABELS FOR', name.upper(), flush= True)
file = open(dir + name, 'rb')
ylab = pk.load(file)
    file.close()
# for testing purposes [0:20000]
ylab = ylab[:,task]
Y = []
for i in range(len(ylab)):
Y.append(to_categorical(ylab[i], num_classes=CLASS[task]))
print('', flush= True)
return np.array(Y)
def GetMLP(x,y):
# set input layer, assuming that all input will have same shape as starting case
input = Input(shape=([x]), name= "Input")
hidden = Dense(x, activation='relu')(input)
    # note: no dropout layer is added here; the output layer produces raw logits
output = Dense(y)(hidden)
    # link and compile the model (fitting happens in main)
mlp = Model(inputs=input, outputs = output)
mlp.compile(loss= CategoricalCrossentropy(from_logits=True), optimizer= 'adam', metrics=[ "acc" ] )
return mlp
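# Minimal usage sketch (shapes are illustrative): the MLP takes the concatenated
# softmax/logit vectors as input, uses one ReLU hidden layer of the same width,
# and emits raw logits that CategoricalCrossentropy(from_logits=True) expects.
# mlp = GetMLP(x=X.shape[1], y=Y.shape[1])
# mlp.summary()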
def main():
# generate and get arguments
parser = argparse.ArgumentParser(description='Process arguments for model training.')
parser.add_argument('data_dir', type=str, help='Where are we dumping the output?')
parser.add_argument('dump_dir', type=str, help='Where are we dumping the output?')
parser.add_argument('proportion', type=float, help='What model config was used')
parser.add_argument('offset', type=int, help='Seed offset for rng')
    # RANK is synonymous with the task being evaluated
# RANK = 0 # used for example right now
task = int(RANK)
print('task:', task,flush=True)
    # parse all the arguments
args = parser.parse_args()
# set seed for rng
seed = int(task+args.offset)
print('RANDOM SEED:', seed,flush=True)
np.random.seed(seed)
# Step 1: Get data directories we are exploring
dirs = GetDataDirs(args.data_dir.strip(),args.proportion)
# Step 2: Get all data and transform it into one matrix
X = AggregateData(dirs,RANK,'training')
XV = AggregateData(dirs,RANK,'validating')
Y = GetYLabs(YLAB, RANK, 'train_y.pickle')
YV = GetYLabs(YLAB, RANK, 'val_y.pickle')
print('DATA RETURNED',flush=True)
print('X.shape:', X.shape,flush=True)
print('XV.shape:', XV.shape,flush=True)
print('Y.shape:', Y.shape,flush=True)
print('YV.shape:', YV.shape,flush=True)
# Step 3: Create the MLP
mlp = GetMLP(int(X.shape[1]), int(Y.shape[1]))
print('MULTI-LAYERED PERCEPTRON CREATED', flush=True)
    # Step 4: Train Model
history = mlp.fit(X,Y, batch_size=256,epochs=EPOCHS, verbose=2, validation_data=(XV,YV),
callbacks=[EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto', restore_best_weights=True)])
    # Step 5: Save softmax outputs
# save predictions from all data inputs
pred = mlp.predict(X)
predV = mlp.predict(XV)
predT = mlp.predict(AggregateData(dirs,RANK,'testing'))
del X,XV
print('Saving Training Softmax Output', flush= True)
fname = args.dump_dir + 'training-task-' + str(task) + '.npy'
np.save(fname, pred)
print('Saving Validation Softmax Output', flush= True)
fname = args.dump_dir + 'validating-task-' + str(task) + '.npy'
np.save(fname, predV)
print('Saving Testing Softmax Output', flush= True)
fname = args.dump_dir + 'testing-task-' + str(task) + '.npy'
np.save(fname, predT)
print('',flush=True)
if __name__ == '__main__':
main()
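# Example invocation (a sketch; the launcher and paths are assumptions -- the
# script picks its task from the MPI rank, so it is normally started with an
# MPI launcher, one rank per task):
#   mpirun -n 5 python mlp-sm.py /path/to/aggregated/ /path/to/dump/ 0.5 42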
```
|
{
"source": "jghaines/aws-config-rules",
"score": 2
}
|
#### File: aws-config-rules/python/iam-mfa.py
```python
import json
import boto3
APPLICABLE_RESOURCES = ["AWS::IAM::User"]
def evaluate_compliance(configuration_item):
if configuration_item["resourceType"] not in APPLICABLE_RESOURCES:
return "NOT_APPLICABLE"
user_name = configuration_item["configuration"]["userName"]
iam = boto3.client("iam")
mfa = iam.list_mfa_devices(UserName=user_name)
if len(mfa["MFADevices"]) > 0:
return "COMPLIANT"
else:
return "NON_COMPLIANT"
def lambda_handler(event, context):
invoking_event = json.loads(event["invokingEvent"])
configuration_item = invoking_event["configurationItem"]
result_token = "No token found."
if "resultToken" in event:
result_token = event["resultToken"]
config = boto3.client("config")
config.put_evaluations(
Evaluations=[
{
"ComplianceResourceType":
configuration_item["resourceType"],
"ComplianceResourceId":
configuration_item["resourceId"],
"ComplianceType":
evaluate_compliance(configuration_item),
"Annotation":
"The user doesn't have MFA enabled.",
"OrderingTimestamp":
configuration_item["configurationItemCaptureTime"]
},
],
ResultToken=result_token
)
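# Shape of the event this handler expects (a sketch based only on the fields
# read above; the values are placeholders, not a real AWS Config payload):
# event = {
#     "invokingEvent": json.dumps({
#         "configurationItem": {
#             "resourceType": "AWS::IAM::User",
#             "resourceId": "AIDAEXAMPLE",
#             "configuration": {"userName": "example-user"},
#             "configurationItemCaptureTime": "2017-01-01T00:00:00.000Z"
#         }
#     }),
#     "resultToken": "example-token"
# }
# lambda_handler(event, None)  # needs AWS credentials for the iam/config clients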
```
#### File: aws-config-rules/python/vpc_flow_logs_enabled.py
```python
import boto3
import json
def evaluate_compliance(config_item, vpc_id):
if (config_item['resourceType'] != 'AWS::EC2::VPC'):
return 'NOT_APPLICABLE'
elif is_flow_logs_enabled(vpc_id):
return 'COMPLIANT'
else:
return 'NON_COMPLIANT'
def is_flow_logs_enabled(vpc_id):
ec2 = boto3.client('ec2')
response = ec2.describe_flow_logs(
Filter=[
{
'Name': 'resource-id',
'Values': [
vpc_id,
]
},
],
)
    if len(response[u'FlowLogs']) != 0:
        return True
    return False
def lambda_handler(event, context):
invoking_event = json.loads(event['invokingEvent'])
compliance_value = 'NOT_APPLICABLE'
vpc_id = invoking_event['configurationItem']['resourceId']
compliance_value = evaluate_compliance(
invoking_event['configurationItem'], vpc_id)
config = boto3.client('config')
response = config.put_evaluations(
Evaluations=[
{
'ComplianceResourceType': invoking_event['configurationItem']['resourceType'],
'ComplianceResourceId': vpc_id,
'ComplianceType': compliance_value,
'OrderingTimestamp': invoking_event['configurationItem']['configurationItemCaptureTime']
},
],
ResultToken=event['resultToken'])
```
|
{
"source": "jgharris7/DocClass",
"score": 2
}
|
#### File: model/app/learnmodel.py
```python
root='C:/Users/jgharris/DocClass/'
dataFile='/data/shuffled-full-set-hashed.csv'
import statistics as stat
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pickle
from DocClf2 import DocClf2
from Documents import Documents
modelName="v3"
#dataFile='/test/testshort.csv'
maxlines=8000000
testsize=.2
random_state=45
MAXSTRINGLENGH=4000
FIRSTSTRINGLENGTH=80
def main():
# Set up corpus for training
corpus=Documents()
corpus.readFromFile(root+dataFile,maxline=maxlines)
'''
model1=DocClfComplNB(maxStringLength=MAXSTRINGLENGH, \
firstStringLength=FIRSTSTRINGLENGTH)
'''
model1=DocClf2(maxStringLength=MAXSTRINGLENGH, \
firstStringLength=FIRSTSTRINGLENGTH)
print()
# split into test and training sets
xtrain,xtest,ytrain,ytest=\
train_test_split(corpus.words,corpus.y,test_size=testsize, \
random_state=random_state)
ytrainpred=model1.fit(xtrain,ytrain)
ytestpred=model1.predict(xtest)
print([(i,ytest.count(i)) for i in sorted(set(ytest))])
trainAccuracy=accuracy_score(ytrain,ytrainpred)
testAccuracy=accuracy_score(ytest,ytestpred)
controlAccuracy=accuracy_score(np.random.permutation(ytest),ytestpred)
global conf_mat
    conf_mat = model1.confidence(ytest, ytestpred)
print(model1.confidence)
print()
print( np.unique(ytestpred,return_counts=True))
print()
[print("%-25s" % key +" %5.3f" % value) for key,value in model1.confidence.items()]
labels=[]
[labels.append(key) for key in model1.confidence.keys()]
for row in range(0,conf_mat.shape[0]):
print( [" %4d" % conf_mat[row,col] for col in range(0,conf_mat.shape[1])])
rowsum=conf_mat.sum(axis=0)
colsum=conf_mat.sum(axis=1)
print("item rowsum colsum")
for ic in range(0,conf_mat.shape[0]):
print("%-25s" % labels[ic] + " %5d" % rowsum[ic]+ " %5d" % colsum[ic])
print("")
print('train=%6.2f test=%6.2f control=%6.2f' %
(trainAccuracy,testAccuracy,controlAccuracy))
pickle.dump(model1,open(root+modelName+".pckmdl","wb"))
print(model1.confidence)
print(ytestpred[0])
print(xtest[0][0:20])
testfile=open(root+modelName+"testdata.txt","wt")
testfile.write(ytestpred[0])
testfile.write("\n")
testfile.write(xtest[0])
testfile.write("\n")
testfile.write(ytestpred[10])
testfile.write("\n")
testfile.write(xtest[10])
testfile.write("\n")
testfile.close()
print( model1.message)
def docpeek():
corpus=Documents()
corpus.readFromFile(root+dataFile,maxline=maxlines)
print([(i,corpus.y.count(i)) for i in corpus.classes])
corpus.makeDataFrame()
x1=corpus.df[['class','count']].groupby(by='class')
cnt=x1.count()
a1=x1.min()
a2=x1.max()
a3=x1.mean()
a4=x1.std()
cnt.columns=['count']
a1.columns=['min']
a2.columns=['max']
a3.columns=['mean']
a4.columns=['std']
q=cnt.merge(a1,left_index=True,right_index=True)\
.merge(a2,left_index=True,right_index=True)\
.merge(a3,left_index=True,right_index=True)
q=q.merge(a4,left_index=True,right_index=True)
return corpus,q
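# Usage sketch: docpeek() loads the corpus and returns it along with a small
# per-class summary (count/min/max/mean/std of the per-document 'count' field):
#   corpus, summary = docpeek()
#   print(summary)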
if __name__=='__main__':
main()
```
|
{
"source": "jghibiki/corpusaurus",
"score": 3
}
|
#### File: jghibiki/corpusaurus/classification.py
```python
from flask import session, request, jsonify, abort
import json
import csv
from os import path
from math import floor
data = None
data_path = None
base_route = "/api/classification/"
def load(opts):
if opts.input_data != None:
preserve_fields = set()
# check to see if a data field has been defined else die
if opts.data_field:
preserve_fields.add(opts.data_field)
else:
raise Exception("--data-field is required when loading from input file.")
# check to see if there is a list of fields we should preserve
# in the output file
if opts.preserve_fields:
preserve_fields.update(opts.preserve_fields.split())
if opts.debug: print("Loading Data")
with open(opts.input_data, "r") as f:
global data
data = json.load(f)
# clean up un-preserved columns
bad_keys = set(data[0].keys()) - preserve_fields
for key in bad_keys:
for row in data:
del row[key]
# add classification field
for row in data:
row["classification"] = "none"
if opts.debug: print("Loaded %s rows of data." % len(data) )
# load output path
global data_path
data_path = opts.output_data if opts.output_data else "out.json"
        # create a base output file
if not path.exists(data_path):
if opts.debug: print("Create new output file.")
with open(data_path, "w") as f:
json.dump(data, f)
print("Finished Initializing Output File.")
exit()
else:
if opts.debug: print("Load existing output file.")
# load output path
global data_path
data_path = opts.output_data if opts.output_data else "out.json"
with open(data_path, "r") as f:
global data
data = json.load(f)
def register(app, opts):
@app.route(base_route + "element/count/", methods=["GET"])
def getNumberOfElements():
global data
return jsonify(result=len(data))
@app.route(base_route + "element/id/<int:element_id>", methods=["GET"])
def getElement(element_id):
element = data[element_id][opts.data_field]
resp = jsonify(element)
session["next"] += 1
return resp
@app.route(base_route + "element/<string:key_type>/<int:key>/classify/", methods=["POST"])
def setElementClassification(key_type, key):
if key_type == "index":
data[key]["classification"] = request.json["classification"]
else:
try:
for x in data:
if key_type in x and x[key_type] == key:
data[data.index(x)]["classification"] = request.json["classification"]
except:
raise Exception("Invalid key_type in url request.")
return jsonify(result=True)
@app.route(base_route + "save/", methods=["GET"])
def save():
global data_path
global data
with open(data_path, "w") as f:
json.dump(data, f)
return jsonify(result=True)
@app.route(base_route + "element/", defaults={"page": 0, "mask":""}, methods=["GET"])
@app.route(base_route + "element/page/<int:page>/", defaults={"mask":""}, methods=["GET"])
@app.route(base_route + "element/mask/<string:mask>/", defaults={"page":0}, methods=["GET"])
@app.route(base_route + "element/mask/<string:mask>/page/<int:page>/", methods=["GET"])
def getElements(page, mask):
"""
Get a page of elements
"""
global data
page_size = 100
        # Apply mask if necessary
if mask != "":
masked_data = [ x for x in data if x["classification"] == mask ]
else:
masked_data = data
max_page = floor(len(masked_data)/page_size)
if page > max_page:
page = 0
start = page_size * page
end = start + page_size
page_data = masked_data[start:end]
return jsonify(data=page_data, page=page, max_page=max_page, page_size=100, mask=mask)
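# Example client calls against the routes registered above (a sketch; the host
# and port are assumptions, and "relevant" is a placeholder label):
#   import requests
#   base = "http://localhost:5000/api/classification/"
#   page = requests.get(base + "element/page/0/").json()
#   requests.post(base + "element/index/3/classify/",
#                 json={"classification": "relevant"})
#   requests.get(base + "save/")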
```
|
{
"source": "jghibiki/DungeonBuilder",
"score": 2
}
|
#### File: jghibiki/DungeonBuilder/subscriptions.py
```python
import magic
import os
import json
##########################
# Config Operations #
##########################
def getConfig(client, req):
client.sendTarget(req["id"], key="get.config", payload={"payload": magic.config})
##########################
# User Operations #
##########################
def getUsers(client, req):
""" Get list of users and public data about them"""
client.sendTarget(req["id"], key="get.users", payload={"payload": magic.users})
def moveUser(client, req):
if "username" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="move.user",
payload={"msg": "Request details missing \"username\""})
return False
username = req["details"]["username"]
if "map_name" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="move.user",
payload={"msg": "Request details missing \"map_name\""})
return False
map_name = req["details"]["map_name"]
user = _getUserInfo(username=username)
if not user:
client.sendTarget(
req["id"],
type="error",
key="move.user",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
if map_name not in set(magic.game_data["maps"].keys()):
client.sendTarget(
req["id"],
type="error",
key="move.user",
payload={
"msg": "Map with name \"{0}\" does not exist."
.format(new_map_name)})
return False
user["current_map"] = map_name
client.sendTarget(
req["id"],
type="acknowledge",
key="move.user",
payload={})
return True
def registerUser(client, req):
if "username" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="register.user",
payload={"msg": "Request details missing \"username\""})
return False
new_username = req["details"]["username"]
if "current_map" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="register.user",
payload={"msg": "Request details missing \"current_map\""})
return False
current_map = req["details"]["current_map"]
exists = False
for user in magic.users:
if user["username"] == new_username:
exists = True
break
if not exists:
new_user = {}
new_user["username"] = new_username
new_user["current_map"] = current_map
new_user["client_id"] = req["id"]
if req["password"] == <PASSWORD>:
new_user["role"] = "gm"
elif req["password"] == <PASSWORD>:
new_user["role"] = "pc"
if new_user["current_map"] == None:
new_user["current_map"] = "__staging__"
magic.users.append(new_user)
client.sendTarget(
req["id"],
type="acknowledge",
key="register.user",
payload={})
return True
else:
# reassociate user with client
for user in magic.users:
if user["username"] == new_username:
user["client_id"] = req["id"]
client.sendTarget(
req["id"],
type="acknowledge",
key="register.user",
payload={})
return True
##########################
# Chat Operations #
##########################
def getChat(client, req):
username = None
for user in magic.users:
if user["client_id"] == req["id"]:
username = user["username"]
break
if username:
all_messages = magic.game_data["chat"]
messages = []
for msg in all_messages:
if ( msg["recipient"] == username or
msg["sender"] == username or
msg["recipient"] == None ):
messages.append(msg)
client.sendTarget(
req["id"],
key="get.chat",
payload={"payload": messages})
return True
return False
def addChatMessage(client, req):
if "sender" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="register.user",
payload={"msg": "Request details missing \"sender\""})
return False
sender = req["details"]["sender"]
if "message" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="register.user",
payload={"msg": "Request details missing \"message\""})
return False
message = req["details"]["message"]
recipient = req["details"]["recipient"] if "recipient" in req["details"] else None
persona = req["details"]["persona"] if "persona" in req["details"] else None
magic.game_data["chat"].append({
"sender": sender,
"recipient": recipient,
"message": message,
"persona": persona
})
return True
def clearChat(client, req):
magic.game_data["chat"] = []
return True
##########################
# Map Operations #
##########################
def listMaps(client, req):
maps = list(magic.game_data["maps"].keys())
client.sendTarget(req["id"], key="list.maps", payload={"payload": maps})
def addMap(client, req):
if "name" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map",
payload={"msg": "Request details missing \"name\""})
return False
new_map_name = req["details"]["name"]
if "width" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map",
payload={"msg": "Request details missing \"width\""})
return False
width = req["details"]["width"]
if "height" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map",
payload={"msg": "Request details missing \"height\""})
return False
height = req["details"]["height"]
for map_name in magic.game_data["maps"].keys():
if map_name == new_map_name:
client.sendTarget(
req["id"],
type="error",
key="add.map",
payload={
"msg": "Map with name \"{0}\" already exists."
.format(new_map_name)})
return False
magic.game_data["maps"][new_map_name] = {
"max_x": width,
"max_y": height,
"feature_types": [],
"features": [],
"notes": [],
"units": [],
"fow": [ [ False for y in range(height) ] for x in range(width) ]
}
client.sendTarget(
req["id"],
type="acknowledge",
key="add.map",
payload={})
return True
def removeMap(client, req):
if "name" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="remove.map",
payload={"msg": "Request details missing \"name\""})
return False
map_to_remove = req["details"]["name"]
exists = False
for map_name in magic.game_data["maps"].keys():
if map_name == map_to_remove:
exists = True
break
if exists:
del magic.game_data["maps"][map_to_remove]
client.sendTarget(
req["id"],
type="acknowledge",
key="remove.map",
payload={})
return True
else:
client.sendTarget(
req["id"],
type="error",
key="remove.map",
payload={
"msg": "Map with name \"{0}\" does not exist."
                .format(map_to_remove)})
return False
def renameMap(client, req):
if "current_name" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="rename.map",
payload={"msg": "Request details missing \"current_name\""})
return False
current_name = req["details"]["current_name"]
if "new_name" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="rename.map",
payload={"msg": "Request details missing \"new_name\""})
return False
new_name = req["details"]["new_name"]
print(current_name, new_name)
if current_name == new_name:
client.sendTarget(
req["id"],
type="error",
key="rename.map",
payload={
"msg": "Current map name and new map name cannot be the same."})
return False
exists = False
for map_name in magic.game_data["maps"].keys():
if map_name == current_name:
exists = True
break
if exists:
magic.game_data["maps"][new_name] = magic.game_data["maps"][current_name]
del magic.game_data["maps"][current_name]
client.sendTarget(
req["id"],
type="acknowledge",
key="rename.map",
payload={})
return True
else:
client.sendTarget(
req["id"],
type="error",
key="rename.map",
payload={
"msg": "Map with name \"{0}\" does not exist."
                .format(current_name)})
return False
def getMap(client, req):
user = _getUserInfo(id=req["id"])
if user:
if user["current_map"] in magic.game_data["maps"]:
data = magic.game_data["maps"][user["current_map"]]
client.sendTarget(req["id"], key="get.map", payload={"payload": data})
return True
else:
client.sendTarget(
req["id"],
type="error",
key="get.map",
payload={
"msg": "User \"{0}\" is on map \"{1}\", however this map could not be found.".format(user["username"], user["current_map"])
})
return False
else:
client.sendTarget(
req["id"],
type="error",
key="get.map",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
#
# Note Operations
#
def getMapNotes(client, req):
user = _getUserInfo(id=req["id"])
if user:
if user["current_map"] in magic.game_data["maps"]:
notes = magic.game_data["maps"][user["current_map"]]["notes"]
client.sendTarget(req["id"], key="get.map.notes", payload={"payload": notes})
return True
else:
client.sendTarget(
req["id"],
type="error",
key="get.map.notes",
payload={
"msg": "User \"{0}\" is on map \"{1}\", however this map could not be found.".format(user["username"], user["current_map"])
})
return False
else:
client.sendTarget(
req["id"],
type="error",
key="get.map.notes",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
def addMapNote(client, req):
# x and y should be optional
x = req["details"]["x"] if "x" in req["details"] else None
y = req["details"]["y"] if "y" in req["details"] else None
if "name" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.notes",
payload={"msg": "Request details missing \"name\""})
return False
name = req["details"]["name"]
if "text" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.notes",
payload={"msg": "Request details missing \"text\""})
return False
text = req["details"]["text"]
if "id" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.note",
payload={"msg": "Request details missing \"id\""})
return False
id = req["details"]["id"]
user = _getUserInfo(id=req["id"])
if not user:
client.sendTarget(
req["id"],
type="error",
key="add.map.note",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
if user["current_map"] not in magic.game_data["maps"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.note",
payload={
"msg": "User \"{0}\" is on map \"{1}\", however this map could not be found.".format(user["username"], user["current_map"])
})
return False
magic.game_data["maps"][user["current_map"]]["notes"].append({
"x": x,
"y": y,
"text": text,
"name": name,
"id": id
})
client.sendTarget(
req["id"],
type="acknowledge",
key="add.map.notes",
payload={})
return True
def removeMapNote(client, req):
if "id" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="remove.map.note",
payload={"msg": "Request details missing \"id\""})
return False
id = req["details"]["id"]
user = _getUserInfo(id=req["id"])
if not user:
client.sendTarget(
req["id"],
type="error",
key="remove.map.note",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
if user["current_map"] not in magic.game_data["maps"]:
client.sendTarget(
req["id"],
type="error",
key="remove.map.note",
payload={
"msg": "User \"{0}\" is on map \"{1}\", however this map could not be found.".format(user["username"], user["current_map"])
})
return False
notes = magic.game_data["maps"][user["current_map"]]["notes"]
for note in notes:
if note["id"] == id:
notes.remove(note)
client.sendTarget(
req["id"],
type="acknowledge",
key="add.map.note",
payload={})
return True
client.sendTarget(
req["id"],
type="error",
key="remove.map.note",
payload={
"msg": "Note with id \"{0}\" on map \"{1}\".".format(id, user["current_map"])
})
return False
def modifyMapNote(client, req):
x = req["details"]["x"] if "x" in req["details"] else None
y = req["details"]["y"] if "y" in req["details"] else None
if "text" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="modify.map.note",
payload={"msg": "Request details missing \"text\""})
return False
text = req["details"]["text"]
if "id" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="modify.map.note",
payload={"msg": "Request details missing \"id\""})
return False
id = req["details"]["id"]
user = _getUserInfo(id=req["id"])
if not user:
client.sendTarget(
req["id"],
type="error",
key="modify.map.note",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
if user["current_map"] not in magic.game_data["maps"]:
client.sendTarget(
req["id"],
type="error",
key="modiy.map.note",
payload={
"msg": "User \"{0}\" is on map \"{1}\", however this map could not be found.".format(user["username"], user["current_map"])
})
return False
notes = magic.game_data["maps"][user["current_map"]]["notes"]
for note in notes:
if note["id"] == id:
note["text"] = text
note["x"] = x
note["y"] = y
client.sendTarget(
req["id"],
type="acknowledge",
key="modify.map.note",
payload={})
return True
client.sendTarget(
req["id"],
type="error",
key="modify.map.note",
payload={
"msg": "Note with id \"{0}\" on map \"{1}\".".format(id, user["current_map"])
})
return False
#
# Feature Operations
#
def getMapFeatureTypes(client, req):
user = _getUserInfo(id=req["id"])
if not user:
client.sendTarget(
req["id"],
type="error",
key="get.map.feature.types",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
if user["current_map"] not in magic.game_data["maps"]:
client.sendTarget(
req["id"],
type="error",
key="get.map.feature.types",
payload={
"msg": "User \"{0}\" is on map \"{1}\", however this map could not be found.".format(user["username"], user["current_map"])
})
return False
types = magic.game_data["maps"][user["current_map"]]["feature_types"]
client.sendTarget(req["id"], key="get.map.feature.types", payload={"payload": types})
return True
def removeMapFeatureType(client, req):
if "name" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="remove.map.feature.type",
payload={"msg": "Request details missing \"name\""})
return False
name = req["details"]["name"]
user = _getUserInfo(id=req["id"])
if not user:
client.sendTarget(
req["id"],
type="error",
key="remove.map.feature.type",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
if user["current_map"] not in magic.game_data["maps"]:
client.sendTarget(
req["id"],
type="error",
key="remove.map.feature.type",
payload={
"msg": "User \"{0}\" is on map \"{1}\", however this map could not be found.".format(user["username"], user["current_map"])
})
return False
for feature_type in magic.game_data["maps"][user["current_map"]]["feature_types"]:
if feature_type["name"] == name:
magic.game_data["maps"][user["current_map"]]["feature_types"].remove(feature_type)
client.sendTarget(
req["id"],
type="acknowledge",
key="remove.map.feature.type",
payload={})
return True
client.sendTarget(
req["id"],
type="error",
key="remove.map.feature.type",
payload={
"msg": "Feature type with name \"{0}\" does not exist.".format(name)})
return False
def addMapFeatureType(client, req):
if "name" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.feature.type",
payload={"msg": "Request details missing \"name\""})
return False
name = req["details"]["name"]
if "color" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.feature.type",
payload={"msg": "Request details missing \"color\""})
return False
color = req["details"]["color"]
if "character" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.feature.type",
payload={"msg": "Request details missing \"character\""})
return False
character = req["details"]["character"]
if "key" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.feature.type",
payload={"msg": "Request details missing \"key\""})
return False
key = req["details"]["key"]
user = _getUserInfo(id=req["id"])
if not user:
client.sendTarget(
req["id"],
type="error",
key="add.map.feature.type",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
if user["current_map"] not in magic.game_data["maps"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.feature.type",
payload={
"msg": "User \"{0}\" is on map \"{1}\", however this map could not be found.".format(user["username"], user["current_map"])
})
return False
magic.game_data["maps"][user["current_map"]]["feature_types"].append({
"name": name,
"character": character,
"color": color,
"key": key
})
client.sendTarget(
req["id"],
type="acknowledge",
key="add.map.feature.type",
payload={})
return True
def addMapFeature(client, req):
if "x" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.feature",
payload={"msg": "Request details missing \"x\""})
return False
x = req["details"]["x"]
if "y" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.feature",
payload={"msg": "Request details missing \"y\""})
return False
y = req["details"]["y"]
if "type" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.feature",
payload={"msg": "Request details missing \"type\""})
return False
type = req["details"]["type"]
user = _getUserInfo(id=req["id"])
if user:
if user["current_map"] in magic.game_data["maps"]:
features = magic.game_data["maps"][user["current_map"]]["features"]
            # check whether a feature already exists here; if so, error out
for feature in features:
if feature["y"] == y and feature["x"] == x:
client.sendTarget(
req["id"],
type="error",
key="add.map.feature",
payload={
"msg": "Feature already exists at ({0}, {1})"
.format(x, y)})
return False
features.append({
"type": type,
"x": x,
"y": y
})
client.sendTarget(
req["id"],
type="acknowledge",
key="add.map.feature",
payload={})
return True
else:
client.sendTarget(
req["id"],
type="error",
key="add.map.feature",
payload={
"msg": "User \"{0}\" is on map \"{1}\", however this map could not be found.".format(user["username"], user["current_map"])
})
return False
else:
client.sendTarget(
req["id"],
type="error",
key="add.map.feature",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
def removeMapFeature(client, req):
if "x" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="remove.map.feature",
payload={"msg": "Request details missing \"x\""})
return False
x = req["details"]["x"]
if "y" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="remove.map.feature",
payload={"msg": "Request details missing \"y\""})
return False
y = req["details"]["y"]
user = _getUserInfo(id=req["id"])
if user:
if user["current_map"] in magic.game_data["maps"]:
features = magic.game_data["maps"][user["current_map"]]["features"]
            # find the feature at this location and remove it
for feature in features:
if feature["y"] == y and feature["x"] == x:
features.pop(features.index(feature))
client.sendTarget(
req["id"],
type="acknowledge",
key="add.map.feature",
payload={})
return True
client.sendTarget(
req["id"],
type="error",
key="add.map.feature",
payload={
"msg": "Feature does not exist at ({0}, {1})"
.format(x, y)})
return False
else:
client.sendTarget(
req["id"],
type="error",
key="add.map.feature",
payload={
"msg": "User \"{0}\" is on map \"{1}\", however this map could not be found.".format(user["username"], user["current_map"])
})
return False
else:
client.sendTarget(
req["id"],
type="error",
key="add.map.feature",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
#
# Fog Of War Operations
#
def getMapFow(client, req):
user = _getUserInfo(id=req["id"])
if not user:
client.sendTarget(
req["id"],
type="error",
key="get.map.fow",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
if user["current_map"] not in magic.game_data["maps"]:
client.sendTarget(
req["id"],
type="error",
key="get.map.fow",
payload={
"msg": "User \"{0}\" is on map \"{1}\", however this map could not be found.".format(user["username"], user["current_map"])
})
return False
fow = magic.game_data["maps"][user["current_map"]]["fow"]
client.sendTarget(req["id"], key="get.map.fow", payload={"payload": fow})
return True
def addMapFow(client, req):
if "x" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.fow",
payload={"msg": "Request details missing \"x\""})
return False
x = req["details"]["x"]
if "y" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.fow",
payload={"msg": "Request details missing \"y\""})
return False
y = req["details"]["y"]
user = _getUserInfo(id=req["id"])
if not user:
client.sendTarget(
req["id"],
type="error",
key="remove.map.fow",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
if user["current_map"] not in magic.game_data["maps"]:
client.sendTarget(
req["id"],
type="error",
key="remove.map.fow",
payload={
"msg": "User \"{0}\" is on map \"{1}\", however this map could not be found.".format(user["username"], user["current_map"])
})
return False
magic.game_data["maps"][user["current_map"]]["fow"][x][y] = True
client.sendTarget(
req["id"],
type="acknowledge",
key="add.map.fow",
payload={})
return True
def removeMapFow(client, req):
if "x" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="remove.map.fow",
payload={"msg": "Request details missing \"x\""})
return False
x = req["details"]["x"]
if "y" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="remove.map.fow",
payload={"msg": "Request details missing \"y\""})
return False
y = req["details"]["y"]
user = _getUserInfo(id=req["id"])
if not user:
client.sendTarget(
req["id"],
type="error",
key="remove.map.fow",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
if user["current_map"] not in magic.game_data["maps"]:
client.sendTarget(
req["id"],
type="error",
key="remove.map.fow",
payload={
"msg": "User \"{0}\" is on map \"{1}\", however this map could not be found.".format(user["username"], user["current_map"])
})
return False
magic.game_data["maps"][user["current_map"]]["fow"][x][y] = False
client.sendTarget(
req["id"],
type="acknowledge",
key="remove.map.fow",
payload={})
return True
##########################
# Unit Functions #
##########################
def getMapUnits(client, req):
user = _getUserInfo(id=req["id"])
if not user:
client.sendTarget(
req["id"],
type="error",
key="get.map.units",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
if user["current_map"] not in magic.game_data["maps"]:
client.sendTarget(
req["id"],
type="error",
key="get.map.units",
payload={
"msg": "User \"{0}\" is on map \"{1}\", however this map could not be found.".format(user["username"], user["current_map"])
})
return False
units = magic.game_data["maps"][user["current_map"]]["units"]
client.sendTarget(req["id"], key="get.map.units", payload={"payload": units})
return True
def exportMapUnit(client, req):
if "id" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="export.map.unit",
payload={"msg": "Request details missing \"id\""})
return False
id = req["details"]["id"]
user = _getUserInfo(id=req["id"])
if not user:
client.sendTarget(
req["id"],
type="error",
key="export.map.unit",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
if user["current_map"] not in magic.game_data["maps"]:
client.sendTarget(
req["id"],
type="error",
key="export.map.unit",
payload={
"msg": "User \"{0}\" is on map \"{1}\", however this map could not be found.".format(user["username"], user["current_map"])
})
return False
unit_exp = None
for unit in magic.game_data["maps"][user["current_map"]]["units"]:
if unit["id"] == id:
unit_exp = unit
break
    if not unit_exp:
client.sendTarget(
req["id"],
type="error",
key="export.map.unit",
payload={
"msg": "Unit with id \"{0}\" could not be found. Export failed.".format(id)
})
return False
if not os.path.exists("exports"): #TODO make export dir configurable
os.makedirs("exports")
file_name = unit["name"].replace(" ", "_") + ".json"
with open("exports/" + file_name, mode='w') as f:
json.dump(unit_exp, f, indent=4)
def addMapUnit(client, req):
if "x" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.unit",
payload={"msg": "Request details missing \"x\""})
return False
x = req["details"]["x"]
if "y" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.unit",
payload={"msg": "Request details missing \"y\""})
return False
y = req["details"]["y"]
if "name" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.unit",
payload={"msg": "Request details missing \"name\""})
return False
name = req["details"]["name"]
if "max_health" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.unit",
payload={"msg": "Request details missing \"max_health\""})
return False
max_health = req["details"]["max_health"]
if "current_health" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.unit",
payload={"msg": "Request details missing \"current_health\""})
return False
current_health = req["details"]["current_health"]
if "controller" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.unit",
payload={"msg": "Request details missing \"controller\""})
return False
controller = req["details"]["controller"]
if "type" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.unit",
payload={"msg": "Request details missing \"type\""})
return False
_type = req["details"]["type"]
if "id" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.unit",
payload={"msg": "Request details missing \"id\""})
return False
_id = req["details"]["id"]
#optional params
template_type = None
if "template_type" in req["details"]:
template_type = req["details"]["template_type"]
template_values = []
if "template_values" in req["details"]:
template_values = req["details"]["template_values"]
user = _getUserInfo(id=req["id"])
if not user:
client.sendTarget(
req["id"],
type="error",
key="add.map.unit",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
if user["current_map"] not in magic.game_data["maps"]:
client.sendTarget(
req["id"],
type="error",
key="add.map.unit",
payload={
"msg": "User \"{0}\" is on map \"{1}\", however this map could not be found.".format(user["username"], user["current_map"])
})
return False
units = magic.game_data["maps"][user["current_map"]]["units"]
units.append({
"x": x,
"y": y,
"name": name,
"max_health": max_health,
"current_health": current_health,
"controller": controller,
"type": _type,
"id": _id,
"template_type": template_type,
"template_values": template_values
})
client.sendTarget(
req["id"],
type="acknowledge",
key="add.map.unit",
payload={})
return True
def modifyMapUnit(client, req):
if "x" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="modify.map.unit",
payload={"msg": "Request details missing \"x\""})
return False
x = req["details"]["x"]
if "y" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="modify.map.unit",
payload={"msg": "Request details missing \"y\""})
return False
y = req["details"]["y"]
if "name" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="modify.map.unit",
payload={"msg": "Request details missing \"name\""})
return False
name = req["details"]["name"]
if "max_health" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="modify.map.unit",
payload={"msg": "Request details missing \"max_health\""})
return False
max_health = req["details"]["max_health"]
if "current_health" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="modify.map.unit",
payload={"msg": "Request details missing \"current_health\""})
return False
current_health = req["details"]["current_health"]
if "controller" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="modify.map.unit",
payload={"msg": "Request details missing \"controller\""})
return False
controller = req["details"]["controller"]
if "type" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="modify.map.unit",
payload={"msg": "Request details missing \"type\""})
return False
type = req["details"]["type"]
if "id" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="modify.map.unit",
payload={"msg": "Request details missing \"id\""})
return False
id = req["details"]["id"]
#optional params
template_type = None
if "template_type" in req["details"]:
template_type = req["details"]["template_type"]
template_values = []
if "template_values" in req["details"]:
template_values = req["details"]["template_values"]
user = _getUserInfo(id=req["id"])
if not user:
client.sendTarget(
req["id"],
type="error",
key="modify.map.unit",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
if user["current_map"] not in magic.game_data["maps"]:
client.sendTarget(
req["id"],
type="error",
key="modify.map.unit",
payload={
"msg": "User \"{0}\" is on map \"{1}\", however this map could not be found.".format(user["username"], user["current_map"])
})
return False
units = magic.game_data["maps"][user["current_map"]]["units"]
for unit in units:
if unit["id"] == id:
unit["x"] = x
unit["y"] = y
unit["name"] = name
unit["max_health"] = max_health
unit["current_health"] = current_health
unit["controller"] = controller
unit["type"] = type
unit["template_type"] = template_type
unit["template_values"] = template_values
client.sendTarget(
req["id"],
type="acknowledge",
key="modify.map.unit",
payload={})
return True
client.sendTarget(
req["id"],
type="error",
key="modify.map.unit",
payload={
"msg": "Note with id \"{0}\" on map \"{1}\".".format(id, user["current_map"])
})
return False
def removeMapUnit(client, req):
if "id" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="remove.map.unit",
payload={"msg": "Request details missing \"id\""})
return False
id = req["details"]["id"]
user = _getUserInfo(id=req["id"])
if not user:
client.sendTarget(
req["id"],
type="error",
key="remove.map.unit",
payload={
"msg": "User with id \"{0}\" has not been registered.".format(req["id"])
})
return False
if user["current_map"] not in magic.game_data["maps"]:
client.sendTarget(
req["id"],
type="error",
key="remove.map.unit",
payload={
"msg": "User \"{0}\" is on map \"{1}\", however this map could not be found.".format(user["username"], user["current_map"])
})
return False
units = magic.game_data["maps"][user["current_map"]]["units"]
for unit in units:
if unit["id"] == id:
units.remove(unit)
client.sendTarget(
req["id"],
type="acknowledge",
key="remove.map.unit",
payload={})
return True
client.sendTarget(
req["id"],
type="error",
key="remove.map.unit",
payload={
"msg": "Unit with id \"{0}\" not found on map \"{1}\".".format(id, user["current_map"])
})
return False
##########################
# Narrative Functions #
##########################
def addNarrative(client, req):
if "name" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.narrative",
payload={"msg": "Request details missing \"name\""})
return False
name = req["details"]["name"]
if "chapter_no" not in req["details"]:
nos = [ el["chapter_no"] for el in magic.game_data["story"]]
if len(nos) > 0:
maximum = max(nos)
else:
maximum = 0
chapter_no = maximum + 1
else:
chapter_no = req["details"]["chapter_no"]
text = req["details"]["text"] if "text" in req["details"] else None
magic.game_data["story"].append({
"name": name,
"chapter_no": chapter_no,
"text": text
})
client.sendTarget(
req["id"],
type="acknowledge",
key="add.narrative",
payload={})
return True
def removeNarrative(client, req):
if "chapter_no" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="add.narrative",
payload={"msg": "Request details missing \"chapter_no\""})
return False
chapter_no = req["details"]["chapter_no"]
for chapter in magic.game_data["story"]:
if chapter["chapter_no"] == chapter_no:
chapters = magic.game_data["story"]
chapters.remove(chapter)
higher_no_chapters = [ chapters.index(x) for x in chapters if x["chapter_no"] > chapter_no]
for x in higher_no_chapters:
chapters[x]["chapter_no"] -= 1
client.sendTarget(
req["id"],
type="acknowledge",
key="remove.narrative",
payload={})
return True
client.sendTarget(
req["id"],
type="error",
key="remove.narrative",
payload={
"msg": "Narrative with chapter number \"{0}\" not found .".format(chapter_no)
})
return False
def modifyNarrative(client, req):
if "name" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="modify.narrative",
payload={"msg": "Request details missing \"name\""})
return False
name = req["details"]["name"]
if "chapter_no" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="modify.narrative",
payload={"msg": "Request details missing \"chapter_no\""})
return False
chapter_no = req["details"]["chapter_no"]
new_chapter_no = req["details"]["new_chapter_no"] if "new_chapter_no" in req["details"] else None
if "text" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="modify.narrative",
payload={"msg": "Request details missing \"text\""})
return False
text = req["details"]["text"]
for chapter in magic.game_data["story"]:
if chapter["chapter_no"] == chapter_no:
if new_chapter_no:
chapter["chapter_no"] = new_chapter_no
chapter["name"] = name
chapter["text"] = text
client.sendTarget(
req["id"],
type="acknowledge",
key="modify.narrative",
payload={})
return True
client.sendTarget(
req["id"],
type="error",
key="modify.narrative",
payload={
"msg": "Narrative with chapter number \"{0}\" not found .".format(chapter_no)
})
return False
def getNarrative(client, req):
if "chapter_no" not in req["details"]:
client.sendTarget(
req["id"],
type="error",
key="get.narrative",
payload={"msg": "Request details missing \"chapter_no\""})
return False
chapter_no= req["details"]["chapter_no"]
chapters = sorted(magic.game_data["story"], key=lambda x: x["chapter_no"])
for chapter in chapters:
if chapter["chapter_no"] == chapter_no:
client.sendTarget(
req["id"],
key="get.narrative", payload={"payload": chapter})
return True
client.sendTarget(
req["id"],
type="error",
key="get.narrative",
payload={
"msg": "Narrative with chapter number \"{0}\" not found .".format(chapter_no)
})
return False
def listNarratives(client, req):
chapter_names = []
chapters = sorted(magic.game_data["story"], key=lambda x: x["chapter_no"])
for chapter in chapters:
chapter_names.append(chapter["name"])
client.sendTarget(
req["id"],
key="list.narratives", payload={"payload": chapter_names})
return True
def exportNarratives(client, req):
out = ""
chapters = sorted(magic.game_data["story"], key=lambda x: x["chapter_no"])
for chapter in chapters:
out += "Chapter {0}: {1}\n {2}\n\n".format(
chapter["chapter_no"],
chapter["name"],
chapter["text"])
client.sendTarget(
req["id"],
key="export.narrative", payload={"payload": chapters})
return True
##########################
# Save Data Function #
##########################
def save(client, req):
print("Saving Data...")
magic.save()
print("Data Saved.")
client.sendTarget(
req["id"],
type="acknowledge",
key="save",
payload={})
##########################
# Util Functions #
##########################
def _getUserInfo(id=None, username=None):
if id is not None:
for user in magic.users:
if user["client_id"] == id:
return user
elif username is not None:
for user in magic.users:
if user["username"] == username:
return user
return None
##########################
# Handlers #
##########################
common_handlers = {
"get.config": [getConfig],
"get.map": [getMap],
"register.user": [registerUser],
"get.chat": [getChat],
"get.map.units": [getMapUnits],
"get.map.fow": [getMapFow],
"get.map.feature.types": [getMapFeatureTypes]
}
gm_handlers = {
"get.users": [getUsers],
"move.user": [moveUser],
"add.chat.message": [addChatMessage],
"clear.chat": [clearChat],
"add.map": [addMap],
"remove.map": [removeMap],
"rename.map": [renameMap],
"list.maps": [listMaps],
"add.map.note": [addMapNote],
"remove.map.note": [removeMapNote],
"modify.map.note": [modifyMapNote],
"get.map.notes": [getMapNotes],
"add.map.feature.type": [addMapFeatureType],
"remove.map.feature.type": [removeMapFeatureType],
"add.map.feature": [addMapFeature],
"remove.map.feature": [removeMapFeature],
"add.map.fow": [addMapFow],
"remove.map.fow": [removeMapFow],
"add.map.unit": [addMapUnit],
"remove.map.unit": [removeMapUnit],
"modify.map.unit": [modifyMapUnit],
"export.map.unit": [exportMapUnit],
"add.narrative": [addNarrative],
"remove.narrative": [removeNarrative],
"modify.narrative": [modifyNarrative],
"get.narrative": [getNarrative],
"list.narratives": [listNarratives],
"export.narratives": [exportNarratives],
"save": [save]
}
```
#### File: DungeonBuilder/terminal/app_server.py
```python
from flask import Flask, jsonify, request, abort, send_from_directory
from flask_cors import CORS, cross_origin
import sys
import json
import hashlib
import random
import authentication
import log
from uuid import uuid4
from authentication import requires_auth, requires_gm_auth
app = Flask(__name__, static_url_path='')
CORS(app)
game_data = None
save_callback = None
users = []
######
## Helpers
#####
def _get_user():
auth = request.authorization
username = auth.username
map_name = None
for user in users:
if user["username"] == username:
return user
return None
######
## Error Handler
#####
class InvalidUsage(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
###
# Routes
###
# STATIC FILES
@app.route('/client/')
def root():
return app.send_static_file('js_app/index.html')
@app.route('/client/<path:path>')
def send_static(path):
return send_from_directory("js_app", path)
@app.route("/map/data/", methods=["GET"])
@requires_auth
def get_map(index=None):
user = _get_user()
if not user:
return 'username not set', 400
name = user["current_map"]
if not name:
return 'user current_map not set', 400
return jsonify(game_data["maps"][name])
@app.route("/map/add", methods=["POST"])
@requires_gm_auth
def add_feature_to_map():
data = request.json
# validate request data
if data is None:
        return 'No payload received', 400
if "y" not in data:
return 'Payload missing field "y"', 400
if "x" not in data :
return 'Payload missing field "x"', 400
if "type" not in data:
        return 'Payload missing field "type"', 400
if "notes" not in data:
        return ('Payload missing field "notes"', 400)
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
features = game_data["maps"][map_name]["features"]
for feature in features:
if feature["y"] == data["y"] and feature["x"] == data["x"]:
return ('', 500)
game_data["maps"][map_name]["features"].append(data)
data = json.dumps(game_data["maps"][map_name], sort_keys=True).encode("utf-8")
hsh = hashlib.md5(data).hexdigest()
game_data["map_hashes"][map_name]["map"] = hsh
return jsonify({})
@app.route("/map/bulk/add", methods=["POST"])
@requires_gm_auth
def bulk_add_feature_to_map():
data = request.json
if data is None:
        return 'No payload received', 400
new_features = data["features"]
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
features = game_data["maps"][map_name]["features"]
already_exist = []
for new_feature in new_features:
for feature in features:
if feature["y"] == new_feature["y"] and feature["x"] == new_feature["x"]:
already_exist.append(new_feature)
new_features = [ feature for feature in new_features if feature not in already_exist ]
for new_feature in new_features:
game_data["maps"][map_name]["features"].append(new_feature)
data = json.dumps(game_data["maps"][map_name], sort_keys=True).encode("utf-8")
hsh = hashlib.md5(data).hexdigest()
game_data["map_hashes"][map_name]["map"] = hsh
return jsonify({})
@app.route('/map/rm', methods=["POST"])
@requires_gm_auth
def rm_feature_from_map():
data = request.json
if ( ( data is None ) or
( "y" not in data ) or
( "x" not in data ) ):
return ('', 400)
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
features = game_data["maps"][map_name]["features"]
for feature in features:
if feature["y"] == data["y"] and feature["x"] == data["x"]:
features.remove(feature)
game_data["maps"][map_name]["features"] = features
data = json.dumps(game_data["maps"][map_name], sort_keys=True).encode("utf-8")
hsh = hashlib.md5(data).hexdigest()
game_data["map_hashes"][map_name]["map"] = hsh
return jsonify({})
return '', 500
@app.route('/map/new', methods=["POST"])
@requires_gm_auth
def create_new_map():
data = request.json
map_name = data["name"]
max_x = data["width"]
max_y = data["height"]
game_data["maps"][map_name] = {
"max_x": max_x,
"max_y": max_y,
"features": [],
"notes": [],
"units": [],
"fow": [ [ False for y in range(max_y) ] for x in range(max_x) ]
    }
    return jsonify({})
@app.route('/map/bulk/rm', methods=["POST"])
@requires_gm_auth
def bulk_rm_feature_from_map():
data = request.json
if data is None:
return ('', 400)
new_features = data["features"]
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
features = game_data["maps"][map_name]["features"]
for new_feature in new_features:
for feature in features:
if feature["y"] == new_feature["y"] and feature["x"] == new_feature["x"]:
try:
features.remove(feature)
except:
pass
game_data["maps"][map_name]["features"] = features
data = json.dumps(game_data["maps"][map_name], sort_keys=True).encode("utf-8")
hsh = hashlib.md5(data).hexdigest()
game_data["map_hashes"][map_name]["map"] = hsh
return jsonify({})
@app.route('/map/update/', methods=["POST"])
@requires_gm_auth
def update_feature():
data = request.json
# validate request data
if data is None:
        return 'No payload received', 400
if "y" not in data:
return 'Payload missing field "y"', 400
if "x" not in data :
return 'Payload missing field "x"', 400
if "type" not in data:
        return 'Payload missing field "type"', 400
if "notes" not in data:
        return ('Payload missing field "notes"', 400)
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
features = game_data["maps"][map_name]["features"]
for idx, feature in enumerate(features):
if feature["y"] == data["y"] and feature["x"] == data["x"]:
game_data["maps"][map_name]["features"][idx] = data
data = json.dumps(game_data["maps"][map_name], sort_keys=True).encode("utf-8")
hsh = hashlib.md5(data).hexdigest()
game_data["map_hashes"][map_name]["map"] = hsh
return jsonify({})
@app.route("/map", methods=["POST"])
@requires_gm_auth
def set_map_name():
data = request.json
user_to_change = data["username"]
new_map = data["map_name"]
valid_map = False
for map_name in game_data["maps"].keys():
if map_name == new_map:
valid_map = True
if not valid_map:
return "Invalid map name", 400
global users
for user in users:
if user["username"] == user_to_change:
user["current_map"] = new_map
data = json.dumps(users, sort_keys=True).encode("utf-8")
hsh = hashlib.md5(data).hexdigest()
game_data["global_hashes"]["users"] = hsh
return jsonify({"result": True})
return jsonify({"result": False})
@app.route("/map", methods=["GET"])
@requires_auth
def get_map_name():
return jsonify({"maps": list(game_data["maps"].keys())})
@app.route("/narrative", methods=["GET"])
@requires_gm_auth
def get_narratives():
narratives = [ nar["name"] for nar in game_data["story"] ]
return jsonify({"chapters": narratives})
@app.route("/narrative", methods=["POST"])
@requires_gm_auth
def add_narrative():
data = request.json
if "name" not in data:
return "Missing name field.", 400
game_data["story"].append({
"name": data["name"],
"text": ""
})
return jsonify({})
@app.route("/narrative/delete/<int:index>", methods=["GET"])
@requires_gm_auth
def rm_narrative(index):
game_data["story"].pop(index)
return jsonify({})
@app.route("/narrative/<int:index>", methods=["GET"])
@requires_gm_auth
def get_narrative_by_index(index):
return jsonify(game_data["story"][index])
@app.route("/narrative/<int:index>", methods=["POST"])
@requires_gm_auth
def update_narrative_by_index(index):
data = request.json
if data is None:
        return 'No payload received', 400
if "name" not in data:
return 'Payload missing field "name"', 400
if "text" not in data :
return 'Payload missing field "text"', 400
global game_data
game_data["story"][index] = data
return jsonify({})
@app.route('/chat', methods=["POST"])
@requires_auth
def add_chat_message():
data = request.json
if data is None:
        return 'No payload received', 400
if "sender" not in data:
return 'Payload missing field "sender"', 400
if "recipient" not in data:
return 'Payload missing field "recipient"', 400
if "message" not in data:
return 'Payload missing field "message"', 400
global game_data
game_data["chat"].append(data)
data = json.dumps(game_data["chat"], sort_keys=True).encode("utf-8")
hsh = hashlib.md5(data).hexdigest()
game_data["global_hashes"]["chat"] = hsh
return jsonify({})
@app.route('/users', methods=["GET"])
@requires_auth
def get_users():
return jsonify({"users": users})
@app.route('/users', methods=["POST"])
@requires_auth
def set_user_info():
data = request.json
if "username" not in data:
        raise InvalidUsage('Payload missing field "username"')
    if "current_map" not in data:
        raise InvalidUsage('Payload missing field "current_map"')
exists = False
global users
for user in users:
if user["username"] == data["username"]:
exists = True
break
if not exists:
new_user = {}
new_user["username"] = data["username"]
new_user["current_map"] = "__staging__"
auth = request.authorization
if authentication.gm_password == auth.password:
new_user["role"] = "gm"
else:
new_user["role"] = "pc"
if new_user["current_map"] == None:
new_user["current_map"] == "__staging__"
users.append(new_user)
data = json.dumps(users, sort_keys=True).encode("utf-8")
hsh = hashlib.md5(data).hexdigest()
        game_data["global_hashes"]["users"] = hsh
return jsonify({})
@app.route('/notes', methods=["GET"])
@requires_auth
def get_notes():
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
return jsonify(game_data["maps"][map_name]["notes"])
#TODO: actually implement set notes (part of the point of interest replacement)
@app.route('/notes', methods=["GET"])
@requires_auth
def set_notes():
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
return jsonify(game_data["maps"][map_name]["notes"])
@app.route('/chat/<username>', methods=["GET"])
@requires_auth
def get_chat_messages(username):
all_messages = game_data["chat"]
messages = []
for message in all_messages:
if ( message["recipient"] == username or
message["sender"] == username or
message["recipient"] == None ):
messages.append(message)
return jsonify({ "messages": messages })
@app.route('/chat/<username>/hash', methods=["GET"])
@requires_auth
def get_chat_hash(username):
all_messages = game_data["chat"]
messages = []
#for message in all_messages:
# if ( message["recipient"] == username or
# message["sender"] == username or
# message["recipient"] == None ):
# messages.append(message)
#data = json.dumps(messages, sort_keys=True).encode("utf-8")
#hash = hashlib.md5(data).hexdigest()
return jsonify({ "hash": chat_hash })
@app.route('/hash', methods=["GET"])
@requires_auth
def get_hashes():
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
return jsonify({
"map": game_data["map_hashes"][map_name]["map"],
"fow": game_data["map_hashes"][map_name]["fow"],
"units": game_data["map_hashes"][map_name]["units"],
"chat": game_data["global_hashes"]["chat"],
"users": game_data["global_hashes"]["users"]
})
@app.route('/save', methods=["GET"])
@requires_gm_auth
def save_data():
save_callback(game_data)
return jsonify({})
@app.route('/fow/add', methods=["POST"])
@requires_gm_auth
def add_fow():
data = request.json
if "x" not in data:
return 'Payload missing field "x"', 400
if "y" not in data:
return 'Payload missing field "y"', 400
x = data["x"]
y = data["y"]
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
    global game_data
game_data["maps"][map_name]["fow"][x][y] = True
data = json.dumps(game_data["maps"][map_name]["fow"], sort_keys=True).encode("utf-8")
hsh = hashlib.md5(data).hexdigest()
game_data["map_hashes"][map_name]["fow"] = hsh
return jsonify({})
@app.route('/fow/bulk/add', methods=["POST"])
@requires_gm_auth
def bulk_add_fow():
data = request.json
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
global game_data
for row in data["fow"]:
game_data["maps"][map_name]["fow"][row["x"]][row["y"]] = True
data = json.dumps(game_data["maps"][map_name]["fow"], sort_keys=True).encode("utf-8")
hsh = hashlib.md5(data).hexdigest()
game_data["map_hashes"][map_name]["fow"] = hsh
return jsonify({})
@app.route('/fow/rm', methods=["POST"])
@requires_gm_auth
def rm_fow():
data = request.json
if "x" not in data:
return 'Payload missing field "x"', 400
if "y" not in data:
return 'Payload missing field "y"', 400
x = data["x"]
y = data["y"]
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
global game_data
game_data["maps"][map_name]["fow"][x][y] = False
data = json.dumps(game_data["maps"][map_name]["fow"], sort_keys=True).encode("utf-8")
hsh = hashlib.md5(data).hexdigest()
game_data["map_hashes"][map_name]["fow"] = hsh
return jsonify({})
@app.route('/fow/bulk/rm', methods=["POST"])
@requires_gm_auth
def bulk_rm_fow():
data = request.json
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
    global game_data
for row in data["fow"]:
game_data["maps"][map_name]["fow"][row["x"]][row["y"]] = False
data = json.dumps(game_data["maps"][map_name]["fow"], sort_keys=True).encode("utf-8")
hsh = hashlib.md5(data).hexdigest()
game_data["map_hashes"][map_name]["fow"] = hsh
return jsonify({})
@app.route('/fow', methods=["GET"])
@requires_auth
def get_fow():
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
return jsonify({
"fow": game_data["maps"][map_name]["fow"]
})
@app.route('/fow/toggle', methods=["GET"])
@requires_gm_auth
def toggle_fow():
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
global game_data
initial = not game_data["maps"][map_name]["fow"][0][0]
for x in range(0, game_data["maps"][map_name]["max_x"]):
for y in range(0, game_data["maps"][map_name]["max_y"]):
game_data["maps"][map_name]["fow"][x][y] = initial
data = json.dumps(game_data["maps"][map_name]["fow"], sort_keys=True).encode("utf-8")
hsh = hashlib.md5(data).hexdigest()
game_data["map_hashes"][map_name]["fow"] = hsh
return jsonify({})
@app.route('/fow/fill', methods=["GET"])
@requires_gm_auth
def all_fow():
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
global game_data
for x in range(0, game_data["maps"][map_name]["max_x"]):
for y in range(0, game_data["maps"][map_name]["max_y"]):
game_data["maps"][map_name]["fow"][x][y] = True
data = json.dumps(game_data["maps"][map_name]["fow"], sort_keys=True).encode("utf-8")
hsh = hashlib.md5(data).hexdigest()
game_data["map_hashes"][map_name]["fow"] = hsh
return jsonify({})
@app.route('/fow/clear', methods=["GET"])
@requires_gm_auth
def none_fow():
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
global game_data
for x in range(0, game_data["maps"][map_name]["max_x"]):
for y in range(0, game_data["maps"][map_name]["max_y"]):
game_data["maps"][map_name]["fow"][x][y] = False
data = json.dumps(game_data["maps"][map_name]["fow"], sort_keys=True).encode("utf-8")
hsh = hashlib.md5(data).hexdigest()
game_data["map_hashes"][map_name]["fow"] = hsh
return jsonify({})
@app.route('/unit/add', methods=["POST"])
@requires_gm_auth
def add_unit():
data = request.json
if "x" not in data:
return 'Payload missing field "x"', 400
if "y" not in data:
return 'Payload missing field "y"', 400
if "name" not in data:
return 'Payload missing field "name"', 400
if "max_health" not in data:
return 'Payload missing field "max_health"', 400
if "current_health" not in data:
return 'Payload missing field "current_health"', 400
if "controller" not in data:
return 'Payload missing field "controller"', 400
if "type" not in data:
return 'Payload missing field "type"', 400
data["id"] = str(uuid4())
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
global game_data
game_data["maps"][map_name]["units"].append(data)
data = json.dumps(game_data["maps"][current_map]["units"], sort_keys=True).encode("utf-8")
game_data["map_hashes"][map_name]["units"] = hashlib.md5(data).hexdigest()
return jsonify({})
@app.route('/unit/rm', methods=["POST"])
@requires_gm_auth
def rm_unit():
data = request.json
if "id" not in data:
return 'Payload missing field "id"', 400
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
global game_data
units = game_data["maps"][map_name]["units"]
for unit in units:
if unit["id"] == data["id"]:
game_data["maps"][map_name]["units"].remove(unit)
break
data = json.dumps(game_data["maps"][current_map]["units"], sort_keys=True).encode("utf-8")
game_data["map_hashes"][map_name]["units"] = hashlib.md5(data).hexdigest()
return jsonify({})
@app.route('/unit/update', methods=["POST"])
@requires_auth
def update_unit():
data = request.json
if "x" not in data:
return 'Payload missing field "x"', 400
if "y" not in data:
return 'Payload missing field "y"', 400
if "name" not in data:
return 'Payload missing field "name"', 400
if "max_health" not in data:
return 'Payload missing field "max_health"', 400
if "current_health" not in data:
return 'Payload missing field "current_health"', 400
if "controller" not in data:
return 'Payload missing field "controller"', 400
if "type" not in data:
return 'Payload missing field "type"', 400
if "id" not in data:
return 'Payload missing field "id"', 400
global game_data
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
for i in range(len(game_data["maps"][map_name]["units"])):
if game_data["maps"][map_name]["units"][i]["id"] == data["id"]:
game_data["maps"][map_name]["units"][i] = data
break
data = json.dumps(game_data["maps"][map_name]["units"], sort_keys=True).encode("utf-8")
game_data["map_hashes"][map_name]["units"] = hashlib.md5(data).hexdigest()
return jsonify({})
@app.route('/unit', methods=["GET"])
@requires_auth
def get_units():
user = _get_user()
if not user:
return 'username not set', 400
map_name = user["current_map"]
if not map_name:
return 'user current_map not set', 400
units = game_data["maps"][map_name]["units"]
return jsonify({"units": units})
def run(data, port, host, gm_passwd, passwd, map_name, save):
global game_data
game_data = data
global save_callback
save_callback = save
# add staging map
game_data["maps"]["__staging__"] = {
"max_x": 100,
"max_y": 100,
"features": [{"y": 4,"x": 8,"type": "Snow","notes": ""},
{"y": 4,"x": 7,"type": "Snow","notes": ""},{"y": 4,"x": 6,"type": "Snow","notes": ""},
{"y": 4,"x": 5,"type": "Snow","notes": ""},{"y": 5,"x": 5,"type": "Snow","notes": ""},
{"y": 6,"x": 5,"type": "Snow","notes": ""},{"y": 6,"x": 6,"type": "Snow","notes": ""},
{"y": 6,"x": 7,"type": "Snow","notes": ""},{"y": 6,"x": 8,"type": "Snow","notes": ""},
{"y": 7,"x": 8,"type": "Snow","notes": ""},{"y": 8,"x": 8,"type": "Snow","notes": ""},
{"y": 8,"x": 7,"type": "Snow","notes": ""},{"y": 8,"x": 6,"type": "Snow","notes": ""},
{"y": 8,"x": 5,"type": "Snow","notes": ""},{"y": 6,"x": 10,"type": "Snow","notes": ""},
{"y": 6,"x": 11,"type": "Snow","notes": ""},{"y": 6,"x": 12,"type": "Snow","notes": ""},
{"y": 6,"x": 13,"type": "Snow","notes": ""},{"y": 6,"x": 14,"type": "Snow","notes": ""},
{"y": 4,"x": 12,"type": "Snow","notes": ""},{"y": 5,"x": 12,"type": "Snow","notes": ""},
{"y": 7,"x": 12,"type": "Snow","notes": ""},{"y": 8,"x": 12,"type": "Snow","notes": ""},
{"y": 6,"x": 19,"type": "Snow","notes": ""},{"y": 6,"x": 18,"type": "Snow","notes": ""},
{"y": 6,"x": 17,"type": "Snow","notes": ""},{"y": 6,"x": 16,"type": "Snow","notes": ""},
{"y": 7,"x": 16,"type": "Snow","notes": ""},{"y": 8,"x": 16,"type": "Snow","notes": ""},
{"y": 8,"x": 17,"type": "Snow","notes": ""},{"y": 8,"x": 18,"type": "Snow","notes": ""},
{"y": 8,"x": 19,"type": "Snow","notes": ""},{"y": 7,"x": 19,"type": "Snow","notes": ""},
{"y": 7,"x": 20,"type": "Snow","notes": ""},{"y": 8,"x": 21,"type": "Snow","notes": ""},
{"y": 6,"x": 23,"type": "Snow","notes": ""},{"y": 7,"x": 23,"type": "Snow","notes": ""},
{"y": 8,"x": 23,"type": "Snow","notes": ""},{"y": 8,"x": 24,"type": "Snow","notes": ""},
{"y": 8,"x": 25,"type": "Snow","notes": ""},{"y": 8,"x": 26,"type": "Snow","notes": ""},
{"y": 7,"x": 26,"type": "Snow","notes": ""},{"y": 6,"x": 26,"type": "Snow","notes": ""},
{"y": 6,"x": 25,"type": "Snow","notes": ""},{"y": 6,"x": 24,"type": "Snow","notes": ""},
{"y": 9,"x": 26,"type": "Snow","notes": ""},{"y": 10,"x": 26,"type": "Snow","notes": ""},
{"y": 10,"x": 25,"type": "Snow","notes": ""},{"y": 10,"x": 24,"type": "Snow","notes": ""},
{"y": 10,"x": 23,"type": "Snow","notes": ""},{"y": 6,"x": 28,"type": "Snow","notes": ""},
{"y": 7,"x": 28,"type": "Snow","notes": ""},{"y": 8,"x": 28,"type": "Snow","notes": ""},
{"y": 4,"x": 28,"type": "Snow","notes": ""},{"y": 6,"x": 30,"type": "Snow","notes": ""},
{"y": 7,"x": 30,"type": "Snow","notes": ""},{"y": 8,"x": 30,"type": "Snow","notes": ""},
{"y": 6,"x": 31,"type": "Snow","notes": ""},{"y": 6,"x": 32,"type": "Snow","notes": ""},
{"y": 6,"x": 33,"type": "Snow","notes": ""},{"y": 7,"x": 33,"type": "Snow","notes": ""},
{"y": 8,"x": 33,"type": "Snow","notes": ""},{"y": 6,"x": 35,"type": "Snow","notes": ""},
{"y": 7,"x": 35,"type": "Snow","notes": ""},{"y": 8,"x": 35,"type": "Snow","notes": ""},
{"y": 8,"x": 36,"type": "Snow","notes": ""},{"y": 8,"x": 37,"type": "Snow","notes": ""},
{"y": 8,"x": 38,"type": "Snow","notes": ""},{"y": 7,"x": 38,"type": "Snow","notes": ""},
{"y": 6,"x": 38,"type": "Snow","notes": ""},{"y": 6,"x": 37,"type": "Snow","notes": ""},
{"y": 6,"x": 36,"type": "Snow","notes": ""},{"y": 9,"x": 38,"type": "Snow","notes": ""},
{"y": 10,"x": 38,"type": "Snow","notes": ""},{"y": 10,"x": 37,"type": "Snow","notes": ""},
{"y": 10,"x": 36,"type": "Snow","notes": ""},{"y": 10,"x": 35,"type": "Snow","notes": ""}
],
"notes": [],
"units": [],
"fow": [ [ False for y in range(100) ] for x in range(100) ]
}
game_data["map_hashes"] = {}
for map in game_data["maps"].keys():
data = json.dumps(game_data["maps"][map], sort_keys=True).encode("utf-8")
map_hash = hashlib.md5(data).hexdigest()
data = json.dumps(game_data["maps"][map]["fow"], sort_keys=True).encode("utf-8")
fow_hash = hashlib.md5(data).hexdigest()
data = json.dumps(game_data["maps"][map]["units"], sort_keys=True).encode("utf-8")
unit_hash = hashlib.md5(data).hexdigest()
game_data["map_hashes"][map] = {
"map": map_hash,
"fow": fow_hash,
"units": unit_hash,
}
data = json.dumps(users, sort_keys=True).encode("utf-8")
hsh = hashlib.md5(data).hexdigest()
user_hash = hsh
data = json.dumps(game_data["chat"], sort_keys=True).encode("utf-8")
chat_hash = hashlib.md5(data).hexdigest()
game_data["global_hashes"] = {
"users": user_hash,
"chat": chat_hash
}
tmp = "%s%s%s%s" % (random.randint(0, 9), random.randint(0, 9), random.randint(0, 9), random.randint(0, 9))
authentication.gm_password = gm_passwd if gm_passwd else tmp
print("GM Password: %s" % authentication.gm_password)
tmp = "%s%s%s%s" % (random.randint(0, 9), random.randint(0, 9), random.randint(0, 9), random.randint(0, 9))
authentication.password = passwd if passwd else tmp
print("PC Password: %s" % authentication.password)
app.run(port=port, host=host, threaded=True, debug=False) #, ssl_context=("ssl.crt", "ssl.key"))
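# Usage sketch (added for illustration; not part of the original module). Assuming
# the server was started on localhost:8080 and that requires_gm_auth expects HTTP
# basic auth with the GM password printed at startup, a feature could be added to
# the caller's current map roughly like this. The host, port and credentials below
# are assumptions, not values taken from this repository.
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8080/map/add",
#       auth=("gm", "1234"),  # hypothetical username/password
#       json={"x": 3, "y": 5, "type": "Snow", "notes": ""},
#   )
#   print(resp.status_code, resp.json())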
```
#### File: DungeonBuilder/terminal/initiative_tracker.py
```python
from interactive import InteractiveModule, TextDisplayModule
from viewport import Viewport
from text_box import TextBox
import random
import json
import curses
import log
log = log.logger
class InitiativeTracker(InteractiveModule, TextDisplayModule):
def __init__(self):
self._showing = False
self.init_units = []
def _handle(self, viewer, ch):
if ch == ord("I"):
vp = viewer.get_submodule(Viewport)
self.init_units = []
for unit in vp._units:
self.init_units.append({
"include": False,
"name": unit.name,
"id": unit.id,
"modifier": 0
})
text = json.dumps(self.init_units, indent=4)
import sys, tempfile, os
import subprocess
EDITOR = os.environ.get('EDITOR','vim')
with tempfile.NamedTemporaryFile(suffix=".md") as tf:
text = text.encode("UTF-8")
tf.write(text)
tf.flush()
subprocess.call([EDITOR, tf.name])
# do the parsing with `tf` using regular File operations.
# for instance:
tf.seek(0)
out_text = tf.read().decode("UTF-8")
# fix cursor after opening editor
curses.curs_set(1)
curses.curs_set(0)
self.init_units = json.loads(out_text)
viewer._draw(force=True) # force redraw after closing vim
included = []
for unit in self.init_units:
log.error(unit)
if unit["include"]:
included.append(unit)
log.error(included)
self.init_units = included
for unit in self.init_units:
unit["roll"] = unit["modifier"] + random.randint(0, 20)
self.init_units = sorted(self.init_units, key=lambda x: x["roll"], reverse=True)
viewer.apply_to_submodules(TextDisplayModule, lambda x: x._hide(viewer))
self._show(viewer)
elif ch == ord("i"):
viewer.apply_to_submodules(TextDisplayModule, lambda x: x._hide(viewer))
self._show(viewer)
def _handle_help(self, viewer, buf):
pass
def _handle_combo(self, viewer, buf):
split = buf.split(" ")
if split[0] == "init" and ( split[1] == "roll" or split[1] == "r" ):
for unit in self.init_units:
unit["roll"] = unit["modifier"] + random.randint(0, 20)
self.init_units = sorted(self.init_units, key=lambda x: x["roll"], reverse=True)
self._show(viewer)
def _show(self, viewer):
self._showing = True
tb = viewer.get_submodule(TextBox)
lines = [ [{
"text": "Initiative:",
"color": "Gold"
}] ]
for unit in self.init_units:
lines.append([ {
"text": "{0} - {1}".format(unit["roll"], unit["name"]),
"color": None
} ])
log.error(lines)
tb.set(lines)
def _hide(self, viewer):
self._showing = False
```
#### File: DungeonBuilder/terminal/text_box.py
```python
from interactive import VisibleModule, InteractiveModule, TextDisplayModule
from viewer import ViewerConstants
from state import State
import colors
import log
import math
import curses
log = log.logger
class TextBox(VisibleModule, InteractiveModule):
def __init__(self):
self.initial_draw_priority = -1
self.draw_priority = 10
self.x = 0
self.y = 0
self.h = ViewerConstants.max_y-2
self.w = math.floor(ViewerConstants.max_x/3)
self._screen = curses.newwin(self.h, self.w, self.y, self.x)
self._default_lines = [
[
{
"text": "Text Box:" ,
"color": "Gold"
}
],
[
{
"text": "ctrl + j",
"color": "Gold"
},
{
"text": " - scroll down",
"color": None
}
],
[
{
"text": "ctrl + k" ,
"color": "Gold"
},
{
"text": " - scroll up",
"color": None
}
],
[
{
"text": ":clear",
"color": "Gold"
},
{
"text": " - clear text box.",
"color": None
}
],
[
{
"text": ":read",
"color": "Gold"
},
{
"text": " - read text in window. GM only.",
"color": None
}
],
[
{
"text": "Narrative (GM Only):",
"color": "Gold"
}
],
[
{
"text": ":n list",
"color": "Gold"
},
{
"text": " - list chapters.",
"color": None
}
],
[
{
"text": ":n view <chapter number>",
"color": "Gold"
},
{
"text": " - view chapter.",
"color": None
}
],
[
{
"text": ":n edit <chapter number>",
"color": "Gold"
},
{
"text": " - edit chapter.",
"color": None
}
],
[
{
"text": ":n read <chapter number>",
"color": "Gold"
},
{
"text": ": - read chapter. requires espeak.",
"color": None
}
],
[
{
"text": "Chat:",
"color": "Gold"
}
],
[
{
"text": ":chat <message>",
"color": "Gold"
},
{
"text": " - send a message to all players",
"color": None
}
],
[
{
"text": ":whisper <username> <message>",
"color": "Gold"
},
{
"text": " - send a message to a specific player",
"color": None
}
]
]
self._lines = self._default_lines
self._previous_lines = []
self._page = 0
self._max_text_w = self.w - 2
self._max_text_h = self.h - 2
self._dirty = True
def draw(self, viewer, force=False):
if self._dirty or force:
if force: log.debug("narrative.draw forced")
self._screen.erase()
state = viewer.get_submodule(State)
self._screen.attrset(colors.get("Gold"))
if state.get_state("easter_egg") is not None:
self._screen.border(
curses.ACS_VLINE,
curses.ACS_VLINE,
curses.ACS_HLINE,
curses.ACS_HLINE,
curses.ACS_DIAMOND,
curses.ACS_DIAMOND,
curses.ACS_DIAMOND,
curses.ACS_DIAMOND
)
else:
self._screen.border(
curses.ACS_BOARD,
curses.ACS_BOARD,
curses.ACS_BOARD,
curses.ACS_BOARD,
curses.ACS_BOARD,
curses.ACS_BOARD,
curses.ACS_BOARD,
curses.ACS_BOARD
)
self._screen.attroff(colors.get("Gold"))
offset_width = self._max_text_w
line_no = 1
for line in self._lines:
char = 2
for part in line:
for text in part["text"]:
if char == offset_width:
char = 2
line_no += 1
if part["color"]:
self._screen.addstr(line_no, char, text, colors.get(part["color"]))
else:
self._screen.addstr(line_no, char, text)
char += len(text)
line_no += 1
self._paged_text = []
#for line in self._text.splitlines():
# splits = [ line[i:i+self._max_text_w] for i in range(0, len(line), self._max_text_w) ]
# self._paged_text = self._paged_text + (splits if splits else [""])
#x = 0
#page = 0
#for line in self._paged_text:
# if page >= self._page:
# self._screen.addstr(x+1, 2, line)
# x += 1
# if x > self._max_text_h-1:
# break
# page += 1
self._screen.noutrefresh()
self._dirty = False
return True
return False
def _handle(self, viewer, ch):
if curses.keyname(ch) == b'^J':
if self._page+self._max_text_h < len(self._paged_text):
self._page += 1
self._dirty = True
if curses.keyname(ch) == b'^K':
if (self._page - 1) >= 0:
self._page -= 1
self._dirty = True
def _handle_combo(self, viewer, buff):
buff = buff.split(" ")
if buff[0] == "back":
            if self._previous_lines:
self.set(self._previous_lines)
elif buff[0] == "clear":
viewer.apply_to_submodules(TextDisplayModule, lambda x: x._hide(viewer))
self.set(self._default_lines)
self._dirty = True
elif buff[0] == "read" and len(buff) == 1:
state = viewer.get_submodule(State)
if state.get_state("role") == "gm":
import subprocess
import os
text = self._paged_text
FNULL = open(os.devnull, 'w')
for line in text:
try: # lazily handle failure
subprocess.call(["espeak", line], stdout=FNULL, stderr=subprocess.STDOUT)
except:
pass
def _handle_help(self, viewer, buff):
pass
def set_text(self, text):
raise Exception()
def set(self, lines):
self._previous_lines = self._lines
self._lines = lines
self._page = 0
self._dirty = True
def get_text(self):
        return self._lines
lines = [
[ {"color": "Gold", "text": "this is a line of text"}]
]
```
|
{
"source": "jghibiki/Sydney",
"score": 2
}
|
#### File: Sydney/server/utils.py
```python
import json
import os
def load():
pipeline_defs = {}
with open("../config.json", "r") as f:
schema = json.load(f)
pipeline_defs["states"] = schema["states"]
pipeline_defs["failure_state"] = schema["failure_state"]
pipeline_defs["pending_state"] = schema["pending_state"]
pipeline_defs["success_state"] = schema["success_state"]
pipeline_defs["running_state"] = schema["running_state"]
pipeline_defs["root_hash"] = schema["root_hash"]
pipeline_defs["notifications"] = schema["notifications"]
pipeline_defs["environments"] = []
for env in schema["environments"]:
env = {"name": env, "pipelines": []}
for file_name in schema["pipelines"]:
file_name = os.path.join("../pipelines", file_name)
print(f"Attempting to load file: {file_name}")
if os.path.exists(file_name) and os.path.isfile(file_name):
print(f'File "{file_name}" exists')
with open(file_name, "r") as f:
pipeline = json.load(f)
for step in pipeline["steps"]:
step["state"] = step["initial_state"]
step["exit_state"] = None
env["pipelines"].append(pipeline)
pipeline_defs["environments"].append(env)
    # make child pipelines aware of their parents
for env in pipeline_defs["environments"]:
for pipeline in env["pipelines"]:
for step in pipeline["steps"]:
if "info" in step:
if "child_pipeline" in step["info"]:
for potential_child in env["pipelines"]:
if (
"#" + potential_child["name"]
== step["info"]["child_pipeline"]
):
potential_child["parent"] = "#" + pipeline["name"]
break
print("Loaded data", pipeline_defs)
return pipeline_defs
```
#### File: sydney/schema/config_loader.py
```python
from pathlib import Path
from sydney.schema.config_schemas import config_schema, pipeline_schema
class ConfigLoader:
def __init__(self, config, pipelines):
self.config_path = Path(config)
self.pipelines_path = Path(pipelines)
def read_config(self):
with self.config_path.open() as f:
config_str = f.read()
return config_schema.loads(config_str)
def read_pipelines(self):
for pipeline in self.pipelines_path.glob("**/*.json"):
print(f"Loading pipeline file {pipeline}")
with pipeline.open() as f:
pipeline_str = f.read()
yield pipeline_schema.loads(pipeline_str)
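# Usage sketch (illustrative; the file and directory names are assumptions):
#
#   loader = ConfigLoader("config.json", "pipelines")
#   config = loader.read_config()              # parsed via config_schema
#   pipelines = list(loader.read_pipelines())  # one parsed pipeline per pipelines/**/*.json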
```
|
{
"source": "jghosn/bert_reranker",
"score": 2
}
|
#### File: bert_reranker/data/predict.py
```python
import json
import logging
import math
import pickle
import numpy as np
from tqdm import tqdm
from bert_reranker.data.data_loader import (
get_passages_by_source,
_encode_passages,
get_passage_last_header, get_question, get_passage_id, is_in_distribution, OOD_STRING,
get_passage_content2pid, )
logger = logging.getLogger(__name__)
def get_batched_pairs(qa_pairs, batch_size):
result = []
for i in range(0, len(qa_pairs), batch_size):
result.append(qa_pairs[i: i + batch_size])
return result
class Predictor:
"""
    Main class to generate predictions. It considers only the in-domain part
    (so there is no model to decide whether something is in-domain or out-of-domain).
"""
def __init__(self, retriever_trainee):
self.retriever_trainee = retriever_trainee
self.max_question_len = self.retriever_trainee.retriever.max_question_len
self.tokenizer = self.retriever_trainee.retriever.tokenizer
self.retriever = retriever_trainee.retriever
self.no_candidate_warnings = 0
def generate_predictions(self, json_file, predict_to, multiple_thresholds, write_fix_report):
with open(json_file, "r", encoding="utf-8") as f:
json_data = json.load(f)
source2passages, _, passage_id2index = get_passages_by_source(
json_data
)
source2passages, _, _ = _encode_passages(
source2passages,
self.max_question_len,
self.tokenizer,
do_not_encode=True
)
res = self.compute_results(json_data, passage_id2index, source2passages)
predictions, questions, sources, normalized_scores, indices_of_correct_passage = res
generate_and_log_results(indices_of_correct_passage, normalized_scores, predict_to,
predictions, questions, source2passages, sources,
multiple_thresholds=multiple_thresholds,
write_fix_report=write_fix_report,
json_data=json_data)
def compute_results(self, json_data, passage_id2index, source2passages):
predictions = []
questions = []
sources = []
normalized_scores = []
indices_of_correct_passage = []
        # first collect and embed all the candidates (so we avoid recomputing them again and again)
source2embedded_passages = {}
for source, passages in source2passages.items():
logger.info('encoding source {}'.format(source))
if passages:
embedded_passages = self.retriever.embed_paragrphs(passages,
progressbar=True)
source2embedded_passages[source] = embedded_passages
else:
source2embedded_passages[source] = None
# then loop over the examples and produce a prediction
for example in tqdm(json_data["examples"]):
question = example["question"]
questions.append(question)
source = example["source"]
sources.append(source)
index_of_correct_passage = passage_id2index[example["passage_id"]]
prediction, norm_score = self.make_single_prediction(question, source,
source2embedded_passages)
predictions.append(prediction)
normalized_scores.append(norm_score)
indices_of_correct_passage.append(index_of_correct_passage)
return predictions, questions, sources, normalized_scores, indices_of_correct_passage
def make_single_prediction(self, question, source, source2embedded_passages,
question_already_embedded=False):
if source in source2embedded_passages and source2embedded_passages[source] is not None:
embedded_candidates = source2embedded_passages[source]
return self.retriever.predict(question, embedded_candidates,
passages_already_embedded=True,
question_already_embedded=question_already_embedded)
else:
self.no_candidate_warnings += 1
logger.warning('no candidates for source {} - returning 0 by default (so far, this '
'happened {} times)'.format(source, self.no_candidate_warnings))
return -2, 1.0
class PredictorWithOutlierDetector(Predictor):
"""
    Generates predictions and also includes the model used to detect outliers.
"""
def __init__(self, retriever_trainee, outlier_detector_model):
super(PredictorWithOutlierDetector, self).__init__(retriever_trainee)
self.outlier_detector_model = outlier_detector_model
def make_single_prediction(self, question, source, source2embedded_passages):
emb_question = self.retriever.embed_question(question)
in_domain = self.outlier_detector_model.predict(emb_question)
in_domain = np.squeeze(in_domain)
if in_domain == 1: # in-domain
return super(PredictorWithOutlierDetector, self).make_single_prediction(
emb_question, source, source2embedded_passages, question_already_embedded=True)
else: # out-of-domain (-1 is the result we return for out-of-domain)
return -1, 1.0
def make_readable(passages):
result = []
for passage in passages:
new_entry = {'passage_id': passage['passage_id'], 'source': passage['source'],
'reference_type': passage['reference_type'],
'section_headers': passage['reference']['section_headers']}
result.append(new_entry)
return result
def generate_and_log_results(indices_of_correct_passage, normalized_scores, predict_to,
predictions, questions, source2passages, sources,
multiple_thresholds, write_fix_report, json_data):
with open(predict_to, "w") as out_stream:
if write_fix_report:
fix_json = {'passages': make_readable(json_data['passages']), 'fixes': []}
passage_content2pid = get_passage_content2pid(json_data['passages'])
log_results_to_file(indices_of_correct_passage, normalized_scores, out_stream,
predictions, questions, source2passages, sources, fix_json,
passage_content2pid)
with open(predict_to + '_fix.json', 'w', encoding="utf-8") as ostream:
json.dump(fix_json, ostream, indent=4, ensure_ascii=False)
else:
log_results_to_file(indices_of_correct_passage, normalized_scores, out_stream,
predictions, questions, source2passages, sources, None, None)
out_stream.write('results:\n\n')
if multiple_thresholds:
for threshold in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
result_message = compute_result_at_threshold(
predictions, indices_of_correct_passage, normalized_scores, threshold, True
)
logger.info(result_message)
out_stream.write(result_message + "\n")
else:
result_message = compute_result_at_threshold(
predictions, indices_of_correct_passage, normalized_scores, 0.0, False
)
logger.info(result_message)
out_stream.write(result_message + "\n")
def log_results_to_file(indices_of_correct_passage, normalized_scores, out_stream,
predictions, questions, source2passages, sources, fix_json=None,
passage_content2pid=None):
for i in range(len(predictions)):
question = questions[i]
prediction = predictions[i]
index_of_correct_passage = indices_of_correct_passage[i]
norm_score = normalized_scores[i]
source = sources[i]
out_stream.write("-------------------------\n")
out_stream.write("question:\n\t{}\n".format(question))
if prediction == index_of_correct_passage and prediction == -1:
pred_outcome = "OOD_CORRECT"
elif prediction == index_of_correct_passage and prediction >= 0:
pred_outcome = "ID_CORRECT"
elif prediction != index_of_correct_passage and index_of_correct_passage == -1:
pred_outcome = "OOD_MISCLASSIFIED_AS_ID"
elif prediction == -1 and index_of_correct_passage >= 0:
pred_outcome = "ID_MISCLASSIFIED_AS_OOD"
elif (prediction >= 0 and index_of_correct_passage >= 0 and
prediction != index_of_correct_passage):
pred_outcome = "ID_MISCLASSIFIED_AS_ANOTHER_ID"
else:
raise ValueError('wrong prediction/target combination')
prediction_content = source2passages[source][prediction] if prediction >= 0 else OOD_STRING
out_stream.write(
"prediction: {} / norm score {:3.3}\nprediction content:"
"\n\t{}\n".format(
pred_outcome,
norm_score,
prediction_content
)
)
target_content = source2passages[source][
index_of_correct_passage] if index_of_correct_passage >= 0 else OOD_STRING
out_stream.write(
"target content:\n\t{}\n\n".format(
target_content
)
)
if fix_json is not None:
new_entry = {}
new_entry['source'] = source
new_entry['question'] = question
target_pid = passage_content2pid[source][target_content]
new_entry['target'] = (target_pid, target_content)
prediction_pid = passage_content2pid[source][prediction_content]
new_entry['prediction'] = (prediction_pid, prediction_content)
new_entry['fix'] = target_pid
fix_json['fixes'].append(new_entry)
def generate_embeddings(ret_trainee, input_file=None, out_file=None, json_data=None,
embed_passages=True):
if input_file:
with open(input_file, "r", encoding="utf-8") as f:
json_data = json.load(f)
elif json_data:
pass
else:
raise ValueError("You should specify either the input file or the json_data")
source2passages, pid2passage, _ = get_passages_by_source(json_data)
question_embs = []
question_texts = []
labels = []
if json_data.get("examples"):
for example in tqdm(json_data["examples"]):
pid = get_passage_id(example)
passage = pid2passage[pid]
labels.append('id' if is_in_distribution(passage) else 'ood')
question = get_question(example)
emb = ret_trainee.retriever.embed_question(question)
question_embs.append(emb)
question_texts.append(question)
passage_header_embs = []
ood = 0
passage_texts = []
if embed_passages:
for source, passages in source2passages.items():
logger.info('embedding passages for source {}'.format(source))
for passage in tqdm(passages):
if is_in_distribution(passage):
passage_text = get_passage_last_header(passage, return_error_for_ood=True)
emb = ret_trainee.retriever.embed_paragraph(
passage_text)
passage_header_embs.append(emb)
passage_texts.append(passage_text)
else:
ood += 1
to_serialize = {"question_embs": question_embs, "passage_header_embs": passage_header_embs,
"question_labels": labels, "passage_texts": passage_texts,
"question_texts": question_texts}
if out_file:
with open(out_file, "wb") as out_stream:
pickle.dump(to_serialize, out_stream)
logger.info(
'generated {} question embeddings and {} passage header embeddings ({} skipped because '
'out-of-distribution)'.format(
len(question_embs), len(passage_header_embs), ood))
return to_serialize
def compute_result_at_threshold(
predictions, indices_of_correct_passage, normalized_scores, threshold, log_threshold
):
count = len(indices_of_correct_passage)
ood_count = sum([x == -1 for x in indices_of_correct_passage])
id_count = count - ood_count
correct = 0
id_correct = 0
ood_correct = 0
ood_misclassified_as_id = 0
id_misclassified_as_ood = 0
id_misclassified_as_id = 0
for i, prediction in enumerate(predictions):
if normalized_scores[i] >= threshold:
after_threshold_pred = prediction
# id_correct += int(after_threshold_pred == indices_of_correct_passage[i])
else:
after_threshold_pred = -1
# ood_correct += int(after_threshold_pred == indices_of_correct_passage[i])
correct += int(after_threshold_pred == indices_of_correct_passage[i])
if indices_of_correct_passage[i] != -1 and after_threshold_pred == -1:
# target id - prediction ood
id_misclassified_as_ood += 1
elif (indices_of_correct_passage[i] != -1 and
indices_of_correct_passage[i] != after_threshold_pred):
# target id - prediction id but wrong
id_misclassified_as_id += 1
elif (indices_of_correct_passage[i] != -1 and
indices_of_correct_passage[i] == after_threshold_pred):
# target id - prediction id and correct
id_correct += 1
elif indices_of_correct_passage[i] == -1 and after_threshold_pred == -1:
# target ood - prediction ood
ood_correct += 1
elif indices_of_correct_passage[i] == -1 and after_threshold_pred != -1:
# target ood - prediction id
ood_misclassified_as_id += 1
else:
raise ValueError()
acc = ((correct / count) * 100) if count > 0 else math.nan
id_acc = ((id_correct / id_count) * 100) if id_count > 0 else math.nan
ood_acc = ((ood_correct / ood_count) * 100) if ood_count > 0 else math.nan
threshold_msg = "threshold {:1.3f}: ".format(threshold) if log_threshold else ""
result_message = "\n{}overall: {:3}/{}={:3.2f}% acc".format(threshold_msg, correct, count, acc)
result_message += "\n\tin-distribution: {:3}/{}={:3.2f}% acc".format(id_correct, id_count,
id_acc)
result_message += "\n\t\twrong because marked ood: {:3}/{}={:3.2f}% err".format(
id_misclassified_as_ood, id_count,
((id_misclassified_as_ood / id_count) * 100) if id_count > 0 else math.nan)
result_message += "\n\t\tmarked id but wrong candidate: {:3}/{}={:3.2f}% err".format(
id_misclassified_as_id, id_count,
((id_misclassified_as_id / id_count) * 100) if id_count > 0 else math.nan)
result_message += "\n\tout-of-distribution: {:3}/{}={:3.2f}% acc".format(
ood_correct, ood_count, ood_acc)
result_message += "\n\t------\n\t(OOD/ID classifier): correct {:3}(OOD) " \
"+ {:3}(ID) / {:3} = {:3.2f}% acc".format(
ood_correct, id_count - id_misclassified_as_ood, count,
100 * ((ood_correct + (id_count - id_misclassified_as_ood)) / count))
return result_message
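# Usage sketch (illustrative, not part of the original module). Given a trained
# retriever trainee and an already-fitted sklearn outlier detector, predictions for
# a dataset file could be generated roughly like this; the file names are assumptions.
#
#   predictor = PredictorWithOutlierDetector(ret_trainee, outlier_detector_model)
#   predictor.generate_predictions(
#       "data/test.json", predict_to="predictions.txt",
#       multiple_thresholds=True, write_fix_report=False)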
```
#### File: bert_reranker/models/sklearn_grid_search.py
```python
import argparse
import logging
import os
import pickle
import numpy as np
from sklearn.neighbors import LocalOutlierFactor
from sklearn.ensemble import IsolationForest
from sklearn.svm import OneClassSVM
from sklearn.covariance import EllipticEnvelope
from sklearn.model_selection import GridSearchCV
from bert_reranker.models.sklearn_outliers_model import collect_question_embeddings
logger = logging.getLogger(__name__)
SKLEARN_MODEL_FILE_NAME = "sklearn_outlier_model.pkl"
def get_model_and_params(model_name):
if model_name == "lof":
base_clf = LocalOutlierFactor()
parameters = {
"n_neighbors": [3, 4, 5, 6],
"contamination": list(np.arange(0.1, 0.5, 0.05)),
"novelty": [True],
}
elif model_name == "isolation_forest":
base_clf = IsolationForest()
parameters = {
"max_samples": [10, 50, 100, 200, 313],
"n_estimators": [100, 150, 200],
"contamination": list(np.arange(0.1, 0.5, 0.1)),
"max_features": [1, 2, 5],
"random_state": [42],
}
elif model_name == "ocsvm":
base_clf = OneClassSVM()
parameters = {
"kernel": ["linear", "poly", "rbf"],
"gamma": [0.001, 0.005, 0.01, 0.1]
}
elif model_name == "elliptic_env":
base_clf = EllipticEnvelope()
parameters = {
"contamination": list(np.arange(0.1, 0.5, 0.1)),
"random_state": [42],
}
else:
raise NotImplementedError()
return base_clf, parameters
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--embeddings", help="numpy file with embeddings", required=True
)
parser.add_argument(
"--output", help="will store the model output in this folder", required=True
)
parser.add_argument(
"--test-embeddings",
help="embeddings do evaluate the sklearn model on",
required=True,
)
parser.add_argument(
"--keep-ood-for-questions",
help="will keep ood embeddings for questions- by default, they are "
"filtered out",
action="store_true",
)
parser.add_argument(
"--train-on-questions",
help="will include question embeddings in train",
action="store_true",
)
parser.add_argument(
"--train-on-passage-headers",
help="will include passage-headers in train",
action="store_true",
)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
with open(args.embeddings, "rb") as in_stream:
data = pickle.load(in_stream)
if args.train_on_questions:
embeddings = collect_question_embeddings(args, data)
elif args.train_on_passage_headers:
embeddings = data["passage_header_embs"]
logger.info("found {} passage headers embs".format(len(embeddings)))
# labels = np.ones(len(embeddings))
logger.info("final size of the collected embeddings: {}".format(len(embeddings)))
embedding_array = np.concatenate(embeddings)
def scoring(estimator, X, y=None, args=args):
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
# Load testing embeddings
with open(args.test_embeddings, "rb") as in_stream:
data = pickle.load(in_stream)
question_embeddings = np.concatenate(data["question_embs"])
labels = [1 if label == "id" else -1 for label in data["question_labels"]]
preds = estimator.predict(question_embeddings)
acc = accuracy_score(labels, preds)
conf_mat = confusion_matrix(labels, preds)
print("estimator params", estimator)
print("Accuracy:", acc)
print(conf_mat)
print("="*50)
return acc
models = ["lof", "isolation_forest", "ocsvm", "elliptic_env"]
best_score = 0
for model in models:
logger.info("Testing model: {}".format(model))
base_clf, parameters = get_model_and_params(model)
cv = [(slice(None), slice(None))] # Hack to disable CV
clf = GridSearchCV(base_clf, parameters, scoring=scoring, cv=cv)
clf.fit(embedding_array)
logger.info("best model accuracy: {}".format(clf.best_score_))
logger.info("best model parameters: {}".format(clf.best_params_))
if clf.best_score_ > best_score:
best_score = clf.best_score_
best_params = clf.best_params_
best_model_name = model
logger.info(
"New overall best model found with accuracy: {}".format(clf.best_score_)
)
logger.info("Best model name: {}".format(best_model_name))
logger.info("Best model params: {}".format(best_params))
with open(
os.path.join(args.output, SKLEARN_MODEL_FILE_NAME), "wb"
) as out_stream:
pickle.dump(clf.best_estimator_, out_stream)
if __name__ == "__main__":
main()
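# Example invocation (illustrative sketch; the file paths are hypothetical):
#
#   python sklearn_grid_search.py \
#       --embeddings train_embs.pkl \
#       --test-embeddings dev_embs.pkl \
#       --output models/ \
#       --train-on-passage-headers
#
# This grid-searches LOF, IsolationForest, OneClassSVM and EllipticEnvelope on the
# passage-header embeddings, scores each candidate on the test embeddings, and
# pickles the best estimator to models/sklearn_outlier_model.pkl.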
```
|
{
"source": "jghsrobotics/RaspberryPiCam",
"score": 3
}
|
#### File: jghsrobotics/RaspberryPiCam/sender.py
```python
import os
import sys
import socket
from PythonFileLibrary.FileReader import *
## Read from configuration file first
configFile = FileReader('config')
def GetSetting(fileReader : FileReader, setting : str):
fileReader.ResetCursor()
    for line in fileReader:
line = line.strip()
if setting in line:
fileReader.MoveCursorDown()
line = fileReader.GetCurrentLine().strip()
return line
return None
def getPrivateIP():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(("10.10.0.0", 1))
return s.getsockname()[0]
except:
pass
return None
localIP = getPrivateIP()
remoteIP = GetSetting(configFile, '# Remote Public IP Address')
port = GetSetting(configFile, '# Port')
protocol = GetSetting(configFile, '# Protocol (UDP / TCP)')
height = GetSetting(configFile, '# Height')
width = GetSetting(configFile, '# Width')
FPS = GetSetting(configFile, '# FPS')
bitrate = GetSetting(configFile, '# Bitrate')
quality = GetSetting(configFile, '# Quality (10 - 40)')
UDPcommand = "raspivid -n -t 0 -w {0} -h {1} -qp {6} -fps {2} --flush -b {3} -o - | gst-launch-1.0 -e -vvvv fdsrc ! h264parse ! rtph264pay config-interval=1 pt=96 ! udpsink host={4} port={5}"
TCPcommand = "raspivid -n -t 0 -w {0} -h {1} -qp {6} -fps {2} --flush -b {3} -o - | gst-launch-1.0 -v fdsrc ! h264parse ! rtph264pay config-interval=1 pt=96 ! gdppay ! tcpserversink host={4} port={5}"
if protocol == "TCP":
print("Trying to send through TCP pipe...")
os.system(TCPcommand.format(width, height, FPS, bitrate, localIP, port, quality))
elif protocol == "UDP":
print("Trying to send through UDP pipe...")
os.system(UDPcommand.format(width, height, FPS, bitrate, remoteIP, port, quality))
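# Illustrative sketch of the expected 'config' file layout, inferred from the
# GetSetting calls above (each setting header is followed by its value on the next
# line); the concrete values are examples only:
#
#   # Remote Public IP Address
#   192.168.1.50
#   # Port
#   5000
#   # Protocol (UDP / TCP)
#   UDP
#   # Height
#   480
#   # Width
#   640
#   # FPS
#   30
#   # Bitrate
#   1000000
#   # Quality (10 - 40)
#   25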
```
|
{
"source": "jghyllebert/ChatterBot",
"score": 2
}
|
#### File: ChatterBot/chatterbot/__main__.py
```python
import importlib
import sys
def get_chatterbot_version():
chatterbot = importlib.import_module('chatterbot')
return chatterbot.__version__
if __name__ == '__main__':
if '--version' in sys.argv:
print(get_chatterbot_version())
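# Usage sketch (not part of the original file): running the package as a module
# with the --version flag prints the installed ChatterBot version, e.g.
#
#   python -m chatterbot --version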
```
|
{
"source": "jgiannuzzi-org/publish-unit-test-result-action",
"score": 2
}
|
#### File: python/test/test_action_yml.py
```python
import unittest
import yaml
from yaml import Loader
class TestActionYml(unittest.TestCase):
def test_composite_action(self):
with open('../../action.yml', encoding='utf-8') as r:
dockerfile_action = yaml.load(r, Loader=Loader)
with open('../../composite/action.yml', encoding='utf-8') as r:
composite_action = yaml.load(r, Loader=Loader)
self.assertIn('runs', dockerfile_action)
self.assertIn('runs', composite_action)
dockerfile_action_wo_runs = {k:v for k,v in dockerfile_action.items() if k != 'runs'}
composite_action_wo_runs = {k:v for k,v in composite_action.items() if k != 'runs'}
self.assertEqual(dockerfile_action_wo_runs, composite_action_wo_runs)
self.assertIn(('using', 'composite'), composite_action.get('runs', {}).items())
def test_composite_inputs(self):
with open('../../composite/action.yml', encoding='utf-8') as r:
action = yaml.load(r, Loader=Loader)
# these are not documented in the action.yml files but still needs to be forwarded
extra_inputs = ['root_log_level', 'log_level']
expected = {key.upper(): f'${{{{ inputs.{key} }}}}' for key in list(action.get('inputs', {}).keys()) + extra_inputs}
steps = action.get('runs', {}).get('steps', [])
step = next((step for step in steps if step.get('name') == 'Publish Unit Test Results'), {})
inputs = {key.upper(): value for key, value in step.get('env', {}).items()}
self.assertEqual(expected, inputs)
```
|
{
"source": "jgibbons-cp/quarantine_ticketing",
"score": 3
}
|
#### File: app/lib/jira_lib.py
```python
from config import CONFIG
import requests
class JiraApi(object):
def __init__(self):
self.config = CONFIG['jira']
self.auth = (self.config['user'], self.config['<PASSWORD>'])
self.headers = {'Content-Type' : 'application/json'}
def url(self, endpoint):
return self.config['url'] + endpoint
def get(self, endpoint):
resp = requests.get(self.url(endpoint), auth=(self.auth), headers=self.headers)
if resp.status_code != 200:
print('Status:', resp.status_code, 'Problem with the request. Exiting.')
exit()
return resp.json()
def post(self, endpoint, data):
resp = requests.post(self.url(endpoint), auth=(self.auth), headers=self.headers, data=data)
if resp.status_code != 201:
print('Status:', resp.status_code, 'Problem with the post request. Exiting')
exit()
return resp.json()
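# Usage sketch (illustrative; the endpoint paths below are assumptions, not taken
# from this repository):
#
#   jira = JiraApi()
#   me = jira.get("/rest/api/2/myself")
#   created = jira.post("/rest/api/2/issue", data=json.dumps({"fields": {...}}))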
```
#### File: app/quarantine/halo_general.py
```python
import cloudpassage
import sys
class HaloGeneral(object):
"""This class wraps Halo API functionality, except for /v1/events."""
def __init__(self, config):
"""Pass in a quarantine.ConfigHelper object on instantiation."""
self.ua = config.ua_string
self.session = cloudpassage.HaloSession(config.halo_key,
config.halo_secret,
integration_string=self.ua)
self.server_obj = cloudpassage.Server(self.session)
self.event_obj = cloudpassage.Event(self.session)
self.group_obj = cloudpassage.ServerGroup(self.session)
self.target_group_id = self.get_groupid(config.quarantine_grp_name)
return
def get_groupid(self, group_name):
"""determine group_id for a named group"""
for group in self.group_obj.list_all():
if group["name"] == group_name:
target_id = group["id"]
return target_id
# Fall through if no match
print "ERROR: No group in account named %s" % group_name
sys.exit(2)
def quarantine_workload(self, agent_id):
"""Move a workload, identified by agent_id, into quarantine"""
self.server_obj.assign_group(agent_id, self.target_group_id)
return
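# Usage sketch (illustrative): given a quarantine.ConfigHelper-style object exposing
# halo_key, halo_secret, ua_string and quarantine_grp_name, a workload can be moved
# into the quarantine group like this; the agent id below is hypothetical.
#
#   halo = HaloGeneral(config)
#   halo.quarantine_workload("0123456789abcdef0123456789abcdef")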
```
|
{
"source": "jgiboney/python-forensic-toolkit",
"score": 4
}
|
#### File: python-forensic-toolkit/python_forensic_toolkit/file_hashing.py
```python
import hashlib, os
def get_hash_of_string (string, algorithm='MD5'):
"""This function takes a string and hashes it using an algorithm.
Acknowledgments:
Addition of multiple hashing algorithms courtesy of <NAME>, <NAME>,
<NAME>, <NAME>, and <NAME>.
Dictionary and associated functionality is courtesy of Jordan Polun.
Documentation is courtesy of <NAME> and Jordan Polun.
:param string: this is the string that will be hashed.
:type string: str.
:param algorithm: The hashing algorithm to be used. Defaults to 'MD5'.
:type algorithm: str.
:returns: str -- The hash of the string.
"""
hashing_algorithms = {'MD5' :hashlib.md5(),
'SHA1' :hashlib.sha1(),
'SHA224':hashlib.sha224(),
'SHA256':hashlib.sha256(),
'SHA384':hashlib.sha384(),
'SHA512':hashlib.sha512()
}
if algorithm in hashing_algorithms:
binary_string = convert_string_to_binary(string)
hash_object = hashing_algorithms[algorithm]
hash_object.update(binary_string)
hash_digest = hash_object.hexdigest()
else:
hash_digest = ''
return hash_digest
def read_binary_file (file_path):
"""This function will read and hash the contents of a file.
Acknowledgments:
Documentation is courtesy of <NAME> and Jordan Polun.
Thanks to <NAME> for fixing the return error.
:param file_path: this is the path to the file to be read.
:type file_path: str.
:returns: str -- the content of the file to be read
"""
try:
file_object = open(file_path,'rb')
file_contents = file_object.read()
file_object.close()
return file_contents
except:
raise
def get_hash_of_binary_file_contents (file_path, algorithm='MD5'):
"""This function will read and hash the contents of a file.
:param file_path: The path to the file to be hashed.
:type file_path: str.
:param algorithm: The hashing algorithm to be used. Defaults to 'MD5'.
:type algorithm: str.
:returns: str -- The hash of the contents of the file.
"""
file_contents = read_binary_file(file_path)
file_hash = get_hash_of_string(file_contents, algorithm)
return file_hash
def get_hashes_of_files_in_directory (directory, algorithm='MD5'):
"""This function will get the hashes of all files in a directory.
Acknowledgments:
This function is courtesy of <NAME>, <NAME>, <NAME>,
and <NAME>.
"""
hash_dictionary = {}
if os.path.isdir(directory):
for file_name in os.listdir(directory):
file_path = os.path.join(directory, file_name)
if os.path.isfile(file_path):
file_hash = get_hash_of_binary_file_contents(file_path, algorithm)
hash_dictionary[file_path] = file_hash
return hash_dictionary
def convert_string_to_binary (string):
"""This function attempts to convert a string to binary.
Acknowledgments:
Thanks to <NAME> for finding an error in the
get_hash_of_string function and providing a solution.
Code based an answer from Elizafox on Stack Overflow:
http://stackoverflow.com/questions/34869889/what-is-the-proper-way-to-determine-if-an-object-is-a-bytes-like-object-in-pytho
"""
try:
string = string.encode()
except:
pass
return string
def do_files_have_same_content (file_1, file_2, algorithm='MD5'):
"""This function checks the hashes of the contents of two files. It returns
a boolean.
Acknowledgments:
Thanks to <NAME> and <NAME> for working on this function.
"""
file_1_hash = get_hash_of_binary_file_contents(file_1, algorithm)
file_2_hash = get_hash_of_binary_file_contents(file_2, algorithm)
if file_1_hash == file_2_hash:
return True
else:
return False
```
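A short self-contained demonstration (not part of the original module) of the helpers above; it assumes the module is importable as `python_forensic_toolkit.file_hashing` as the file path suggests.
```python
import os
import tempfile

from python_forensic_toolkit import file_hashing

# Hash the same string with two algorithms.
print(file_hashing.get_hash_of_string("evidence", algorithm="MD5"))
print(file_hashing.get_hash_of_string("evidence", algorithm="SHA256"))

# Write two identical files, hash the directory, and compare the pair.
with tempfile.TemporaryDirectory() as d:
    a, b = os.path.join(d, "a.bin"), os.path.join(d, "b.bin")
    for path in (a, b):
        with open(path, "wb") as fh:
            fh.write(b"identical bytes")
    print(file_hashing.get_hashes_of_files_in_directory(d, algorithm="SHA1"))
    print(file_hashing.do_files_have_same_content(a, b))  # True
```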
|
{
"source": "jgibson2/contrastive-learning",
"score": 2
}
|
#### File: jgibson2/contrastive-learning/get_nearest_neighbors.py
```python
import pytorch_lightning as pl
import torch
import torchvision
from torch.utils.data import DataLoader
import pathlib
import networks.resnet as resnet
import torchvision.io as io
import torchvision.transforms.functional as AF
import torch.nn.functional as F
from networks.simclr import SimCLR
from networks.nnclr import NNCLR
from data.iNaturalist_dataset import iNaturalistDataset
import click
import tqdm
def read_image(img_path, image_size=(224, 224)):
img = io.read_image(str(img_path.absolute()))
img = AF.convert_image_dtype(img, torch.float)
# take a center crop of the correct aspect ratio and resize
crop_size = [img.shape[1], int(round(img.shape[1] * image_size[0] / image_size[1]))]
img = AF.center_crop(img, crop_size)
img = AF.resize(img, image_size)
return img
@click.command()
@click.option('--metadata-file-path', type=click.Path(),
help='Path of JSON metadata file')
@click.option('--query-image-path', type=click.Path(),
help='Path of query image file')
@click.option('--batch-size', type=click.INT, default=192, help="Batch size")
@click.option('--image-size', type=click.INT, default=224, help="Image size (one side)")
@click.option('--method', type=click.Choice(["simclr", "nnclr"]), required=True, help="Method used to train encoder")
@click.option('--encoder-weights', type=click.Path(), required=True, help="Encoder checkpoint to use")
@click.option('--top-k', type=click.INT, default=31, help="Number of results to return")
@click.option('--output-dir', type=click.Path(), default="./results", help="Path of output dir")
def main(
metadata_file_path,
query_image_path,
batch_size,
image_size,
method,
encoder_weights,
top_k,
output_dir
):
pl.seed_everything(42)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
if method == "simclr":
model = SimCLR.load_from_checkpoint(encoder_weights)
elif method == "nnclr":
model = NNCLR.load_from_checkpoint(encoder_weights)
else:
raise ValueError(f"Unknown method {method}")
model.to(device)
model.eval()
query_image_path = pathlib.Path(query_image_path)
output_dir = pathlib.Path(output_dir)
output_dir.mkdir(exist_ok=True)
query_img = read_image(query_image_path, image_size=(image_size, image_size)).to(device)
dataset = iNaturalistDataset(metadata_file_path, (image_size, image_size))
loader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=True,
num_workers=16,
pin_memory=True)
x1 = model(query_img.unsqueeze(0)).squeeze().detach()
closest_imgs = torch.rand((top_k, *query_img.shape), device=device)
closest_embeddings = torch.randn((top_k, *x1.shape), device=device)
for imgs, _ in tqdm.tqdm(loader):
imgs = imgs.to(device)
x2 = torch.cat((closest_embeddings, model(imgs).detach()), 0)
imgs = torch.cat((closest_imgs, imgs), 0)
dists = F.pairwise_distance(x2, x1.expand(x2.shape[0], *x1.shape))
topk_indices = torch.topk(dists, top_k, largest=False, sorted=True).indices
closest_imgs = imgs[topk_indices]
closest_embeddings = x2[topk_indices]
img_path = output_dir / f"{query_image_path.stem}_closest_{top_k}.jpg"
torchvision.utils.save_image(torch.cat((query_img.unsqueeze(0), closest_imgs), 0), str(img_path.absolute()))
if __name__ == "__main__":
main()
```
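The loop above keeps a running top-k by re-ranking the current best candidates together with every new batch. A standalone sketch of that pattern with plain tensors (illustrative only; dimensions and data are made up):
```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
k, dim = 5, 8
query = torch.randn(dim)
best = torch.randn(k, dim) * 100          # start far away so real data displaces it

for _ in range(10):                       # stand-in for batches from a DataLoader
    batch = torch.randn(64, dim)
    candidates = torch.cat((best, batch), 0)
    dists = F.pairwise_distance(candidates, query.expand_as(candidates))
    best = candidates[torch.topk(dists, k, largest=False, sorted=True).indices]

print(best.shape)                         # torch.Size([5, 8])
```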
#### File: jgibson2/contrastive-learning/train.py
```python
import torch
import pytorch_lightning as pl
from pytorch_lightning import loggers as pl_loggers
from torch.utils.data import DataLoader
from networks.simclr import SimCLR
from networks.nnclr import NNCLR
import networks.resnet as resnet
from data.iNaturalist_dataset import iNaturalistDataset
import click
@click.command()
@click.option('--train-metadata-file-path', type=click.Path(), required=True,
help='Path of training JSON metadata file')
@click.option('--validation-metadata-file-path', type=click.Path(), required=False,
help='Path of validation JSON metadata file')
@click.option('--batch-size', type=click.INT, default=192, help="Batch size")
@click.option('--gpus', type=click.INT, default=1, help="Number of GPUs to use")
@click.option('--image-size', type=click.INT, default=224, help="Image size (one side)")
@click.option('--epochs', type=click.INT, default=100, help="Number of epochs to train for")
@click.option('--method', type=click.Choice(["simclr", "nnclr"]), default="simclr", help="Method with which to train")
@click.option('--encoder', type=click.Choice(
["reset18", "resnet34", "resnet50", "resnet101", "resnet152"]), default="resnet50", help="Encoder to use")
@click.option('--encoder-weights', type=click.Path(), required=False, help="Encoder weights, omit for random init")
@click.option('--encoder-key', type=click.STRING, required=False, help="Key of encoder weights in state dict")
def main(
train_metadata_file_path,
validation_metadata_file_path,
batch_size,
gpus,
image_size,
epochs,
method,
encoder,
encoder_weights,
encoder_key,
):
pl.seed_everything(42)
tb_logger = pl_loggers.TensorBoardLogger("logs/")
trainer = pl.Trainer(
logger=tb_logger,
gpus=gpus,
precision=16 if gpus > 0 else 32,
min_epochs=epochs,
max_epochs=epochs
)
train_dataset = iNaturalistDataset(train_metadata_file_path, (image_size, image_size))
train_loaders = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=16)
val_loaders = None
if validation_metadata_file_path:
val_dataset = iNaturalistDataset(validation_metadata_file_path, (image_size, image_size))
val_loaders = DataLoader(
val_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=16)
base_encoder = getattr(resnet, encoder)(pretrained=False)
if encoder_weights is not None:
state_dict = torch.load(encoder_weights)["state_dict"]
if encoder_key:
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(state_dict, prefix=encoder_key)
base_encoder.load_state_dict(state_dict, strict=False)
model = None
if method == "simclr":
model = SimCLR(base_encoder.fc.in_features, 128, (image_size, image_size), encoder=base_encoder)
elif method == "nnclr":
model = NNCLR(base_encoder.fc.in_features, 128, (image_size, image_size), 32768, encoder=base_encoder)
else:
raise ValueError(f"Unknown method {method}")
# explicit device for setup step
device = torch.device('cuda:0') if gpus > 0 else torch.device('cpu')
model.to(device)
trainer.fit(model, train_dataloaders=train_loaders, val_dataloaders=val_loaders)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
main()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
```
|
{
"source": "jgibson2/pytorch-ngp",
"score": 2
}
|
#### File: jgibson2/pytorch-ngp/ngp.py
```python
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import torchvision
import utils
from matplotlib import pyplot as plt
import tqdm
import numpy as np
import einops
class NeuralGraphicsPrimitiveModel(pl.LightningModule):
def __init__(self, mlp, dimension=2, levels=16, max_entries_per_level=2 ** 16, feature_dim=2, min_resolution=16,
max_resolution=16384, **kwargs):
super().__init__(**kwargs)
self.mlp = mlp
self.d = dimension
self.L = levels
self.T = max_entries_per_level
self.F = feature_dim
self.N_min = min_resolution
self.N_max = max_resolution
b = np.exp((np.log(self.N_max) - np.log(self.N_min)) / (self.L - 1))
N_l = torch.Tensor([np.floor(self.N_min * (b ** l)) for l in range(self.L)]).detach()
self.register_buffer("N_l", N_l)
self.create_hashmap_parameters()
self.ema = utils.ExponentialMovingAverage(self.parameters(), decay=0.95)
def create_hashmap_parameters(self) -> None:
feat_init_min = -10 ** -4
feat_init_max = 10 ** -4
shape = (self.L, self.T, self.F)
feats = ((feat_init_max - feat_init_min) * torch.rand(shape, device=self.device)) + feat_init_min
self.register_parameter("features", torch.nn.Parameter(feats))
def get_hypercube_vertices(self, low_coords_bld, high_coords_bld):
b, l, d = low_coords_bld.shape
coords_bl2d = torch.stack((low_coords_bld, high_coords_bld), dim=2)
indices_blDd = einops.repeat(torch.cartesian_prod(*([torch.arange(2, device=self.device)] * d)),
"v d -> b l v d",
b=b, l=l)
vertices_blDd = torch.gather(coords_bl2d, 2, indices_blDd)
return vertices_blDd
def interpolate(self, scaled_coordinates_bld, feats_blDf, vertices_blDd, smooth=False):
"""
:param scaled_coordinates_bld:
:param feats_blDf:
:param vertices_blDd:
:return:
"""
b, l, d = scaled_coordinates_bld.shape
# get cube side length, when the coordinates are scaled all voxels
# have side length one
side_lengths_bl1d = torch.ones((b, l, 1, d), device=self.device)
# n-linear interpolation can be taken as the vertex's value times the volume of the
# n-dimensional volume with corners defined by the *opposite* vertex and the point in the interior
residuals_blDd = torch.clamp(
side_lengths_bl1d - torch.abs(vertices_blDd - scaled_coordinates_bld.view(b, l, 1, d)),
min=0.0001,
max=0.9999
)
# the volume is obviously the reduction along that dimension via multiplication
weights_blD1 = einops.reduce(residuals_blDd,
"b l D d -> b l D 1",
"prod")
if smooth:
weights_blD1 = (weights_blD1 ** 2) * (3.0 - (2.0 * weights_blD1))
# multiply each vertex value by the weights and sum along the vertices
interpolated_feats_blf = einops.reduce(feats_blDf * weights_blD1,
"b l D f -> b l f",
"sum")
return interpolated_feats_blf
def get_interpolated_features(self, coords, smooth=False):
"""
:param x: b x d position vector in [0,1] for each dimension
:return: b x (L F) features to use as input to the network
"""
scaled_coords_bld = torch.einsum("bd,l->bld", coords, self.N_l)
if smooth:
# add half voxel size
scaled_coords_bld += 1.0 / (2.0 * einops.rearrange(self.N_l, "l -> 1 l 1"))
low_coords_bld = torch.floor(scaled_coords_bld).long()
# add a bit to make sure we round up
high_coords_bld = torch.ceil(scaled_coords_bld + (1.0 / (self.N_max + 1))).long()
vertices_blDd = self.get_hypercube_vertices(low_coords_bld, high_coords_bld)
b, l, D, d = vertices_blDd.shape
feat_indices_lN = einops.rearrange(utils.spatial_hash(vertices_blDd.view(b * l * D, d), self.T),
"(b l D) -> l (b D)", b=b, l=l, D=D)
l_indices = torch.arange(l, dtype=torch.long, device=self.device)
feats_blDf = einops.rearrange(self.features[l_indices[:, None], feat_indices_lN, :],
"l (b D) f -> b l D f", b=b, l=l, D=D)
interpolated_feats_blf = self.interpolate(scaled_coords_bld, feats_blDf, vertices_blDd, smooth=smooth)
return interpolated_feats_blf.flatten(start_dim=1)
def forward(self, x):
coords, eta = x[:, :self.d], x[:, self.d:]
interpolated_feats_bF = self.get_interpolated_features(coords, smooth=True)
final_feats_bF = torch.cat((interpolated_feats_bF, eta), dim=1)
return self.mlp(final_feats_bF)
def step(self, batch, batch_idx, phase):
raise NotImplementedError("Implement this for various methods!")
def training_step(self, train_batch, batch_idx):
return self.step(train_batch, batch_idx, "training")
def validation_step(self, val_batch, batch_idx):
return self.step(val_batch, batch_idx, "validation")
def configure_optimizers(self):
self.ema.to(self.device)
return torch.optim.AdamW(self.parameters(), betas=(0.9, 0.99), eps=10e-15, weight_decay=10e-6)
def on_before_zero_grad(self, *args, **kwargs):
self.ema.update(self.parameters())
class SDFNGPModel(NeuralGraphicsPrimitiveModel):
def __init__(self, pos_enc_freqs=6, coords_min=-1.0, coords_max=1.0):
mlp = utils.make_mlp(3 * 2 * pos_enc_freqs, 1, hidden_dim=128, hidden_layers=4)
super().__init__(mlp, dimension=3, feature_dim=2)
self.coords_min = coords_min
self.coords_max = coords_max
self.pos_enc_freqs = pos_enc_freqs
def forward(self, x):
# coords are between [coords_min, coords_max], scale to be between [0, 1]
x -= self.coords_min
x /= (self.coords_max - self.coords_min)
pos_enc_x = utils.pos_encoding(x, self.pos_enc_freqs, dim=1)
return super().forward(pos_enc_x)
def step(self, batch, batch_idx, phase):
x, d = batch
outputs = self(x)
loss = F.l1_loss(outputs.squeeze(), d.squeeze())
self.log(f"{phase}/loss", loss)
return loss
class GigapixelNGPModel(NeuralGraphicsPrimitiveModel):
def __init__(self, pos_enc_freqs=6, coords_min=0.0, coords_max=1.0):
mlp = utils.make_mlp(2 * 2 * pos_enc_freqs, 3, hidden_dim=64, hidden_layers=2,
output_nonlinearity=torch.nn.Sigmoid())
super().__init__(mlp, dimension=2, max_resolution=4096, max_entries_per_level=2**18, feature_dim=2)
self.coords_min = coords_min
self.coords_max = coords_max
self.pos_enc_freqs = pos_enc_freqs
def forward(self, x):
# coords are between [coords_min, coords_max], scale to be between [0, 1]
x -= self.coords_min
x /= (self.coords_max - self.coords_min)
pos_enc_x = utils.pos_encoding(x, self.pos_enc_freqs, dim=1)
return super().forward(pos_enc_x)
def step(self, batch, batch_idx, phase):
x, y = batch
outputs = self(x)
loss = F.mse_loss(outputs.squeeze(), y.squeeze())
self.log(f"{phase}/loss", loss)
self.log(f"{phase}/psnr", 20 * torch.log10(torch.Tensor([1.0]).to(self.device)) - 10 * torch.log10(loss))
return loss
```
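For reference, the per-level grid resolutions in `NeuralGraphicsPrimitiveModel.__init__` follow a geometric schedule between `N_min` and `N_max`. This standalone snippet (plain numpy, no repository code) reproduces that computation for the default settings:
```python
import numpy as np

# Geometric growth factor b chosen so that level 0 has resolution N_min and
# level L-1 has resolution N_max, as in the Instant-NGP hash-grid encoding.
L, N_min, N_max = 16, 16, 16384
b = np.exp((np.log(N_max) - np.log(N_min)) / (L - 1))
N_l = np.floor(N_min * b ** np.arange(L)).astype(int)
print(b)      # ~1.587
print(N_l)    # starts at 16, ends at 16384
```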
|
{
"source": "jgibson/Xponents",
"score": 3
}
|
#### File: python/opensextant/CommonsUtils.py
```python
version = 'v3'
from cStringIO import StringIO
import os
import csv
import re
from chardet import detect as detect_charset
## ---------------------------------------
## TEXT UTILITIES
## ---------------------------------------
##
#
# Acceptable test: http://stackoverflow.com/questions/196345/how-to-check-if-a-string-in-python-is-in-ascii
def is_ascii(s):
if isinstance(s, unicode):
return all(ord(c) < 128 for c in s)
else:
try:
return all(ord(c) < 128 for c in s)
except:
pass
return False
## ISO-8859-2 is a common answer, when they really mean ISO-1
CHARDET_LATIN2_ENCODING = 'ISO-8859-1'
def guess_encoding(text):
''' Given bytes, determine the character set encoding
@return: dict with encoding and confidence
'''
if not text: return {'confidence':0, 'encoding':None}
enc = detect_charset(text)
cset = enc['encoding']
if cset.lower() == 'iso-8859-2':
## Anomaly -- chardet thinks Hungarian (iso-8859-2) is
## a close match for a latin-1 document. At least the quotes match.
## Other Latin-xxx variants will likely match, but actually be Latin-1
## or win-1252. See Chardet explanation for poor reliability of Latin-1 detection
##
enc['encoding'] = CHARDET_LATIN2_ENCODING
return enc
def bytes2unicode(buf, encoding=None):
if not encoding:
enc = guess_encoding(buf)
encoding = enc['encoding']
if not encoding:
return None
if encoding.lower() == 'utf-8':
return unicode(buf)
else:
text = buf.decode(encoding)
return unicode(text)
return None
reSqueezeWhiteSpace = re.compile(r'\s+', re.MULTILINE)
def squeeze_whitespace(s):
return reSqueezeWhiteSpace.sub(' ', s).strip()
def scrub_eol(t):
return t.replace('\n', ' ').replace('\r', '')
BOOL_F_STR = set(["false", "0", "n", "f", "no", "", "null"])
BOOL_T_STR = set(["true", "1", "y", "t", "yes" ])
def get_bool(token):
if not token:
return False
if isinstance(token, bool):
return token
t=token.lower()
if t in BOOL_F_STR:
return False
if t in BOOL_T_STR:
return True
return False
def get_number(token):
''' Turn leading part of a string into a number, if possible.
'''
num = StringIO()
for ch in token:
if ch.isdigit() or ch == '.' or ch == '-':
num.write(ch)
else:
break
val = num.getvalue()
num.close()
return val
def has_digit(text):
'''
Used primarily to report places and appears to be critical for
name filtering when doing phonetics.
'''
if text is None:
return False
for ch in text:
# ascii
if ch.isdigit():
return True
return False
def get_text_window(offset, matchlen, textsize, width):
''' prepreprepre MATCH postpostpost
^ ^ ^ ^
l-width l l+len l+len+width
left_y left_x right_x right_y
'''
left_x = offset - width
left_y = offset - 1
right_x = offset + matchlen
right_y = right_x + width
if left_x < 0:
left_x = 0
if left_y < left_x:
left_y = left_x
# bounds checking END....y? then y=END, results in shorter postmatch
if right_y >= textsize:
right_y = textsize - 1
# bounds checking y.... x? then x=y, results in empty postmatch
if right_x > right_y:
right_x = right_y
return [ left_x, left_y, right_x, right_y]
## ---------------------------------------
## FILE UTILITIES
## ---------------------------------------
##
def get_csv_writer(fh, columns, delim=','):
return csv.DictWriter(fh, columns, restval="", extrasaction='raise',
dialect='excel', lineterminator='\n',
delimiter=delim, quotechar='"',
quoting=csv.QUOTE_ALL, escapechar='\\')
def get_csv_reader(fh, columns, delim=','):
return csv.DictReader(fh, columns,
restval="", dialect='excel', lineterminator='\n', escapechar='\\',
delimiter=delim, quotechar='"', quoting=csv.QUOTE_ALL)
# |||||||||||||||||||||||||||||||||||||||||||||
# |||||||||||||||||||||||||||||||||||||||||||||
class ConfigUtility:
# |||||||||||||||||||||||||||||||||||||||||||||
# |||||||||||||||||||||||||||||||||||||||||||||
''' A utility to load parameter lists, CSV files, word lists, etc. from a folder *dir*
functions here take an Oxygen cfg parameter keyword or a file path.
If the keyword is valid and points to a valid file path, then the file path is used.
In other words, keywords are aliases for a file on disk.
Ex. 'mywords' = '.\cfg\mywords_v03_filtered.txt'
oxygen.cfg file would have this mapping. Your code just references 'mywords' to load it.
'''
def __init__(self, CFG, rootdir='.'):
# If config is None, then caller can still use loadDataFromFile(abspath, delim) for example.
#
self.config = CFG
self.rootdir = rootdir
def loadCSVFile(self, keyword, delim):
'''
Load a named CSV file. If the name is not a cfg parameter, the keyword name *is* the file.
'''
f = self.config.get(keyword)
if f is None:
f = keyword
path = os.path.join(self.rootdir, f)
return self.loadDataFromFile(path, delim)
def loadDataFromFile(self, path, delim):
'''
Load columnar data from a file.
Returns array of non-comment rows.
'''
if not os.path.exists(path):
raise Exception('File does not exist, FILE=%s' % path)
f = open(path, 'rb')
filereader = csv.reader(f, delimiter=delim, lineterminator='\n')
data = []
for row in filereader:
first_cell = row[0].strip()
if first_cell.startswith('#'):
continue
#if not delim and not first_cell:
# continue
data.append(row)
f.close()
return data
def loadFile(self, keyword):
'''
Load a named word list file.
If the name is not a cfg parameter, the keyword name *is* the file.
'''
filename = ''
if os.path.exists(keyword):
path = keyword
else:
filename = self.config.get(keyword)
if filename is None:
filename = keyword
path = os.path.join(self.rootdir, filename)
if not os.path.exists(path):
raise Exception('File does not exist, FILE=%s' % path)
return self.loadListFromFile(path)
def loadListFromFile(self, path):
'''
Load text data from a file.
Returns array of non-comment rows. One non-whitespace row per line.
'''
if not os.path.exists(path):
raise Exception('File does not exist, FILE=%s' % path)
file = open(path, 'r')
termlist = []
for line in file:
line = line.strip()
if line.startswith('#'):
continue
if len(line) == 0:
continue
termlist.append(line.lower())
file.close()
return termlist
```
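A worked example (not from the repository) of the window arithmetic in `get_text_window`; the module targets Python 2 (`cStringIO`, `unicode`), so the import assumes a Python 2 environment with the `opensextant` package on the path.
```python
from opensextant.CommonsUtils import get_text_window

# A 100-character text, a 5-character match starting at offset 50, width 10:
#   left_x = 50 - 10 = 40, left_y = 49, right_x = 50 + 5 = 55, right_y = 55 + 10 = 65
print(get_text_window(50, 5, 100, 10))   # [40, 49, 55, 65]

# Near the start of the text the pre-match window is clamped at 0:
print(get_text_window(3, 4, 100, 10))    # [0, 2, 7, 17]
```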
#### File: python/opensextant/Data.py
```python
class Country:
''' Country metadata
'''
def __init__(self):
self.cc_iso2 = None
self.cc_iso3 = None
self.cc_fips = None
self.name = None
self.namenorm = None
self.aliases = []
self.is_territory = False
self.is_unique_name = False
self.timezones = []
self.languages = set([])
self.primary_language = None
self.lat = 0
self.lon = 0
def __str__(self):
return u'%s (%s)' %(self.name, self.cc_iso2)
def validate_lat(f):
return (f >= -90.0) and (f <= 90.0)
def validate_lon(f):
return (f >= -180.0) and (f <= 180.0)
class Coordinate:
''' Convenient class for Lat/Lon pair.
Expects a row dict with 'lat' and 'lon',
or kwd args 'lat', 'lon'
@param row default dictionary
'''
def __init__(self, row, lat=None, lon=None):
self.X = 0.0
self.Y = 0.0
self.mgrs = None
if row:
if (row.has_key('lat') and row.has_key('lon')):
lat = row['lat']
lon = row['lon']
if (lat and lon):
self.X = float(lon)
self.Y = float(lat)
else:
return None
def validate(self):
return validate_lat(self.Y) and validate_lon(self.X) and (self.X is not None and self.Y is not None)
def set(self, strLAT, strLON):
self.X = float(strLON)
self.Y = float(strLAT)
def __str__(self):
if self.Y:
return '%3.4f, %3.4f' % (self.Y, self.X)
else:
return 'unset'
class Place(Coordinate):
'''
Location or GeoBase
+ Coordinate
+ Place
+ Country
or
Location
+ Coordinate
+ Place
etc. Not sure of the best data model for inheritance.
This Python API hopes to simplify the concepts in the Java API.
'''
def __init__(self, pid, name, lat=None, lon=None):
Coordinate.__init__(self, None, lat=lat, lon=lon)
self.place_id = pid
self.name = name
self.is_ascii = False
self.is_upper = False
self.adm1_postalcode = None # Province Postal CODE?
self.place_postalcode = None # ZIP CODE?
self.name_type = None
self.country = None
self.country_code = None
self.feature_class = None
self.feature_code = None
self.adm1 = None
self.adm1_name = None
self.adm2 = None
self.adm2_name = None
self.source = None
self.name_bias = 0.0
self.id_bias = 0.0
self.precision= -1
self.method = None
self.population = 0
self.hierachical_path = None
def has_coordinate(self):
if self.validate():
return (self.Y != 0 and self.X != 0)
return False
def get_location(self):
''' Returns (LAT, LON) tuple
@return: tuple, (lat,lon)
'''
return (self.Y, self.X)
def set_location(self, lat, lon):
self.set(lat, lon)
def __str__(self):
crd = 'Unset'
if self.Y:
crd = '%3.4f, %3.4f' % (self.Y, self.X)
meta = '%s (%s), %s' %( self.name, self.place_id, self.country_code)
return ', '.join([meta, crd])
```
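A small usage sketch (not from the repository) building a `Place`; the package path, place id, and codes are illustrative.
```python
from opensextant.Data import Place

p = Place('USGS12345', 'Boston', lat='42.3601', lon='-71.0589')
p.country_code = 'US'
p.adm1 = '25'

print(p)                   # Boston (USGS12345), US, 42.3601, -71.0589
print(p.has_coordinate())  # True
print(p.get_location())    # (42.3601, -71.0589)
```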
#### File: python/opensextant/TaxCat.py
```python
import os
__API_PATH = os.path.realpath( __file__ )
SOLR_SERVER = "http://localhost:7000/solr/taxcat"
def _scrub_cdata_content(text):
''' User should scrub data themselves; but this gives ideas of what goes wrong when adding text to Solr
<,>,& all must be escaped.
'''
return text.replace('<', '(less than)').replace('>','(greater than)').replace('&', '& ')
def get_taxnode(t, val):
return t.lower() + "." + val.strip()
_FALSE_VAL = set(['f', 'false', '0', 'n', 'no'])
_TRUE_VAL = set(['t', 'true', '1', 'y', 'yes'])
def add_bool(dct, f, val, default=None):
if not val:
if default is not None:
dct[f] = default
return
if val.lower() in _FALSE_VAL:
dct[f] = 'false'
elif val.lower() in _TRUE_VAL:
dct[f] = 'true'
return
def add_text(dct, f, val):
''' add_text offers a basic idea of how to add values to dict
before sending to solr. TEXT strings may need scrubbing
but you just add non-TEXT values.
'''
if (isinstance(val, str) or isinstance(val, unicode)):
dct[f] = val
else:
dct[f] = val
def add_value(f, val, case=0):
''' add a value to a given field, f; And normalize case if non-zero.
case = CASE_LOWER | CASE_UPPER | 0(default) no change
'''
if val is None:
f.append(u'')
return
if (isinstance(val, str) or isinstance(val, unicode)):
v = val
#if "&" in val or "<" in val:
# print "SCRUB THIS:", val
# val.replace('&','+').replace('<', ' lt ')
if not case:
f.append(v)
elif case == CASE_LOWER:
f.append( v.lower() )
elif case == CASE_UPPER:
f.append( v.upper() )
else:
f.append(str(val))
return
CASE_LOWER=1
CASE_UPPER=2
'''
# Catalogs must be registered -- Solr has no concept of how to manage string-based record IDs
# that is something you must manage as you create your combined catalog,
#
# Catalog Registry maps your catalog ID to a starting offset for solr records
# If you think your reference data for catalog X will have 1 million entries, then
# start catalog X at 1,000,000 and let other smaller catalogs start at 0 or at less than 1 million
# start the next catalog at 3,000,000 to give X some breathing room.
#
'''
CATALOG_REGISTRY = {
"DEFAULT" : 0
}
class Taxon:
def __init__(self):
self.name = None
self.phrase = None
self.id = None
self.is_valid = True
# An array of additional tags.
self.tags = None
self.is_acronym = False
class TaxCatalogBuilder:
def __init__(self, server=None):
'''
@param server: solr server http URL; Not solrhome -- this is not SolrEmbedded.
@param stopwords: file of stopwords
'''
self.server = None
self.set_server(server)
self._record_count = 0l
self._byte_count = 0l
self._add_byte_count = 0l
self.commit_rate = -1
self._records = []
self.count = 0
from CommonsUtils import ConfigUtility
## Load file
self.utility = ConfigUtility(None)
self.stopwords = set( [] )
def add_stopwords( self, stopfile ):
if not os.path.exists(stopfile):
raise Exception("No stopwords found at " + stopfile)
print "Loading stopwords ", stopfile
_stopwords_list = self.utility.loadListFromFile(stopfile)
self.stopwords.update( _stopwords_list )  # update(), not add(): merge the whole word list into the set
def get_starting_id(self, cat):
offset = CATALOG_REGISTRY.get(cat)
if offset is None:  # 0 is a valid offset (e.g. "DEFAULT"), so test for None explicitly
raise Exception("Catalog is not registered: " + cat)
return offset
def set_server(self, svr):
self.server_url = svr
if not self.server_url:
return
try:
from pysolr import Solr
self.server = Solr(self.server_url, timeout=600)
print "SERVER ", self.server_url, self.server
except Exception, err:
print "Problem with that server %s, ERR=%s" % (self.server_url, err)
def optimize(self):
if self.server:
self.server.optimize()
def save(self, flush=False):
if not self.server:
print "No server"
return
if not flush:
qty = len(self._records)
if self.commit_rate>0 and qty % self.commit_rate != 0:
return
if qty < self.commit_rate:
return
self.server.add(self._records)
self.server.commit()
self._records = []
return
def add(self, catalog, taxon):
'''
@param catalog ID of catalog where this taxon lives
@param taxon Taxon obj
'''
self.count = self.count + 1
rec = {'catalog':catalog, 'taxnode':taxon.name, 'phrase':taxon.phrase, 'id':taxon.id, 'valid': taxon.is_valid,
'name_type':'N' }
if taxon.tags:
rec['tag'] = taxon.tags
if taxon.is_acronym:
rec['name_type'] = 'A'
self._records.append( rec )
def add_wordlist(self, catalog, datafile, start_id, taxnode=None, minlen=1):
''' Given a simple one column word list file, each row of data is added
to catalog as a Taxon; taxnode may be used as a prefix for the words
Add a series of organized word lists to a single Catalog, but manage
each wordlist with some prefix taxon path.
add_wordlist('CAT', f1, 400, taxonode='first')
add_wordlist('CAT', f2, 500, taxonode='second')
add_wordlist('CAT', f3, 600, taxonode='third')
add_wordlist('CAT', f4, 700, taxonode='fourth')
'''
_name = os.path.basename(datafile)
if taxnode:
_name = taxnode
sheet = open(datafile,'rb')
words = set([])
for row in sheet:
_phrase = row.strip()
if not _phrase:
continue
if _phrase.startswith("#"):
# is a comment or commented out word.
continue
self.count += 1
_id = start_id + self.count
key = _phrase.lower()
if key in words:
print "Not adding ", key
continue
words.add(key)
t = Taxon()
t.id = _id
t.is_valid = len(key) >= minlen
t.name = _name
t.phrase = _phrase
# Allow case-sensitve entries. IFF input text contains UPPER
# case data, we'll mark it as acronym.
if t.phrase.isupper():
t.is_acronym = True
self.add(catalog, t)
print "COUNT: %d" %( self.count)
sheet.close()
```
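A standalone illustration of the catalog-offset bookkeeping described in the `CATALOG_REGISTRY` comment above (it does not import the Python 2 module; names and sizes are made up): each catalog reserves its own contiguous ID range so records from different catalogs cannot collide.
```python
# Illustrative registry only; real catalog names and ranges are project-specific.
CATALOG_REGISTRY = {
    "DEFAULT": 0,
    "MY_WORDLISTS": 1000000,    # expected to stay well under the next offset
    "BIG_TAXONOMY": 3000000,
}

def get_starting_id(cat):
    offset = CATALOG_REGISTRY.get(cat)
    if offset is None:          # 0 is a valid offset, so test for None
        raise ValueError("Catalog is not registered: " + cat)
    return offset

# Row 42 of MY_WORDLISTS gets a globally unique record id:
print(get_starting_id("MY_WORDLISTS") + 42)   # 1000042
```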
|
{
"source": "jgidi/varqus",
"score": 3
}
|
#### File: varqus/varqus/analytic.py
```python
import numpy as np
import scipy.linalg as la
from varqus.utils import parse_gate
def R_k(theta_k : float, fs_k : list, gates_k : list):
"""
Calculate the unitary R_k.
Args:
theta_k: float. theta_k parameter in the paper.
fs_k: list. Contains the complex coefficients f_ki that appear in R_k.
gates_k: list. Contains the operators sigma_ki that appear in R_k.
Returns:
R_k: Gate.
"""
U = sum( f * parse_gate(g) for (f, g) in zip(fs_k, gates_k) )
return la.expm(theta_k * U)
def A_kqij(theta, fs, gates, state, k, q, i, j):
R_ki = np.copy(state.reshape(-1, 1))
R_qj = np.copy(state.reshape(-1, 1))
gate_ki = parse_gate(gates[k][i])
gate_qj = parse_gate(gates[q][j])
for l in range(len(theta)):
if l == k:
R_ki = gate_ki @ R_ki
elif l == q:
R_qj = gate_qj @ R_qj
U = R_k(theta[l], fs[l], gates[l])
R_ki = U @ R_ki
R_qj = U @ R_qj
coefs = 1j * np.conj(fs[k][i]) * fs[q][j]
return 2*np.real( coefs * np.vdot(R_ki, R_qj) )
def A_kq(theta, fs, gates, state, k, q):
s = 0.0
for i in range(len(fs[k])):
for j in range(len(fs[q])):
s += A_kqij(theta, fs, gates, state, k, q, i, j)
return s
def A(theta, fs, gates, state):
N = len(theta)
a = np.zeros((N, N))
for q in range(N):
for k in range(q+1): # Calculate only a half
a[k, q] = A_kq(theta, fs, gates, state, k, q)
return a - a.T # Complete the other half
def V_kij(theta, fs, hs, gates, h_gates, state, k, i, j):
R = np.copy(state.reshape(-1, 1))
R_ki = np.copy(state.reshape(-1, 1))
gate_ki = parse_gate(gates[k][i])
h_gate_j = parse_gate(h_gates[j])
for l in range(len(theta)):
if l == k:
R_ki = gate_ki @ R_ki
U = R_k(theta[l], fs[l], gates[l])
R = U @ R
R_ki = U @ R_ki
coefs = hs[j] * np.conjugate(fs[k][i])
return 2*np.real(coefs * np.vdot(R_ki, h_gate_j @ R))
def V_k(theta, fs, hs, gates, h_gates, state, k):
s = 0.0
for i in range(len(fs[k])):
for j in range(len(hs)):
s += V_kij(theta, fs, hs, gates, h_gates, state, k, i, j)
return s
def V(theta, fs, hs, gates, h_gates, state):
return np.array([ V_k(theta, fs, hs, gates, h_gates, state, k)
for k in range(len(theta)) ])
```
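A standalone sanity check (plain numpy/scipy, independent of `varqus.utils.parse_gate`, whose gate format is not shown here) of the `R_k` construction for a single generator: with coefficient f = -i/2 and a Pauli-X generator, `expm(theta * f * X)` is the familiar single-qubit rotation RX(theta).
```python
import numpy as np
import scipy.linalg as la

X = np.array([[0, 1], [1, 0]], dtype=complex)
theta, f = 0.7, -0.5j

R = la.expm(theta * f * X)                                       # what R_k builds
RX = np.cos(theta / 2) * np.eye(2) - 1j * np.sin(theta / 2) * X  # closed form
print(np.allclose(R, RX))   # True
```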
#### File: varqus/varqus/states.py
```python
import numpy as np
from varqus.analytic import R_k
def state_infidelity(state1, state2):
s1 = state1 / np.linalg.norm(state1)
s2 = state2 / np.linalg.norm(state2)
fidelity = np.abs( np.vdot(s1, s2) )**2
return 1 - fidelity
def state_from_parameters(params, ops, fs, initial_state):
state = np.copy(initial_state)
for l in range(len(params)):
state = R_k(params[l], fs[l], ops[l]) @ state
return state / np.linalg.norm(state)
```
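A quick numeric check of `state_infidelity` (not part of the package), assuming `varqus` is importable; states are normalised internally, and a global phase does not change the result.
```python
import numpy as np
from varqus.states import state_infidelity

zero = np.array([1, 0], dtype=complex)
one = np.array([0, 1], dtype=complex)
plus = np.array([1, 1], dtype=complex)        # unnormalised on purpose

print(state_infidelity(zero, one))            # 1.0  (orthogonal states)
print(state_infidelity(zero, 2j * zero))      # 0.0  (norm and global phase ignored)
print(state_infidelity(zero, plus))           # 0.5
```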
|
{
"source": "jgieseler/solo-epd-loader",
"score": 2
}
|
#### File: solo-epd-loader/solo_epd_loader/__init__.py
```python
from pkg_resources import DistributionNotFound, get_distribution
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
pass # package is not installed
import datetime as dt
import glob
import itertools
import os
import re
import urllib.request
import warnings
from pathlib import Path
import cdflib
import numpy as np
import pandas as pd
from astropy.io.votable import parse_single_table
from sunpy.io.cdf import read_cdf
"""
Example code that loads low latency (ll) electron and proton (+alphas) fluxes
(and errors) for 'ept' 'north' telescope from Apr 15 2021 to Apr 16 2021 into
two Pandas dataframes (one for protons & alphas, one for electrons). In general
available are 'sun', 'asun', 'north', and 'south' viewing directions for 'ept'
and 'het' telescopes of SolO/EPD.
from epd_loader import *
df_protons, df_electrons, energies = \
_read_epd_cdf('ept', 'north', 'll', 20210415, 20210416,
path='/home/userxyz/solo/data/')
# plot protons and alphas
ax = df_protons.plot(logy=True, subplots=True, figsize=(20,60))
plt.show()
# plot electrons
ax = df_electrons.plot(logy=True, subplots=True, figsize=(20,60))
plt.show()
"""
"""
Example code that loads level 2 (l2) electron and proton (+alphas) fluxes
(and errors) for 'het' 'sun' telescope from Aug 20 2020 to Aug 20 2020 into
two Pandas dataframes (one for protons & alphas, one for electrons).
from epd_loader import *
df_protons, df_electrons, energies = \
_read_epd_cdf('het', 'sun', 'l2', 20200820, 20200821,
path='/home/userxyz/solo/data/')
# plot protons and alphas
ax = df_protons.plot(logy=True, subplots=True, figsize=(20,60))
plt.show()
# plot electrons
ax = df_electrons.plot(logy=True, subplots=True, figsize=(20,60))
plt.show()
"""
def _check_duplicates(filelist, verbose=True):
"""
Checks for duplicate file entries in filelist (that are only different by
version number). Returns filelist with duplicates removed.
"""
for _, g in itertools.groupby(filelist, lambda f: f.split('_')[:-1]):
dups = list(g)
if len(dups) > 1:
dups.sort()
if verbose:
print('')
print('WARNING: Following data files are duplicates with ' +
'different version numbers:')
for i in dups:
print(i)
if verbose:
print('')
print('Removing following files from list that will be read: ')
for n in range(len(dups)-1):
print(dups[n])
filelist.remove(dups[n])
if verbose:
print('You might want to delete these files in order to get ' +
'rid of this message.')
return filelist
def _get_filename_url(cd):
"""
Get download filename for a url from content-disposition
"""
if not cd:
return None
fname = re.findall('filename=(.+)', cd)
if len(fname) == 0:
return None
return fname[0][1:-1]
def _load_tqdm(verbose=True):
"""
Tries to load tqdm package for displaying download progress.
Return True or False, depending of success state.
If not available, returns False.
"""
try:
from tqdm import tqdm
class DownloadProgressBar(tqdm):
def update_to(self, b=1, bsize=1, tsize=None):
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n)
def download_url(url, output_path):
with DownloadProgressBar(unit='B', unit_scale=True, miniters=1,
desc=output_path.split('/')[-1]) as t:
urllib.request.urlretrieve(url, filename=output_path,
reporthook=t.update_to)
tqdm_available = True
except ModuleNotFoundError:
if verbose:
print("Module tqdm not installed, won't show progress bar. To get rid of this: pip install tqdm")
tqdm_available = False
download_url = None
return tqdm_available, download_url
def _get_epd_filelist(sensor, level, startdate, enddate, path,
filenames_only=False):
"""
INPUT:
sensor: 'ept' or 'het'
level: 'll', 'l2'
startdate, enddate: YYYYMMDD
path: directory in which the data is located;
e.g. '/home/userxyz/uni/solo/data/l2/epd/ept/'
filenames_only: if True only give the filenames, not the full path
RETURNS:
Dictionary with four entries for 'sun', 'asun', 'north', 'south';
each containing a list of files matching selection criteria.
"""
if level == 'll':
l_str = 'LL02'
t_str = 'T??????-????????T??????'
if level == 'l2':
l_str = 'L2'
t_str = ''
filelist_sun = []
filelist_asun = []
filelist_north = []
filelist_south = []
for i in range(startdate, enddate+1):
filelist_sun = filelist_sun + \
glob.glob(path+'solo_'+l_str+'_epd-'+sensor+'-sun-rates_' +
str(i) + t_str + '_V*.cdf')
filelist_asun = filelist_asun + \
glob.glob(path+'solo_'+l_str+'_epd-'+sensor+'-asun-rates_' +
str(i) + t_str + '_V*.cdf')
filelist_north = filelist_north + \
glob.glob(path+'solo_'+l_str+'_epd-'+sensor+'-north-rates_' +
str(i) + t_str + '_V*.cdf')
filelist_south = filelist_south + \
glob.glob(path+'solo_'+l_str+'_epd-'+sensor+'-south-rates_' +
str(i) + t_str + '_V*.cdf')
if filenames_only:
filelist_sun = [os.path.basename(x) for x in filelist_sun]
filelist_asun = [os.path.basename(x) for x in filelist_asun]
filelist_north = [os.path.basename(x) for x in filelist_north]
filelist_south = [os.path.basename(x) for x in filelist_south]
filelist = {'sun': filelist_sun,
'asun': filelist_asun,
'north': filelist_north,
'south': filelist_south
}
return filelist
def _get_step_filelist(level, startdate, enddate, path,
filenames_only=False):
"""
INPUT:
level: 'll', 'l2'
startdate, enddate: YYYYMMDD
path: directory in which the data is located;
e.g. '/home/userxyz/uni/solo/data/l2/epd/step/'
filenames_only: if True only give the filenames, not the full path
RETURNS:
List of files matching selection criteria.
"""
sensor = 'step'
if level == 'll':
l_str = 'LL02'
t_str = 'T??????-????????T??????'
if level == 'l2':
l_str = 'L2'
t_str = ''
filelist = []
for i in range(startdate, enddate+1):
filelist = filelist + \
glob.glob(path+'solo_'+l_str+'_epd-'+sensor+'-rates_' +
str(i) + t_str + '_V*.cdf')
if filenames_only:
filelist = [os.path.basename(x) for x in filelist]
return filelist
def _epd_ll_download(date, path, sensor, viewing=None):
"""
Download EPD low latency data from http://soar.esac.esa.int/soar
One file/day per call.
Note: for sensor 'step' the 'viewing' parameter is accepted but ignored (STEP has no separate viewing directions).
Example:
_epd_ll_download(20210415,
'/home/userxyz/solo/data/low_latency/epd/ept/',
'ept', 'north')
_epd_ll_download(20200820,
'/home/userxyz/solo/data/low_latency/epd/step/',
'step')
"""
# try loading tqdm for download progress display
tqdm_available, download_url = _load_tqdm(verbose=True)
# get list of available data files, obtain corresponding start & end time
fl = get_available_soar_files(date, date, sensor, 'll')
# try:
if sensor.lower() == 'step':
stime = 'T'+fl[0].split('T')[1].split('-')[0]
etime = 'T'+fl[0].split('T')[2].split('_')[0]
url = 'http://soar.esac.esa.int/soar-sl-tap/data?' + \
'retrieval_type=LAST_PRODUCT&data_item_id=solo_LL02_epd-' + \
sensor.lower()+'-rates_'+str(date) + \
stime+'-'+str(date+1)+etime+'&product_type=LOW_LATENCY'
else:
stime = 'T'+fl[0].split('T')[1].split('-')[0] # fl[0][-32:-25]
etime = 'T'+fl[0].split('T')[2].split('_')[0] # fl[0][-16:-9]
url = 'http://soar.esac.esa.int/soar-sl-tap/data?' + \
'retrieval_type=LAST_PRODUCT&data_item_id=solo_LL02_epd-' + \
sensor.lower()+'-'+viewing.lower()+'-rates_'+str(date) + \
stime+'-'+str(date+1)+etime+'&product_type=LOW_LATENCY'
# Get filename from url
file_name = _get_filename_url(
urllib.request.urlopen(url).headers['Content-Disposition'])
if tqdm_available:
download_url(url, path+file_name)
else:
urllib.request.urlretrieve(url, path+file_name)
return path+file_name
def _epd_l2_download(date, path, sensor, viewing=None):
"""
Download EPD level 2 data from http://soar.esac.esa.int/soar
One file/day per call.
Example:
_epd_l2_download(20200820,
'/home/userxyz/solo/data/l2/epd/ept/',
'ept', 'north')
_epd_l2_download(20200820,
'/home/userxyz/solo/data/l2/epd/step/',
'step')
"""
# try loading tqdm for download progress display
tqdm_available, download_url = _load_tqdm(verbose=True)
if sensor.lower() == 'step':
url = 'http://soar.esac.esa.int/soar-sl-tap/data?' + \
'retrieval_type=LAST_PRODUCT&data_item_id=solo_L2_epd-' + \
sensor.lower()+'-rates_'+str(date) + \
'&product_type=SCIENCE'
else:
url = 'http://soar.esac.esa.int/soar-sl-tap/data?' + \
'retrieval_type=LAST_PRODUCT&data_item_id=solo_L2_epd-' + \
sensor.lower()+'-'+viewing.lower()+'-rates_'+str(date) + \
'&product_type=SCIENCE'
# Get filename from url
file_name = _get_filename_url(
urllib.request.urlopen(url).headers['Content-Disposition'])
if tqdm_available:
download_url(url, path+file_name)
else:
urllib.request.urlretrieve(url, path+file_name)
return path+file_name
def get_available_soar_files(startdate, enddate, sensor, level='l2'):
"""
Get list of files available at SOAR
Check ESA's SOAR database for available Solar Orbiter/EPD files in date
range for given sensor and data level. Returns list of file names.
Parameters
----------
startdate : yyyymmdd (int)
Provides year (yyyy), month (mm) and day (dd) of the start date as one
combined integer; fill empty positions with zeros, e.g. '20210415'
enddate : yyyymmdd (int)
Provides year (yyyy), month (mm) and day (dd) of the end date as one
combined integer; fill empty positions with zeros, e.g. '20210415'
sensor : {'ept', 'het', 'step'}
Defines EPD sensor
level : {'l2', 'll'}, optional
Defines level of data product: level 2 ('l2') or low-latency ('ll');
by default 'l2'
Returns
-------
filelist : list of str
List of corresponding files available at SOAR.
"""
# add 1 day to enddate to better work with SOAR's API
# enddate = (pd.to_datetime(str(enddate))+
# pd.to_timedelta('1d')).strftime('%Y%m%d')
sy = str(startdate)[0:4]
sm = str(startdate)[4:6]
sd = str(startdate)[6:8]
ey = str(enddate)[0:4]
em = str(enddate)[4:6]
ed = str(enddate)[6:8]
if level.lower() == 'l2':
p_level = 'L2' # "processing_level"
# data_type = 'v_sc_data_item'
if level.lower() == 'll':
p_level = 'LL02' # "processing_level"
# data_type = 'v_ll_data_item'
data_type = 'v_public_files'
url = "http://soar.esac.esa.int/soar-sl-tap/tap/sync?REQUEST=doQuery&" + \
"LANG=ADQL&retrieval_type=LAST_PRODUCT&FORMAT=votable_plain&" + \
"QUERY=SELECT+*+FROM+"+data_type + \
"+WHERE+(instrument='EPD')+AND+((begin_time%3E%3D'"+sy+"-"+sm + \
"-"+sd+"+00:00:00')+AND+(begin_time%3C%3D'"+ey+"-"+em+"-"+ed + \
"+01:00:00'))"
filelist = urllib.request.urlretrieve(url)
# open VO table, convert to astropy table, convert to pandas dataframe
df = parse_single_table(filelist[0]).to_table().to_pandas()
# convert bytestrings to unicode, from stackoverflow.com/a/67051068/2336056
for col, dtype in df.dtypes.items():
if dtype == np.object: # Only process object columns.
# decode, or return original value if decode return Nan
df[col] = df[col].str.decode('utf-8').fillna(df[col])
# remove duplicates with older version number
df = df.sort_values('file_name')
df.drop_duplicates(subset=['item_id'], keep='last', inplace=True)
# only use data level wanted; i.e., 'LL' or 'L2'
df = df[df['processing_level'] == p_level]
# list filenames for given telescope (e.g., 'HET')
# filelist = df['filename'][df['sensor'] == sensor.upper()].sort_values()
filelist = [s for s in df['file_name'].values if sensor.lower() in s]
# list filenames for 'rates' type (i.e., remove 'hcad')
filelist = [s for s in filelist if "rates" in s]
# filelist.sort()
if len(filelist) == 0:
print('No corresponding data found at SOAR!')
return filelist
def _autodownload_cdf(startdate, enddate, sensor, level, path):
"""
Uses get_available_soar_files() to check which files matching the selection
criteria are available online. Compares them with locally available files at 'path', and
downloads missing files to 'path' using epd_l*_download()
"""
fls = get_available_soar_files(startdate, enddate, sensor, level)
for i in fls:
my_file = Path(path)/i
if not my_file.is_file():
if os.path.exists(path) is False:
print(f'Creating dir {path}')
os.makedirs(path)
tdate = int(i.split('_')[3].split('T')[0])
tview = i.split('-')[2]
if level.lower() == 'll':
_ = _epd_ll_download(date=tdate, path=path, sensor=sensor,
viewing=tview)
if level.lower() == 'l2':
_ = _epd_l2_download(date=tdate, path=path, sensor=sensor,
viewing=tview)
return
def epd_load(sensor, level, startdate, enddate=None, viewing=None, path=None,
autodownload=False):
"""
Load SolO/EPD data
Load-in data for Solar Orbiter/EPD energetic charged particle sensors EPT,
HET, and STEP. Supports level 2 and low latency data provided by ESA's
Solar Orbiter Archive. Optionally downloads missing data directly. Returns
data as Pandas dataframe.
Parameters
----------
sensor : {'ept', 'het', 'step'}
Defines EPD sensor
level : {'l2', 'll'}
Defines level of data product: level 2 ('l2') or low-latency ('ll')
startdate : (datetime or int)
Provides start date. Either a datetime object (e.g., dt.date(2021,12,31)
or dt.datetime(2021,4,15)). Or a combined integer yyyymmdd with year
(yyyy), month (mm) and day (dd) with empty positions filled with zeros,
e.g. 20210415
enddate : (datetime or int), optional
Provides end date. Either a datetime object (e.g., dt.date(2021,12,31)
or dt.datetime(2021,4,15)). Or a combined integer yyyymmdd with year
(yyyy), month (mm) and day (dd) with empty positions filled with zeros,
e.g. 20210415
(if no enddate is given, 'enddate = startdate' will be set)
viewing : {'sun', 'asun', 'north', 'south' or None}, optional
Viewing direction of sensor. Needed for 'ept' or 'het'; for 'step'
should be None. By default None
path : str, optional
User-specified directory in which Solar Orbiter data is/should be
organized; e.g. '/home/userxyz/solo/data/', by default None
autodownload : bool, optional
If True, will try to download missing data files from SOAR, by default
False.
Returns
-------
For EPT & HET:
1. Pandas dataframe with proton fluxes and errors (for EPT also alpha particles) in 'particles / (s cm^2 sr MeV)'
2. Pandas dataframe with electron fluxes and errors in 'particles / (s cm^2 sr MeV)'
3. Dictionary with energy information for all particles:
- String with energy channel info
- Value of lower energy bin edge in MeV
- Value of energy bin width in MeV
For STEP:
1. Pandas dataframe with fluxes and errors in 'particles / (s cm^2 sr MeV)'
2. Dictionary with energy information for all particles:
- String with energy channel info
- Value of lower energy bin edge in MeV
- Value of energy bin width in MeV
Raises
------
Exception
Sensors 'ept' or 'het' need a provided 'viewing' direction. If None is
given, Exception is raised.
Examples
--------
Load EPD/HET sun viewing direction low-latency data for Aug 20 to Aug 22,
2020 from user-defined directory, downloading missing files from SOAR:
>>> df_protons, df_electrons, energies = epd_load('het', 'll', 20200820,
... 20200822, 'sun', None, True)
Load EPD/STEP level 2 data for Aug 20 to Aug 22, 2020 from user-defined
directory, downloading missing files from SOAR:
>>> df, energies = epd_load(sensor='step', level='l2', startdate=20200820,
... enddate=20200822, autodownload=True)
"""
# refuse string as date input:
for d in [startdate, enddate]:
if isinstance(d, str):
raise SystemExit("startdate & enddate must be datetime objects or YYYYMMDD integer!")
# accept datetime object as date input by converting it to internal integer:
if isinstance(startdate, dt.datetime) or isinstance(startdate, dt.date):
startdate = int(startdate.strftime("%Y%m%d"))
if isinstance(enddate, dt.datetime) or isinstance(enddate, dt.date):
enddate = int(enddate.strftime("%Y%m%d"))
# check integer date input for length:
for d in [startdate, enddate]:
if isinstance(d, int):
if len(str(d)) != 8:
raise SystemExit(f"startdate & enddate must be (datetime objects or) integers of the form YYYYMMDD, not {d}!")
if sensor.lower() == 'step':
datadf, energies_dict = \
_read_step_cdf(level, startdate, enddate, path, autodownload)
return datadf, energies_dict
if sensor.lower() == 'ept' or sensor.lower() == 'het':
if viewing is None:
raise Exception("EPT and HET need a telescope 'viewing' " +
"direction! No data read!")
df_epd_p = []
df_epd_e = []
energies_dict = []
else:
df_epd_p, df_epd_e, energies_dict = \
_read_epd_cdf(sensor, viewing, level, startdate, enddate, path,
autodownload)
return df_epd_p, df_epd_e, energies_dict
def _read_epd_cdf(sensor, viewing, level, startdate, enddate=None, path=None,
autodownload=False):
"""
INPUT:
sensor: 'ept' or 'het' (string)
viewing: 'sun', 'asun', 'north', or 'south' (string)
level: 'll' or 'l2' (string)
startdate,
enddate: YYYYMMDD, e.g., 20210415 (integer)
(if no enddate is given, 'enddate = startdate' will be set)
path: directory in which Solar Orbiter data is/should be organized;
e.g. '/home/userxyz/uni/solo/data/' (string)
autodownload: if True will try to download missing data files from SOAR
RETURNS:
1. Pandas dataframe with proton fluxes and errors (for EPT also alpha
particles) in 'particles / (s cm^2 sr MeV)'
2. Pandas dataframe with electron fluxes and errors in
'particles / (s cm^2 sr MeV)'
3. Dictionary with energy information for all particles:
- String with energy channel info
- Value of lower energy bin edge in MeV
- Value of energy bin width in MeV
"""
# if no path to data directory is given, use the current directory
if path is None:
path = os.getcwd()
# select sub-directory for corresponding sensor (EPT, HET)
if level.lower() == 'll':
path = Path(path)/'low_latency'/'epd'/sensor.lower()
if level.lower() == 'l2':
path = Path(path)/'l2'/'epd'/sensor.lower()
# add an OS-specific path separator to the end of 'path'
path = f'{path}{os.sep}'
# if no 'enddate' is given, get data only for single day of 'startdate'
if enddate is None:
enddate = startdate
# if autodownload, check online available files and download if not locally
if autodownload:
_autodownload_cdf(startdate, enddate, sensor.lower(), level.lower(),
path)
# get list of local files for date range
filelist = _get_epd_filelist(sensor.lower(), level.lower(), startdate,
enddate, path=path)[viewing.lower()]
# check for duplicate files with different version numbers and remove them
filelist = _check_duplicates(filelist, verbose=True)
if len(filelist) == 0:
raise Exception('WARNING: No corresponding data files found! ' +
'Try different settings, path or autodownload.')
df_epd_p = []
df_epd_e = []
energies_dict = []
else:
""" <-- get column names of dataframe """
if sensor.lower() == 'ept':
if level.lower() == 'll':
protons = 'Prot'
electrons = 'Ele'
e_epoch = 0 # 'EPOCH'
if level.lower() == 'l2':
protons = 'Ion'
electrons = 'Electron'
e_epoch = 1 # 'EPOCH_1'
if sensor.lower() == 'het':
if level.lower() == 'll':
protons = 'H'
electrons = 'Ele'
e_epoch = 0 # 'EPOCH'
if level.lower() == 'l2':
protons = 'H' # EPOCH
electrons = 'Electron' # EPOCH_4, QUALITY_FLAG_4
e_epoch = 4 # 'EPOCH_4'
# load cdf files using read_cdf from sunpy (uses cdflib)
data = read_cdf(filelist[0])
df_p = data[0].to_dataframe()
df_e = data[e_epoch].to_dataframe()
if len(filelist) > 1:
for f in filelist[1:]:
data = read_cdf(f)
t_df_p = data[0].to_dataframe()
t_df_e = data[e_epoch].to_dataframe()
df_p = pd.concat([df_p, t_df_p])
df_e = pd.concat([df_e, t_df_e])
# directly open first cdf file with cdflib to access metadata used in the following
t_cdf_file = cdflib.CDF(filelist[0])
# p intensities:
flux_p_channels = \
[protons+f'_Flux_{i}' for i in
range(t_cdf_file.varinq(protons+'_Flux')['Dim_Sizes'][0])]
# p errors:
if level.lower() == 'll':
flux_sigma_p_channels = \
[protons+f'_Flux_Sigma_{i}' for i in
range(t_cdf_file.varinq(protons+'_Flux')['Dim_Sizes'][0])]
if level.lower() == 'l2':
flux_sigma_p_channels = \
[protons+f'_Uncertainty_{i}' for i in
range(t_cdf_file.varinq(protons+'_Flux')['Dim_Sizes'][0])]
# p rates:
rate_p_channels = \
[protons+f'_Rate_{i}' for i in
range(t_cdf_file.varinq(protons+'_Rate')['Dim_Sizes'][0])]
if sensor.lower() == 'ept':
# alpha intensities:
flux_a_channels = \
[f'Alpha_Flux_{i}' for i in
range(t_cdf_file.varinq("Alpha_Flux")['Dim_Sizes'][0])]
# alpha errors:
if level.lower() == 'll':
flux_sigma_a_channels = \
[f'Alpha_Flux_Sigma_{i}' for i in
range(t_cdf_file.varinq("Alpha_Flux")['Dim_Sizes'][0])]
if level.lower() == 'l2':
flux_sigma_a_channels = \
[f'Alpha_Uncertainty_{i}' for i in
range(t_cdf_file.varinq("Alpha_Flux")['Dim_Sizes'][0])]
# alpha rates:
rate_a_channels = \
[f'Alpha_Rate_{i}' for i in
range(t_cdf_file.varinq("Alpha_Rate")['Dim_Sizes'][0])]
# e intensities:
flux_e_channels = \
[electrons+f'_Flux_{i}' for i in
range(t_cdf_file.varinq(electrons+'_Flux')['Dim_Sizes'][0])]
# e errors:
if level.lower() == 'll':
flux_sigma_e_channels = \
[f'Ele_Flux_Sigma_{i}' for i in
range(t_cdf_file.varinq(electrons+'_Flux')['Dim_Sizes'][0])]
if level.lower() == 'l2':
flux_sigma_e_channels = \
[f'Electron_Uncertainty_{i}' for i in
range(t_cdf_file.varinq(electrons+'_Flux')['Dim_Sizes'][0])]
# e rates:
rate_e_channels = \
[electrons+f'_Rate_{i}' for i in
range(t_cdf_file.varinq(electrons+'_Rate')['Dim_Sizes'][0])]
if level.lower() == 'l2':
if sensor.lower() == 'het':
df_epd_p = pd.concat(
[df_p[flux_p_channels], df_p[flux_sigma_p_channels],
df_p[rate_p_channels], df_p['DELTA_EPOCH'],
df_p['QUALITY_FLAG'], df_p['QUALITY_BITMASK']],
axis=1,
keys=['H_Flux', 'H_Uncertainty', 'H_Rate',
'DELTA_EPOCH', 'QUALITY_FLAG', 'QUALITY_BITMASK'])
df_epd_e = pd.concat([df_e[flux_e_channels],
df_e[flux_sigma_e_channels],
df_e[rate_e_channels],
df_e['DELTA_EPOCH_4'],
df_e['QUALITY_FLAG_4'],
df_e['QUALITY_BITMASK_4']], axis=1,
keys=['Electron_Flux',
'Electron_Uncertainty',
'Electron_Rate',
'DELTA_EPOCH_4',
'QUALITY_FLAG_4',
'QUALITY_BITMASK_4'])
if sensor.lower() == 'ept':
df_epd_p = pd.concat(
[df_p[flux_p_channels], df_p[flux_sigma_p_channels],
df_p[rate_p_channels], df_p[flux_a_channels],
df_p[flux_sigma_a_channels], df_p[rate_a_channels],
df_p['DELTA_EPOCH'], df_p['QUALITY_FLAG'],
df_p['QUALITY_BITMASK']],
axis=1,
keys=['Ion_Flux', 'Ion_Uncertainty', 'Ion_Rate',
'Alpha_Flux', 'Alpha_Uncertainty', 'Alpha_Rate',
'DELTA_EPOCH', 'QUALITY_FLAG', 'QUALITY_BITMASK'])
df_epd_e = pd.concat([df_e[flux_e_channels],
df_e[flux_sigma_e_channels],
df_e[rate_e_channels],
df_e['DELTA_EPOCH_1'],
df_e['QUALITY_FLAG_1'],
df_e['QUALITY_BITMASK_1']], axis=1,
keys=['Electron_Flux',
'Electron_Uncertainty',
'Electron_Rate',
'DELTA_EPOCH_1',
'QUALITY_FLAG_1',
'QUALITY_BITMASK_1'])
if level.lower() == 'll':
if sensor.lower() == 'het':
df_epd_p = pd.concat(
[df_p[flux_p_channels], df_p[flux_sigma_p_channels]],
axis=1, keys=['H_Flux', 'H_Uncertainty', 'QUALITY_FLAG'])
if sensor.lower() == 'ept':
df_epd_p = pd.concat(
[df_p[flux_p_channels], df_p[flux_sigma_p_channels],
df_p[flux_a_channels], df_p[flux_sigma_a_channels],
df_p['QUALITY_FLAG']],
axis=1, keys=['Ion_Flux', 'Ion_Uncertainty',
'Alpha_Flux', 'Alpha_Uncertainty',
'QUALITY_FLAG'])
df_epd_e = pd.concat([df_e[flux_e_channels],
df_e[flux_sigma_e_channels],
df_e['QUALITY_FLAG']], axis=1,
keys=['Electron_Flux',
'Electron_Uncertainty',
'QUALITY_FLAG'])
# manual replace FILLVALUES in dataframes with np.nan
# t_cdf_file.varattsget("Ion_Flux")["FILLVAL"][0] = -1e+31
# same for l2 & ll and het & ept and e, p/ion, alpha
# remove this (i.e. following two lines) when sunpy's read_cdf is updated,
# and FILLVAL will be replaced directly, see
# https://github.com/sunpy/sunpy/issues/5908
df_epd_p = df_epd_p.replace(-1e+31, np.nan)
df_epd_e = df_epd_e.replace(-1e+31, np.nan)
energies_dict = {protons+"_Bins_Text":
t_cdf_file.varget(protons+'_Bins_Text'),
protons+"_Bins_Low_Energy":
t_cdf_file.varget(protons+'_Bins_Low_Energy'),
protons+"_Bins_Width":
t_cdf_file.varget(protons+'_Bins_Width'),
electrons+"_Bins_Text":
t_cdf_file.varget(electrons+'_Bins_Text'),
electrons+"_Bins_Low_Energy":
t_cdf_file.varget(electrons+'_Bins_Low_Energy'),
electrons+"_Bins_Width":
t_cdf_file.varget(electrons+'_Bins_Width')
}
if sensor.lower() == 'ept':
energies_dict["Alpha_Bins_Text"] = \
t_cdf_file.varget('Alpha_Bins_Text')
energies_dict["Alpha_Bins_Low_Energy"] = \
t_cdf_file.varget('Alpha_Bins_Low_Energy')
energies_dict["Alpha_Bins_Width"] = \
t_cdf_file.varget('Alpha_Bins_Width')
# name index column (instead of e.g. 'EPOCH' or 'EPOCH_1')
df_epd_p.index.names = ['Time']
df_epd_e.index.names = ['Time']
'''
Careful if adding more species - they might have different EPOCH
dependencies and cannot easily be put in the same dataframe!
'''
return df_epd_p, df_epd_e, energies_dict
def _read_step_cdf(level, startdate, enddate=None, path=None,
autodownload=False):
"""
INPUT:
level: 'll' or 'l2' (string)
startdate,
enddate: YYYYMMDD, e.g., 20210415 (integer)
(if no enddate is given, 'enddate = startdate' will be set)
path: directory in which Solar Orbiter data is/should be organized;
e.g. '/home/userxyz/uni/solo/data/' (string)
autodownload: if True will try to download missing data files from SOAR
RETURNS:
1. Pandas dataframe with fluxes and errors in
'particles / (s cm^2 sr MeV)'
2. Dictionary with energy information for all particles:
- String with energy channel info
- Value of lower energy bin edge in MeV
- Value of energy bin width in MeV
"""
sensor = 'step'
# if no path to data directory is given, use the current directory
if path is None:
path = os.getcwd()
# select sub-directory for corresponding sensor (in this case just 'step')
if level.lower() == 'll':
path = Path(path)/'low_latency'/'epd'/sensor.lower()
if level.lower() == 'l2':
path = Path(path)/'l2'/'epd'/sensor.lower()
    # add an OS-specific separator to the end of 'path'
path = f'{path}{os.sep}'
# if no 'enddate' is given, get data only for single day of 'startdate'
if enddate is None:
enddate = startdate
# if True, check online available files and download if not locally present
if autodownload:
_autodownload_cdf(startdate, enddate, sensor.lower(), level.lower(),
path)
# get list of local files for date range
filelist = _get_step_filelist(level.lower(), startdate, enddate, path=path)
# check for duplicate files with different version numbers and remove them
filelist = _check_duplicates(filelist, verbose=True)
if len(filelist) == 0:
raise Exception('WARNING: No corresponding data files found! ' +
'Try different settings, path or autodownload.')
datadf = []
energies_dict = []
else:
all_cdf = []
for file in filelist:
all_cdf.append(cdflib.CDF(file))
if level == 'l2':
param_list = ['Integral_Flux', 'Magnet_Flux', 'Integral_Rate',
'Magnet_Rate', 'Magnet_Uncertainty',
'Integral_Uncertainty']
# set up the dictionary:
energies_dict = \
{"Bins_Text": all_cdf[0].varget('Bins_Text'),
"Bins_Low_Energy": all_cdf[0].varget('Bins_Low_Energy'),
"Bins_Width": all_cdf[0].varget('Bins_Width'),
"Sector_Bins_Text": all_cdf[0].varget('Sector_Bins_Text'),
"Sector_Bins_Low_Energy": all_cdf[0].varget('Sector_Bins_Low_Energy'),
"Sector_Bins_Width": all_cdf[0].varget('Sector_Bins_Width')
}
if level == 'll':
param_list = ['Integral_Flux', 'Ion_Flux', 'Integral_Flux_Sigma',
'Ion_Flux_Sigma']
# set up the dictionary:
energies_dict = \
{"Integral_Bins_Text": all_cdf[0].varget('Integral_Bins_Text'),
"Integral_Bins_Low_Energy": all_cdf[0].varget('Integral_Bins_Low_Energy'),
"Integral_Bins_Width": all_cdf[0].varget('Integral_Bins_Width'),
"Ion_Bins_Text": all_cdf[0].varget('Ion_Bins_Text'),
"Ion_Bins_Low_Energy": all_cdf[0].varget('Ion_Bins_Low_Energy'),
"Ion_Bins_Width": all_cdf[0].varget('Ion_Bins_Width')
}
df_list = []
for cdffile in all_cdf:
col_list = []
for key in param_list:
try:
t_df = pd.DataFrame(cdffile[key], index=cdffile['EPOCH'])
# Replace FILLVAL dynamically for each element of param_list
fillval = cdffile.varattsget(key)["FILLVAL"]
t_df = t_df.replace(fillval, np.nan)
col_list.append(t_df)
except TypeError:
print(' ')
print("WARNING: Gap in dataframe due to missing cdf file.")
break
try:
temp_df = pd.concat(col_list, axis=1, keys=param_list)
df_list.append(temp_df)
except ValueError:
continue
datadf = pd.concat(df_list)
        # transform the index of the dataframe into pandas datetime
        # note the cast datadf.index -> np.int_ so that encode()
        # understands the format
datetimes = cdflib.cdfepoch.encode(np.int_(datadf.index))
datadf.index = pd.to_datetime(datetimes)
datadf.index.names = ['Time']
'''
Careful if adding more species - they might have different EPOCH
dependencies and cannot easily be put in the same dataframe!
'''
return datadf, energies_dict
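# --- Hedged usage sketch (added; not part of the original module) ------------
# Load one day of L2 STEP data. The data directory below is a hypothetical
# example; autodownload=True fetches any missing CDF files from SOAR.
if __name__ == '__main__':  # pragma: no cover
    step_df, step_energies = _read_step_cdf('l2', 20210415,
                                             path='/home/user/solo/data/',
                                             autodownload=True)
    print(step_energies['Bins_Text'][:3])
    print(step_df['Integral_Flux'].head())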
```
|
{
"source": "jgieseler/solo-mag-loader",
"score": 2
}
|
#### File: solo-mag-loader/solo_mag_loader/__init__.py
```python
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
pass # package is not installed
import datetime as dt
import os
import sunpy
from sunpy.net import Fido
from sunpy.net import attrs as a
from sunpy.timeseries import TimeSeries
def _date2str(date):
year = str(date)[0:4]
month = str(date)[4:6]
day = str(date)[6:8]
return year+'/'+month+'/'+day
def mag_load(startdate, enddate, level='l2', type='normal', frame='rtn', path=None):
"""
Load SolO/MAG data
Load-in data for Solar Orbiter/MAG sensor. Supports level 2 and low latency
data provided by CDAWeb. Optionally downloads missing
data directly. Returns data as Pandas dataframe.
Parameters
----------
startdate, enddate : {datetime, str, or int}
Datetime object (e.g., dt.date(2021,12,31) or dt.datetime(2021,4,15)),
"standard" datetime string (e.g., "2021/04/15") or integer of the form
yyyymmdd with empty positions filled with zeros, e.g. '20210415'
(enddate must always be later than startdate)
level : {'l2', 'll'}, optional
Defines level of data product: level 2 ('l2') or low-latency ('ll').
By default 'l2'.
type : {'normal', 'normal-1-minute', or 'burst'}, optional
By default 'normal'.
frame : {'rtn', 'srf', or 'vso'}, optional
Coordinate frame of MAG data. By default 'rtn'.
path : {str}, optional
Local path for storing downloaded data, by default None
Returns
-------
    Pandas dataframe with magnetic field data
"""
if type == 'normal-1-minute' and frame == 'srf':
raise Exception("For SRF frame only 'normal' or 'burst' data type available!")
if type == 'normal-1-min':
type = 'normal-1-minute'
if level == 'll' or level == 'LL':
level = 'll02'
data_id = 'SOLO_'+level.upper()+'_MAG'
else:
data_id = 'SOLO_'+level.upper()+'_MAG-'+frame.upper()+'-'+type.upper()
if isinstance(startdate, int):
startdate = _date2str(startdate)
if isinstance(enddate, int):
enddate = _date2str(enddate)
trange = a.Time(startdate, enddate)
dataset = a.cdaweb.Dataset(data_id)
result = Fido.search(trange, dataset)
filelist = [i[0].split('/')[-1] for i in result.show('URL')[0]]
filelist.sort()
if path is None:
filelist = [sunpy.config.get('downloads', 'download_dir') + os.sep + file for file in filelist]
    elif isinstance(path, str):
filelist = [path + os.sep + f for f in filelist]
for i, f in enumerate(filelist):
if os.path.exists(f) and os.path.getsize(f) == 0:
os.remove(f)
if not os.path.exists(f):
downloaded_file = Fido.fetch(result[0][i], path=path)
# files = Fido.fetch(result, path=path)
solo_mag = TimeSeries(filelist, concatenate=True)
df_solo_mag = solo_mag.to_dataframe()
return df_solo_mag
# VSO, LL02 not working
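# --- Hedged usage sketch (added; not part of the original module) ------------
# Load one day of L2 RTN magnetometer data; the local download directory is a
# hypothetical example path.
if __name__ == '__main__':  # pragma: no cover
    df_mag = mag_load(20210415, 20210416, level='l2', type='normal',
                      frame='rtn', path='/home/user/solo/data/mag')
    print(df_mag.head())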
```
|
{
"source": "jgieseler/sunpy",
"score": 3
}
|
#### File: map/tests/strategies.py
```python
import hypothesis.strategies as st
import numpy as np
from hypothesis import assume
from hypothesis.extra.numpy import arrays
@st.composite
def matrix_meta(draw, key):
"""
    Create an arbitrary but valid (i.e. non-singular) PCi_j or CDi_j matrix.
Parameters
----------
key : {'pc', 'cd'}
"""
arr = draw(arrays(
float, (2, 2),
elements=st.floats(min_value=-1, max_value=1, allow_nan=False))
)
# Make sure matrix isn't singular
assume(np.abs(np.linalg.det(arr)) > 1e-8)
return {f'{key}1_1': arr[0, 0],
f'{key}1_2': arr[0, 1],
f'{key}2_1': arr[1, 0],
f'{key}2_2': arr[1, 1]}
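# Hedged usage sketch (added; not part of the original module): how this
# strategy is typically consumed in a hypothesis test. The test name below is
# hypothetical.
#
#     from hypothesis import given
#
#     @given(matrix_meta('cd'))
#     def test_cd_matrix_is_nonsingular(meta):
#         cd = np.array([[meta['cd1_1'], meta['cd1_2']],
#                        [meta['cd2_1'], meta['cd2_2']]])
#         assert abs(np.linalg.det(cd)) > 1e-8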
```
|
{
"source": "jgi-kbase/CachingService",
"score": 3
}
|
#### File: test/caching_service/test_api_v1.py
```python
import unittest
import requests
from uuid import uuid4
import functools
import src.caching_service.minio as minio
from src.caching_service.exceptions import MissingCache
url = 'http://web:5000/v1'
@functools.lru_cache()
def get_cache_id(cache_params=None):
if not cache_params:
cache_params = '{"xyz":123}'
resp = requests.post(
url + '/cache_id',
headers={'Authorization': 'non_admin_token', 'Content-Type': 'application/json'},
data=cache_params
)
print('-' * 80)
print('Response: ' + resp.text)
json = resp.json()
cache_id = json['cache_id']
return cache_id
@functools.lru_cache()
def upload_cache(cache_params=None, content=None):
"""Upload a cache file for repeatedly testing downloads/deletes/etc."""
cache_id = get_cache_id(cache_params)
if not content:
content = b'{"hallo": "welt"}'
requests.post(
url + '/cache/' + cache_id,
headers={'Authorization': 'non_admin_token'},
files={'file': ('test.json', content)}
)
return (cache_id, content)
class TestApiV1(unittest.TestCase):
def test_root(self):
"""Test get paths."""
resp = requests.get(url)
json = resp.json()
# Don't particularly feel the need to test the content of this
self.assertTrue(json['routes'])
def test_missing_auth(self):
"""Test the error response for all endpoints that require the Authentication header."""
endpoints = [
{'method': 'POST', 'url': url + '/cache_id'},
{'method': 'GET', 'url': url + '/cache/example'},
{'method': 'POST', 'url': url + '/cache/example'},
{'method': 'DELETE', 'url': url + '/cache/example'}
]
for req_data in endpoints:
resp = requests.post(req_data['url'], headers={})
json = resp.json()
self.assertEqual(resp.status_code, 400, 'Status code is 400')
self.assertEqual(json['status'], 'error', 'Status is set to "error"')
self.assertTrue('Missing header' in json['error'], 'Error message is set')
def test_invalid_auth(self):
"""Test the error response for all endpoints that require valid auth."""
endpoints = [
{'method': 'POST', 'url': url + '/cache_id'},
{'method': 'GET', 'url': url + '/cache/example'},
{'method': 'POST', 'url': url + '/cache/example'},
{'method': 'DELETE', 'url': url + '/cache/example'}
]
for req_data in endpoints:
resp = requests.post(req_data['url'], headers={'Authorization': 'invalid_token'})
json = resp.json()
self.assertEqual(resp.status_code, 403, 'Status code is 403')
self.assertEqual(json['status'], 'error', 'Status is set to "error"')
self.assertTrue('Invalid token' in json['error'], 'Error message is set')
def test_make_cache_id_valid(self):
"""
Test a valid call to create a new cache ID.
POST /cache_id
"""
resp = requests.post(
url + '/cache_id',
headers={'Authorization': 'non_admin_token', 'Content-Type': 'application/json'},
data='{"xyz": 123}'
)
json = resp.json()
self.assertEqual(resp.status_code, 200)
self.assertEqual(json['status'], 'ok', 'Status is "generated"')
self.assertEqual(len(json['cache_id']), 128, 'Creates 128-byte cache ID')
def test_make_cache_id_malformed_json(self):
"""
Test a call to make a cache ID with invalid JSON formatting.
POST /cache_id
"""
resp = requests.post(
url + '/cache_id',
headers={'Authorization': 'non_admin_token', 'Content-Type': 'application/json'},
data='{{{{(((('
)
json = resp.json()
self.assertEqual(resp.status_code, 400)
self.assertEqual(json['status'], 'error', 'Status is "error"')
self.assertTrue('JSON parsing error' in json['error'], 'Error message is set')
def test_make_cache_id_unauthorized(self):
"""
Test a call to create a new cache ID with an invalid auth token.
POST /cache_id
"""
resp = requests.post(
url + '/cache_id',
headers={'Authorization': 'invalid_token', 'Content-Type': 'application/json'},
data='{"xyz": 123}'
)
json = resp.json()
self.assertEqual(resp.status_code, 403, 'Status code is 403')
self.assertEqual(json['status'], 'error', 'Status is set to "error"')
self.assertTrue('Invalid token' in json['error'], 'Gives error message')
def test_make_cache_id_wrong_content_type(self):
"""
Test a call to create a new cache ID with the wrong content-type.
POST /cache_id
"""
resp = requests.post(
url + '/cache_id',
headers={'Authorization': 'non_admin_token', 'Content-Type': 'multipart/form-data'},
data='{"xyz": 123}'
)
json = resp.json()
self.assertEqual(resp.status_code, 400, 'Status code is 400')
self.assertEqual(json['status'], 'error', 'Status is set to "error"')
self.assertTrue('Invalid Content-Type' in json['error'])
def test_make_cache_id_missing_content_type(self):
"""
Test a call to create a new cache ID with missing content-type.
POST /cache_id
"""
resp = requests.post(
url + '/cache_id',
headers={'Authorization': 'non_admin_token'},
data='{"xyz": 123}'
)
json = resp.json()
self.assertEqual(resp.status_code, 400, 'Status code is 400')
self.assertEqual(json['status'], 'error', 'Status is set to "error"')
self.assertTrue('Invalid Content-Type' in json['error'])
def test_make_cache_id_missing_json(self):
"""
        Test a call to create a new cache ID with a missing JSON body.
POST /cache_id
"""
resp = requests.post(
url + '/cache_id',
headers={'Authorization': 'non_admin_token', 'Content-Type': 'application/json'}
)
json = resp.json()
self.assertEqual(resp.status_code, 400, 'Status code is 400')
self.assertEqual(json['status'], 'error', 'Status is set to "error"')
self.assertTrue('JSON parsing error' in json['error'])
def test_download_cache_file_valid(self):
"""
Test a call to download an existing cache file successfully.
GET /cache/<cache_id>
"""
(cache_id, content) = upload_cache()
resp = requests.get(
url + '/cache/' + cache_id,
headers={'Authorization': 'non_admin_token'}
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content, content)
def test_download_cache_file_unauthorized_cache(self):
"""
Test a call to download a cache file that was made by a different token ID
GET /cache/<cache_id>
"""
cache_id = str(uuid4())
minio.create_placeholder(cache_id, 'test_user')
resp = requests.get(
url + '/cache/' + cache_id,
headers={'Authorization': 'non_admin_token'}
)
json = resp.json()
self.assertEqual(resp.status_code, 403, 'Status code is 403')
self.assertEqual(json['status'], 'error', 'Status is set to "error"')
self.assertTrue('You do not have access' in json['error'])
def test_download_cache_file_missing_cache(self):
"""
Test a call to download a cache file that does not exist
GET /cache/<cache_id>
"""
cache_id = str(uuid4())
resp = requests.get(
url + '/cache/' + cache_id,
headers={'Authorization': 'non_admin_token'}
)
json = resp.json()
self.assertEqual(resp.status_code, 404)
self.assertEqual(json['status'], 'error')
self.assertTrue('not found' in json['error'])
def test_upload_cache_file_valid(self):
"""
Test a call to upload a cache file successfully.
POST /cache/<cache_id>
"""
cache_id = get_cache_id()
content = b'{"hallo": "welt"}'
resp = requests.post(
url + '/cache/' + cache_id,
headers={'Authorization': 'non_admin_token'},
files={'file': ('test.json', content)}
)
json = resp.json()
self.assertEqual(resp.status_code, 200)
self.assertEqual(json['status'], 'ok')
def test_upload_cache_file_unauthorized_cache(self):
"""
        Test a call to upload a cache file to a cache entry owned by a different token ID.
POST /cache/<cache_id>
"""
cache_id = str(uuid4())
minio.create_placeholder(cache_id, 'test_user')
resp = requests.post(
url + '/cache/' + cache_id,
headers={'Authorization': 'non_admin_token'},
files={'file': ('test.json', b'{"x": 1}')}
)
json = resp.json()
self.assertEqual(resp.status_code, 403, 'Status code is 403')
self.assertEqual(json['status'], 'error', 'Status is set to "error"')
self.assertTrue('You do not have access' in json['error'])
def test_upload_cache_file_missing_cache(self):
"""
        Test a call to upload a cache file for a cache ID that does not exist.
POST /cache/<cache_id>
"""
cache_id = str(uuid4())
resp = requests.post(
url + '/cache/' + cache_id,
headers={'Authorization': 'non_admin_token'},
files={'file': ('test.json', b'{"x": 1}')}
)
json = resp.json()
self.assertEqual(resp.status_code, 404, 'Status code is 404')
self.assertEqual(json['status'], 'error', 'Status is set to "error"')
self.assertTrue('not found' in json['error'])
def test_upload_cache_file_missing_file(self):
"""
        Test a call to upload a cache file without the required 'file' field.
POST /cache/<cache_id>
"""
cache_id = get_cache_id()
resp = requests.post(
url + '/cache/' + cache_id,
headers={'Authorization': 'non_admin_token'},
files={'filexx': ('test.json', b'{"x": 1}')}
)
json = resp.json()
self.assertEqual(resp.status_code, 400, 'Status code is 400')
self.assertEqual(json['status'], 'error', 'Status is set to "error"')
self.assertTrue('missing' in json['error'])
def test_delete_valid(self):
"""
Test a valid deletion of a cache entry.
DELETE /cache/<cache_id>
"""
cache_id = get_cache_id()
resp = requests.delete(
url + '/cache/' + cache_id,
headers={'Authorization': 'non_admin_token'}
)
json = resp.json()
self.assertEqual(resp.status_code, 200, 'Status code is 200')
self.assertEqual(json['status'], 'ok', 'Status is "deleted"')
# Test that the cache is inaccessible
with self.assertRaises(MissingCache):
minio.get_metadata(cache_id)
def test_delete_unauthorized_cache(self):
"""
Test a deletion of a cache entry with a cache created by a different token ID.
DELETE /cache/<cache_id>
"""
cache_id = str(uuid4())
minio.create_placeholder(cache_id, 'test_user')
resp = requests.delete(
url + '/cache/' + cache_id,
headers={'Authorization': 'non_admin_token'}
)
json = resp.json()
self.assertEqual(resp.status_code, 403, 'Status code is 403')
self.assertEqual(json['status'], 'error', 'Status is set to "error"')
self.assertTrue('You do not have access' in json['error'])
def test_delete_missing_cache(self):
"""
Test a deletion of a nonexistent cache entry
DELETE /cache/<cache_id>
"""
cache_id = str(uuid4())
resp = requests.delete(
url + '/cache/' + cache_id,
headers={'Authorization': 'non_admin_token'}
)
json = resp.json()
self.assertEqual(resp.status_code, 404, 'Status code is 404')
self.assertEqual(json['status'], 'error', 'Status is set to "error"')
self.assertTrue('not found' in json['error'])
```
|
{
"source": "jgi-kbase/IDMappingService",
"score": 2
}
|
#### File: jgikbase/idmapping/config.py
```python
from typing import Dict, Optional, Set, Tuple
from pathlib import Path
import os
import configparser
from jgikbase.idmapping.core.user import AuthsourceID
from jgikbase.idmapping.core.errors import MissingParameterError
from jgikbase.idmapping.core.user_lookup import LocalUserLookup
# May want different configuration implementations based on the deployment environment.
# YAGNI for now.
class IDMappingConfigError(Exception):
""" Thrown when there's an error in the ID Mapping system configuration. """
pass
class KBaseConfig:
"""
Loads a configuration from a standard KBase-style deploy.cfg file (an ini file with only
one section.) The configuration is contained in the `idmapping` section of the config file.
The keys are:
mongo-host
mongo-db
mongo-user (optional)
mongo-pwd (optional)
authentication-enabled (optional)
authentication-admin-enabled (optional)
keys specific to each authentication source. See the example deploy.cfg file in this repo
or the class variables.
dont-trust-x-ip-headers (optional)
The last key instructs the server to ignore the X-Real-IP and X-Forwarded-For
headers if set to the string 'true'.
:ivar mongo_host: the host of the MongoDB instance, including the port.
:ivar mongo_db: the MongoDB database to use for the ID mapping service.
:ivar mongo_user: the username to use with MongoDB, if any.
:ivar mongo_pwd: the password to use with MongoDB, if any.
:ivar auth_enabled: the set of authentication sources that are enabled.
:ivar auth_admin_enabled: the set of authentication sources that are trusted to define
system administrators.
:ivar ignore_ip_headers: True if the X-Real-IP and X-Forwarded-For headers should be ignored.
:ivar lookup_configs: the configurations for the user lookup instances. This is a dict
of :class:`jgikbase.idmapping.core.user.AuthsourceID` to the configuration for the lookup
instance for that authsource. The configuration is a tuple where the first entry is a
string denoting the module containing the factory method used to create the lookup
instance. The second entry is a str -> str dict containing the configuration for the
lookup instance.
"""
ENV_VAR_IDMAPPING = 'ID_MAPPING_CONFIG'
"""
The first environment variable where the system will look for the path to the config file.
"""
ENV_VAR_KB_DEP = 'KB_DEPLOYMENT_CONFIG'
"""
The second environment variable where the system will look for the path to the config file.
"""
CFG_SEC = 'idmapping'
""" The section of the config file where the configuration is located. """
_TEMP_KEY_CFG_FILE = 'temp-key-config-file'
KEY_MONGO_HOST = 'mongo-host'
""" The key corresponding to the value containing the MongoDB host. """
KEY_MONGO_DB = 'mongo-db'
""" The key corresponding to the value containing the MongoDB database. """
KEY_MONGO_USER = 'mongo-user'
""" The key corresponding to the value containing the MongoDB username. """
KEY_MONGO_PWD = '<PASSWORD>'
""" The key corresponding to the value containing the MongoDB user password. """
KEY_AUTH_ENABLED = 'authentication-enabled'
"""
The key corresponding to the value containing a comma separated list of authentication sources
that should be enabled on system start up.
"""
KEY_AUTH_ADMIN_ENABLED = 'authentication-admin-enabled'
"""
The key corresponding to the value containing a comma separated list of authentication sources
that are trusted to define system administrators.
"""
KEY_IGNORE_IP_HEADERS = 'dont-trust-x-ip-headers'
"""
    The key corresponding to the value containing a boolean designating whether the X-Real-IP
and X-Forwarded-For headers should be ignored. """
AUTH_PREFIX = 'auth-source-'
""" The prefix for keys for specific authentication sources. """
FACTORY_MODULE = '-factory-module'
"""
The suffix for the key for a specific authentication source that defines the python
module containing the factory for the user lookup instance.
"""
INIT = '-init-'
"""
The portion of the key after the authentication source name that defines the key as
a key-value configuration item.
"""
_TRUE = 'true'
def __init__(self, cfgfile: Path=None) -> None:
"""
Load the configuration.
:param cfgfile: the path to the configuration file. If not provided, the path will be
looked up in the environment variables, in order of precedence, ID_MAPPING_CONFIG and
KB_DEPLOYMENT_CONFIG.
"""
if not cfgfile:
cfgfile = self._get_cfg_from_env()
cfg = self._get_cfg(cfgfile)
self.ignore_ip_headers = self._TRUE == cfg.get(self.KEY_IGNORE_IP_HEADERS)
self.mongo_host = self._get_string(self.KEY_MONGO_HOST, cfg)
self.mongo_db = self._get_string(self.KEY_MONGO_DB, cfg)
self.mongo_user = self._get_string(self.KEY_MONGO_USER, cfg, False)
mongo_pwd = self._get_string(self.KEY_MONGO_PWD, cfg, False)
if bool(self.mongo_user) ^ bool(mongo_pwd): # xor
mongo_pwd = <PASSWORD>
raise IDMappingConfigError(
('Must provide both {} and {} params in config file ' +
'{} section {} if MongoDB authentication is to be used').format(
self.KEY_MONGO_USER, self.KEY_MONGO_PWD, cfg[self._TEMP_KEY_CFG_FILE],
self.CFG_SEC))
self.mongo_pwd = <PASSWORD>
self.auth_enabled = self._get_authsource_ids(self.KEY_AUTH_ENABLED, cfg)
self.auth_admin_enabled = self._get_authsource_ids(self.KEY_AUTH_ADMIN_ENABLED, cfg)
self.lookup_configs = self._get_lookup_configs(cfg)
def _get_cfg(self, cfgfile: Path) -> Dict[str, str]:
if not cfgfile.is_file():
raise IDMappingConfigError('{} does not exist or is not a file'.format(cfgfile))
config = configparser.ConfigParser()
with cfgfile.open() as cfg:
try:
config.read_file(cfg)
except configparser.Error as e:
raise IDMappingConfigError('Error parsing config file {}: {}'.format(
cfgfile, e)) from e
if self.CFG_SEC not in config:
raise IDMappingConfigError('No section {} found in config file {}'.format(
self.CFG_SEC, cfgfile))
sec = config[self.CFG_SEC]
# a section is not a real map and is missing methods
c = {x: sec[x] for x in sec.keys()}
c[self._TEMP_KEY_CFG_FILE] = str(cfgfile)
return c
def _get_cfg_from_env(self) -> Path:
if os.environ.get(self.ENV_VAR_IDMAPPING):
return Path(os.environ[self.ENV_VAR_IDMAPPING])
if os.environ.get(self.ENV_VAR_KB_DEP):
return Path(os.environ[self.ENV_VAR_KB_DEP])
raise IDMappingConfigError('Could not find deployment configuration file from either ' +
'permitted environment variable: {}, {}'.format(
self.ENV_VAR_IDMAPPING, self.ENV_VAR_KB_DEP))
def _get_string(self, param_name: str, config: Dict[str, str], raise_on_err: bool=True
) -> Optional[str]:
s = config.get(param_name)
if s and s.strip():
return s.strip()
elif raise_on_err:
raise IDMappingConfigError(
'Required parameter {} not provided in configuration file {}, section {}'.format(
param_name, config[self._TEMP_KEY_CFG_FILE], self.CFG_SEC))
else:
return None
def _get_authsource_ids(self, param_name: str, config: Dict[str, str]) -> Set[AuthsourceID]:
s = self._get_string(param_name, config, False)
ret: Set[AuthsourceID] = set()
if not s:
return ret
ids = s.split(',')
for id_ in ids:
try:
ret.add(AuthsourceID(id_.strip()))
except MissingParameterError as e:
raise IDMappingConfigError(
('Parameter {} in configuration file {}, section {}, has whitespace-only entry'
).format(param_name, config[self._TEMP_KEY_CFG_FILE], self.CFG_SEC, str(e))
) from e
except Exception as e:
raise IDMappingConfigError(
'Parameter {} in configuration file {}, section {}, is invalid: {}'.format(
param_name, config[self._TEMP_KEY_CFG_FILE], self.CFG_SEC, str(e))) from e
return ret
def _get_lookup_configs(self, cfg) -> Dict[AuthsourceID, Tuple[str, Dict[str, str]]]:
ret = {}
for asID in self.auth_enabled:
if asID == LocalUserLookup.LOCAL:
continue
prefix = self.AUTH_PREFIX + asID.id
factory = None
lookupcfg = {}
for key, val in cfg.items():
if key.startswith(prefix):
if key == prefix + self.FACTORY_MODULE:
factory = val.strip()
elif key.startswith(prefix + self.INIT):
lookupcfg[key[len(prefix + self.INIT):]] = val.strip()
else:
raise IDMappingConfigError(
'Unexpected parameter {} in configuration file {}, section {}'
.format(key, cfg[self._TEMP_KEY_CFG_FILE], self.CFG_SEC))
if not factory:
raise IDMappingConfigError(
'Required parameter {} not provided in configuration file {}, section {}'
.format(prefix + self.FACTORY_MODULE, cfg[self._TEMP_KEY_CFG_FILE],
self.CFG_SEC))
ret[asID] = (factory, lookupcfg)
return ret
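# Hedged illustration (added; not part of the original module): a minimal
# deploy.cfg matching the keys documented on KBaseConfig. All values below,
# including the factory module path, are hypothetical examples.
#
#     [idmapping]
#     mongo-host=localhost:27017
#     mongo-db=idmapping
#     authentication-enabled=local, kbase
#     authentication-admin-enabled=kbase
#     auth-source-kbase-factory-module=jgikbase.idmapping.userlookup.kbase_user_lookup
#     auth-source-kbase-init-token=<kbase auth token>
#     dont-trust-x-ip-headers=false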
```
#### File: idmapping/storage/id_mapping_storage.py
```python
from abc import abstractmethod as _abstractmethod # pragma: no cover
from abc import ABCMeta as _ABCMeta # pragma: no cover
from jgikbase.idmapping.core.object_id import NamespaceID # pragma: no cover
from jgikbase.idmapping.core.user import User, Username # pragma: no cover
from jgikbase.idmapping.core.tokens import HashedToken # pragma: no cover
from jgikbase.idmapping.core.object_id import Namespace # pragma: no cover
from typing import Iterable, Set, Tuple # pragma: no cover
from jgikbase.idmapping.core.object_id import ObjectID # pragma: no cover
from typing import Dict
class IDMappingStorage: # pragma: no cover
"""
An interface for a storage system for ID mappings. All methods are abstract.
"""
__metaclass__ = _ABCMeta
@_abstractmethod
def create_local_user(self, username: Username, token: HashedToken) -> None:
"""
Create a user.
Once created, users cannot be removed. The client programmer is responsible for
ensuring that the token provided does not already exist in the database.
:param username: the user name.
:param token: the user's token after applying a hash function.
:raises ValueError: if the token already exists in the database.
:raises TypeError: if any of the arguments are None.
:raises UserExistsError: if the user already exists.
:raises IDMappingStorageError: if an unexpected error occurs.
"""
raise NotImplementedError()
@_abstractmethod
def set_local_user_as_admin(self, username: Username, admin: bool) -> None:
'''
Mark a user as a system admin. Or not.
:param username: the name of the user to alter.
:param admin: True to give the user admin privileges, False to remove them. If the user
is already in the given state, no further action is taken.
        :raises TypeError: if the username is None.
'''
raise NotImplementedError()
@_abstractmethod
def update_local_user_token(self, username: Username, token: HashedToken) -> None:
"""
Update an existing user's token.
:param username: the user name.
:param token: the user's token after applying a hash function.
:raises ValueError: if the token already exists in the database.
:raises TypeError: if any of the arguments are None.
:raises NoSuchUserError: if the user does not exist.
:raises IDMappingStorageError: if an unexpected error occurs.
"""
raise NotImplementedError()
@_abstractmethod
def get_user(self, token: HashedToken) -> Tuple[Username, bool]:
"""
Get the user, if any, associated with a hashed token.
:param token: the hashed token.
:raises TypeError: if the token is None.
:raises InvalidTokenError: if the token does not exist in the storage system.
:raises IDMappingStorageError: if an unexpected error occurs.
:returns: a tuple of the username corresponding to the token and a boolean denoting
whether the user is an admin or not.
"""
raise NotImplementedError()
@_abstractmethod
def get_users(self) -> Dict[Username, bool]:
"""
Get all the users in the system.
:raises IDMappingStorageError: if an unexpected error occurs.
:returns: a mapping of username to a boolean denoting whether the user is an admin or not.
"""
raise NotImplementedError()
@_abstractmethod
def user_exists(self, username: Username) -> bool:
'''
        Check if a user exists in the system. Returns True if so.
:param username: the username to check.
:raises TypeError: if the username is None.
'''
raise NotImplementedError()
@_abstractmethod
def create_namespace(self, namespace_id: NamespaceID) -> None:
"""
Create a new namespace. Once created, namespaces cannot be removed.
:param namespace_id: The namespace to create.
:raises TypeError: if the namespace ID is None.
:raises NamespaceExistsError: if the namespace already exists.
"""
raise NotImplementedError()
@_abstractmethod
def add_user_to_namespace(self, namespace_id: NamespaceID, admin_user: User) -> None:
"""
Add a user to a namespace, giving them administration rights. A noop occurs if the user
is already an administrator for the namespace.
:param namespace_id: the namespace to modify.
:param admin_user: the user.
:raises TypeError: if any of the arguments are None.
:raises NoSuchNamespaceError: if the namespace does not exist.
:raises UserExistsError: if the user already administrates the namespace.
"""
raise NotImplementedError()
@_abstractmethod
def remove_user_from_namespace(self, namespace_id: NamespaceID, admin_user: User) -> None:
"""
Remove a user from a namespace, removing their administration rights.
:param namespace_id: the namespace to modify.
:param admin_user: the user.
:raises TypeError: if any of the arguments are None.
:raises NoSuchNamespaceError: if the namespace does not exist.
:raises NoSuchUserError: if the user does not administrate the namespace.
"""
raise NotImplementedError()
@_abstractmethod
def set_namespace_publicly_mappable(self, namespace_id: NamespaceID, publicly_mappable: bool
) -> None:
"""
Set the publicly mappable flag on a namespace.
:param namespace_id: The namespace to alter.
:param publicly_mappable: True to set the namespace to publicly mappable, False or None
to prevent public mapping.
:raises TypeError: if namespace_id is None.
:raises NoSuchNamespaceError: if the namespace does not exist.
"""
raise NotImplementedError()
@_abstractmethod
def get_namespaces(self, nids: Iterable[NamespaceID]=None) -> Set[Namespace]:
"""
Get all the namespaces in the system.
        :param nids: specific namespaces to get. By default all namespaces are returned.
:raises TypeError: if nids contains None.
:raises NoSuchNamespaceError: if any of the namespaces in the nids parameter do not
exist
"""
raise NotImplementedError()
@_abstractmethod
def get_namespace(self, namespace_id: NamespaceID) -> Namespace:
"""
Get a particular namespace.
:param namespace_id: the id of the namespace to get.
:raises TypeError: if the namespace ID is None.
:raises NoSuchNamespaceError: if the namespace does not exist.
"""
raise NotImplementedError()
@_abstractmethod
def add_mapping(self, primary_OID: ObjectID, secondary_OID: ObjectID) -> None:
"""
Create a mapping from one namespace to another.
Note that this method does NOT check for the existence of the namespaces.
If the mapping already exists, no further action is taken.
:param primary_OID: the primary namespace/ID combination.
:param secondary_OID: the secondary namespace/ID combination.
:raise TypeError: if any of the arguments are None.
:raise ValueError: if the namespace IDs are the same.
"""
raise NotImplementedError()
@_abstractmethod
def remove_mapping(self, primary_OID: ObjectID, secondary_OID: ObjectID) -> bool:
"""
Remove a mapping from one namespace to another. Returns true if a mapping was removed,
false otherwise.
:param primary_OID: the primary namespace/ID combination.
:param secondary_OID: the secondary namespace/ID combination.
:raise TypeError: if any of the arguments are None.
"""
raise NotImplementedError()
@_abstractmethod
def find_mappings(self, oid: ObjectID, ns_filter: Iterable[NamespaceID]=None
) -> Tuple[Set[ObjectID], Set[ObjectID]]:
"""
Find mappings given a namespace / id combination.
If the namespace or id does not exist, no results will be returned. The namespaces in the
filter are ignored if they do not exist.
:param oid: the namespace / id combination to match against.
:param ns_filter: a list of namespaces with which to filter the results. Only results in
these namespaces will be returned.
:returns: a tuple of sets of object IDs. The first set in the tuple contains mappings
where the provided object ID is the primary object ID, and the second set contains
mappings where the provided object ID is the secondary object ID.
:raise TypeError: if the object ID is None or the filter contains None.
"""
raise NotImplementedError()
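# Hedged illustration (added; not part of the original module): how a concrete
# backend would typically be consumed. 'SomeMongoStorage' is a hypothetical
# subclass, and the ObjectID/NamespaceID call forms are assumptions.
#
#     storage: IDMappingStorage = SomeMongoStorage(...)
#     storage.create_namespace(NamespaceID('ncbi'))
#     storage.add_mapping(ObjectID(NamespaceID('ncbi'), 'GCF_000001'),
#                         ObjectID(NamespaceID('kbase'), '1/2/3'))
#     primary, secondary = storage.find_mappings(
#         ObjectID(NamespaceID('ncbi'), 'GCF_000001'))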
```
|
{
"source": "jgilik/gitzebo",
"score": 2
}
|
#### File: gitzebo/gitzebo/schema.py
```python
import sqlalchemy, os
# DB path is currently rooted in our module directory by default
# TODO: database configuration should be externalized
db_path = os.path.join(os.path.dirname(__file__), 'gitzebo.db')
db = sqlalchemy.create_engine("sqlite:///{0}".format(db_path))
latest_version=1
from sqlalchemy import (Table, Column, MetaData,
ForeignKey, ColumnDefault,
Integer, String, Boolean, Text)
def get_metadata(version=latest_version, db=db):
metadata = MetaData(db)
schema_version = Table('_schema_version', metadata,
Column('version', Integer),
)
users = Table('users', metadata,
Column('user_id', Integer, primary_key=True),
Column('user_name', String, unique=True),
Column('pass_hash', String),
Column('pass_salt', String),
Column('commit_name', String),
Column('commit_email', String),
Column('can_create_users', Boolean, ColumnDefault(False)),
Column('can_create_repositories', Boolean, ColumnDefault(False)),
)
keys = Table('keys', metadata,
Column('key_id', Integer, primary_key=True),
Column('user_id', Integer, ForeignKey('users.user_id')),
Column('name', String),
Column('public_key', Text),
# TODO: public_key should be unique, but TEXT columns can't be (I think)
# verify me maybe?
)
repos = Table('repositories', metadata,
Column('repository_id', Integer, primary_key=True),
Column('repository_name', String, unique=True),
)
repo_acls = Table('repository_acls', metadata,
Column('user_id', Integer, ForeignKey('users.user_id')),
Column('repository_id', Integer,
ForeignKey('repositories.repository_id')),
Column('is_owner', Boolean, ColumnDefault(False)),
Column('can_write', Boolean, ColumnDefault(False)),
Column('can_rewind', Boolean, ColumnDefault(False)),
Column('can_read', Boolean, ColumnDefault(False)),
Column('can_create_tag', Boolean, ColumnDefault(False)),
Column('can_modify_tag', Boolean, ColumnDefault(False)),
)
return metadata
def get_table(table, db=db):
metadata = get_metadata(db=db)
return metadata.tables[table]
def create_schema(version=latest_version, db=db):
metadata = get_metadata(version=version, db=db)
    # Ensure that our schema versioning table doesn't exist--
    # which also ensures that we're not trying to provision over
# an existing database, as we have an explicit upgrade
# workflow.
if metadata.tables['_schema_version'].exists():
raise Exception("Database already provisioned")
# Check to see if any tables already exist, which would mean
# we're attempting to provision a database that contains some
# other application's data. If it contained our application's
# data, the _schema_version table would exist!
for table in metadata.sorted_tables:
if table.exists():
raise Exception("Table already exists: " + table.name)
# Create all of our tables
metadata.create_all(db)
# Populate schema version
version_table = metadata.tables['_schema_version']
# TODO: do we need to check for success somehow?
db.execute(version_table.insert().values(version=version))
# Create the default admin user
from users import create_user
id = create_user('admin', password='<PASSWORD>', commit_email='', can_create_users=True, can_create_repositories=True)
def upgrade_schema(db=db):
raise Exception("upgrade_schema() not implemented")
# TODO: Fix this so we can destroy older schemas that contain extraneous tables
# without problems; right now, they would be left dangling unless we
# upgraded first :o
def destroy_schema(db=db):
metadata = get_metadata(db=db)
metadata.drop_all(db)
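# --- Hedged usage sketch (added; not part of the original module) ------------
# Provision a fresh gitzebo database against the module-level sqlite engine and
# list the seeded users.
if __name__ == '__main__':  # pragma: no cover
    create_schema()
    users = get_table('users')
    for row in db.execute(users.select()):
        print(dict(row))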
```
#### File: gitzebo/gitzebo/version.py
```python
major_version = 0
minor_version = 0
patch_version = 10
def format_version():
return "{0}.{1}.{2}".format(major_version, minor_version, patch_version)
```
|
{
"source": "jgillich/free-email-domains",
"score": 3
}
|
#### File: free-email-domains/scripts/check.py
```python
from concurrent import futures
import json
import sys
import dns.resolver
import dns.exception
def _load(fn, from_json=False):
with open(fn) as f:
if from_json:
return json.load(f)
else:
return [l.strip() for l in f]
domains = _load('hubspot-free-email-domains.txt')
sorted_domains = sorted({d.lower() for d in domains if d})
if domains != sorted_domains:
print('Hubspot domains are not sorted/unique!? Compare with /tmp/sorted-free-email-domains.txt')
with open('/tmp/sorted-free-email-domains.txt', 'w') as f:
for e in sorted_domains:
f.write(e + '\n')
sys.exit(1)
domains = _load('free-email-domains.txt')
sorted_domains = sorted({d.lower() for d in domains if d})
if domains != sorted_domains:
print('Domains are not sorted/unique!? Compare with /tmp/sorted-free-email-domains.txt')
with open('/tmp/sorted-free-email-domains.txt', 'w') as f:
for e in sorted_domains:
f.write(e + '\n')
sys.exit(1)
disposable = set(_load('tmp/disposable-email-domains/index.json', from_json=True))
assert 'mailinator.com' in disposable
not_disposable = {'c2.hu'}
disposable = [d for d in domains if (d in disposable) and (d not in not_disposable)]
if disposable:
print('Disposable domains: %s' % disposable)
sys.exit(2)
disposable_wildcard = _load('tmp/disposable-email-domains/wildcard.json', from_json=True)
disposable = [d for d in domains if any(w in d for w in disposable_wildcard)]
if disposable:
print('Disposable domains (wildcards): %s' % disposable)
sys.exit(3)
def _checkMxRecord(d):
for r in ('', '8.8.8.8', '1.1.1.1', '8.8.4.4', '1.0.0.1'):
resolver = dns.resolver.Resolver()
resolver.timeout = 10
resolver.lifetime = 10
if r:
resolver.nameservers = [r]
try:
resolver.query(d, 'MX')
return
except dns.exception.DNSException:
pass
return d
with futures.ThreadPoolExecutor(max_workers=10) as executor:
no_mx = executor.map(_checkMxRecord, domains)
no_mx = [d for d in no_mx if d]
if no_mx:
print('Domains without MX records: %s' % no_mx)
sys.exit(4)
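# Hedged summary (added; not part of the original script): exit codes used above
# are 1 = list not sorted/unique, 2 = disposable domain present,
# 3 = disposable wildcard match, 4 = domain without an MX record.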
```
|
{
"source": "jgillich/gcapy",
"score": 3
}
|
#### File: gcapy/gcapy/process.py
```python
import base64
import sys
from binascii import hexlify
from enum import Enum
from .util import *
from .gcap import *
class GCAPyAction(Enum):
Metadata = 0
Extract = 1
Stats = 2
class GCAPyOutput(Enum):
Ascii = 0
Json = 1
Binary = 2
def pp_task(action, output, files, ranges):
action_name = ""
output_name = ""
if action is GCAPyAction.Metadata:
action_name = "Displaying metadata for"
elif action is GCAPyAction.Extract:
action_name = "Extracting records from"
elif action is GCAPyAction.Stats:
action_name = "Gathering stats for GameRecords in"
else:
raise RuntimeError("unhandled action")
if output is GCAPyOutput.Ascii:
output_name = "text"
elif output is GCAPyOutput.Json:
output_name = "JSON"
elif output is GCAPyOutput.Binary:
output_name = "binary"
else:
raise RuntimeError("unhandled output")
    return "%s %s with the ranges %s and outputting in %s" % \
(action_name, str(files), str(ranges), str(output_name))
def process_gcapy(files, ranges, actions, output):
output_process = None # function reference for the output processor
if output is GCAPyOutput.Ascii:
output_process = output_ascii
elif output is GCAPyOutput.Json:
output_process = output_json
elif output is GCAPyOutput.Binary:
output_process = output_binary
else:
raise RuntimeError("unhandled output")
# Step 1: fetch the required data
# Step 2: output the data in the required format
# fail fast
for f in files:
if not file_exists(f):
error("missing specfied file " + f)
return 1
for f in files:
gcap = None
info("File: " + f)
try:
gcap = GCAP.load(f)
except IOError:
error("could not open %s for reading" % f)
return 1
except GCAPFormatError as e:
error("GCAP format error: " + str(e))
return 1
except GCAPVersionError as e:
error("GCAP version error: " + str(e))
return 1
for action in actions:
if action is GCAPyAction.Metadata:
output_process(gcap.get_metadata())
elif action is GCAPyAction.Extract:
for therange in ranges:
for r in get_gcap_range(gcap, therange):
output_process(r)
elif action is GCAPyAction.Stats:
pass
return 0
def get_gcap_range(gcap, therange):
max_record = gcap.record_count()
max_iter = min(therange[1]+1, max_record)
for i in range(therange[0], max_iter):
if i < 1:
continue
if i >= max_record:
break
yield gcap.get_record(i)
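# Hedged note (added; not part of the original module): 'therange' is a
# (start, end) tuple of 1-based record indices; indices below 1 are skipped and
# iteration stops before the file's record count,
# e.g. list(get_gcap_range(gcap, (1, 100))) yields at most records 1..100.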
# output processors
def output_ascii(data):
template = ""
rtype = data['type']
number = data['number']
record = data['record']
if rtype == "METADATA":
guid = hexlify(record['guid']) if sys.version_info[0] < 3 else record['guid'].hex()
start = record['start_time']
end = record['end_time']
if end > start:
delta = end - start
else:
delta = 0
template = \
"""\
Title: "%s"
GUID: %s
Number of records: %d
Revision: %d
Start: %d End: %d Delta: %d seconds
Description:
"%s"
""" % (record['title'], guid, record['record_count'],
record['capture_revision'], start, end, delta, record['description'])
elif rtype == "GAME":
gtype = record['type']
time = float(record['timestamp'])/1e6 # microseconds since start of capture
record = record['record']
if gtype == "PACKET":
ptype = record['type']
dst = record['destination']
contents = hexlify(record['record']) if sys.version_info[0] < 3 else record['record'].hex()
if dst == "CLIENT":
src = "SERVER"
else:
src = "CLIENT"
template = \
"""\
Game record %d at %.6fs is from %s to %s with contents %s\
""" % (number, time, src, dst, contents)
print(template)
def output_json(data):
rtype = data['type']
record = data['record']
if rtype == "METADATA":
record['guid'] = hexlify(record['guid']) if sys.version_info[0] < 3 else record['guid'].hex()
record['sha256_hash'] = hexlify(record['sha256_hash']) if sys.version_info[0] < 3 else record['sha256_hash'].hex()
elif rtype == "GAME":
gtype = record['type']
record = record['record']
if gtype == "PACKET":
ptype = record['type']
record['record'] = encode_record(record['record'])
else:
record = encode_record(record)
print(json.dumps(data))
def output_binary(data):
rtype = data['type']
record = data['record']
if rtype == "GAME":
gtype = record['type']
record = record['record']
if gtype == "PACKET":
ptype = record['type']
contents = record['record']
sys.stdout.write(contents)
def encode_record(record):
if sys.version_info[0] < 3:
return base64.encodestring(record).strip()
else:
return base64.b64encode(record).decode("ascii").strip()
```
#### File: gcapy/gcapy/util.py
```python
import sys
import os
interactive = True
def error(msg):
_write_msg("error: " + msg)
def warning(msg):
_write_msg("warning: " + msg)
def info(msg):
_write_msg(msg)
def _write_msg(msg):
if not interactive:
sys.stderr.write(msg + "\n")
else:
print("I" + msg)
def file_exists(filename):
return os.path.isfile(filename)
```
|
{
"source": "jgillick/coffeetable-programs",
"score": 3
}
|
#### File: NorthernLights/shapes/BaseShape.py
```python
import time
# Colors
RED = (1,0,0)
YELLOW = (1,1,0)
GREEN = (0,1,0)
CYAN = (0,1,1)
BLUE = (0,0,1)
PURPLE = (1,0,1)
class BaseShape:
# A list of instance attribute names, which are animatable objects
animatable_attrs = []
# The time of the last animation update
last_update = None
# The number of LEDs in the strip
led_count = 0
# The color index we're setting (red: 0, green: 1, blue: 2)
color = 0
def __init__(self, led_count, color, time):
self.led_count = led_count
self.color = color
self.last_update = time
def update(self, now):
""" Updates the shape animatable attributes."""
elapsed = now - self.last_update
print(elapsed)
is_animating = False
        for anim_attr in self.animatable_attrs:
            anim = getattr(self, anim_attr)
ret = anim.update(elapsed)
if ret:
is_animating = True
self.last_update = now
return is_animating
def __len__(self):
return self.led_count
def __getitem__(self, key):
return (0,0,0)
def __setitem__(self, key, value):
""" Cannot set pixel item. """
pass
def __delitem__(self, key):
""" Cannot delete pixel color. """
pass
```
|
{
"source": "jgillick/LED-String-Controller",
"score": 4
}
|
#### File: CircuitPython/DotStar/code.py
```python
import time
import board
import adafruit_dotstar
# DotStar pins
# Change these if you want to connect an external DotStar LED(s) to different pins.
PIN_SCK = board.APA102_SCK
PIN_MOSI = board.APA102_MOSI
# The onboard RGB LED
led = adafruit_dotstar.DotStar(PIN_SCK, PIN_MOSI, 1, brightness=0.1)
def wheel(pos):
# Input a value 0 to 255 to get a color value.
# The colors are a transition r -> g -> b -> back to r.
if pos < 0 or pos > 255:
r = g = b = 0
elif pos < 85:
r = int(pos * 3)
g = int(255 - pos * 3)
b = 0
elif pos < 170:
pos -= 85
r = int(255 - pos * 3)
g = 0
b = int(pos * 3)
else:
pos -= 170
r = 0
g = int(pos * 3)
b = int(255 - pos * 3)
return (r, g, b)
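# Hedged examples (added; not part of the original sketch), following the
# branches above: wheel(0) -> (0, 255, 0), wheel(85) -> (255, 0, 0),
# wheel(170) -> (0, 0, 255).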
while True:
# Animate the onboard LED around the color wheel
for i in range(255):
led[0] = wheel(i)
time.sleep(0.001)
```
#### File: CircuitPython/TwinkleStars/code.py
```python
import time
import digitalio
import board
from pulseio import PWMOut
from random import randrange, uniform
PWM_FULL = 65535
class AnimationCtrl:
""" A general-purpose class which controls the LED animations."""
PHASE_PAUSE = 0
PHASE_RUN = 1
min_pause = 0
max_pause = 0.5
min_run = 0.1
max_run = 0.2
duration = 0.0
phase = 0
def __init__(self):
pass
def startRun(self):
"""Start an animation cycle."""
self.phase = self.PHASE_RUN
self.duration = uniform(self.min_run, self.max_run)
def endRun(self):
"""End an animation cycle and pause."""
self.phase = self.PHASE_PAUSE
self.duration = uniform(self.min_pause, self.max_pause)
def step(self, time):
"""Handle a single step in the animation.
This should be called in each program loop."""
self.duration -= time
if self.duration <= 0:
if self.phase == self.PHASE_PAUSE:
self.startRun()
else:
self.endRun()
class FadeCtrl(AnimationCtrl):
"""Create random LED fade animations."""
led = None
inc = 0
min_pause = 0.2
max_pause = 5
min_run = 0.5
max_run = 1
def __init__(self, pin):
"""Setup LED and start first fade animation."""
self.led = PWMOut(pin, frequency=5000, duty_cycle=0)
self.startRun()
def startRun(self):
"""Start a new fade animation."""
super().startRun()
self.incPerMillisecond()
if self.led.duty_cycle > 0:
self.inc *= -1 # Decrement to zero
def endRun(self):
"""The current fade animation is over."""
super().endRun()
# Finish on or off
if self.inc > 0:
self.led.duty_cycle = PWM_FULL
else:
self.led.duty_cycle = 0
self.incPerMillisecond()
    def incPerMillisecond(self):
        """Calculate how much to increment the LED per unit of elapsed time
        (seconds here, given time.monotonic() timing) so the fade completes
        by the end of the duration."""
self.inc = PWM_FULL / self.duration
def step(self, time):
"""Perform a single step of the animation. This should be called on each program loop."""
if self.phase == self.PHASE_RUN:
value = self.led.duty_cycle + int(self.inc * time)
if value < 0:
value = 0
elif value > PWM_FULL:
value = PWM_FULL
self.led.duty_cycle = value
super().step(time)
class TwinkleCtrl(AnimationCtrl):
"""Create a random 'twinkle' animation across two pins.
This assumes you have fairy LED strings attached to the two pins.
The pins will have one polarity for a random amount of time, and then swap.
"""
pin1 = None
pin2 = None
min_pause = 0
max_pause = 0.5
min_run = 0.1
max_run = 0.2
def __init__(self, pin1, pin2):
"""Setup the LEDs and start the first animation."""
super().__init__()
self.pins = [
digitalio.DigitalInOut(pin1),
digitalio.DigitalInOut(pin2),
]
for pin in self.pins:
pin.direction = digitalio.Direction.OUTPUT
pin.value = False
self.startRun()
def startRun(self):
"""Turn the LED on, in one polarity."""
self.pins[1].value = not self.pins[0].value
super().startRun()
def endRun(self):
"""Turn the LEDs off."""
self.pins[0].value = not self.pins[0].value
super().endRun()
# Initialize all LED animations
animations = [
TwinkleCtrl(board.PA9, board.PA13),
TwinkleCtrl(board.PA14, board.PB5),
FadeCtrl(board.LED1),
FadeCtrl(board.LED2),
FadeCtrl(board.LED3),
]
# Run animation steps, forever
last_time = time.monotonic()
while True:
now = time.monotonic()
time_since = now - last_time
last_time = now
for anim in animations:
anim.step(time_since)
```
|
{
"source": "jgillick/Spaces",
"score": 3
}
|
#### File: spaces/tests/test_models.py
```python
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import IntegrityError
from django.test import TestCase
from spaces.models import Space, Document, Revision
class SpaceTestCase(TestCase):
""" Test Space Models """
def setUp(self):
self.space = Space.objects.create(name='My Space!', path='mine')
def test_root_space_should_be_present(self):
""" The default application should have a __ROOT__ space """
Space.objects.get(name=Space.ROOT_SPACE_NAME)
def test_user_space_should_be_present(self):
""" The default application should have a __USER__ space """
Space.objects.get(name=Space.USER_SPACE_NAME)
def test_convert_path_to_slug(self):
""" Convert special characters in path """
space = Space.objects.create(
name='Test Space', path=' this-is / a !$ test ')
self.assertEqual(space.path, 'this-is-a-test')
def test_cannot_create_existing_space(self):
""" Cannot create a space with the same path or name """
with self.assertRaises(ValidationError):
Space.objects.create(name='My Space!', path='mine')
class DocumentTestCase(TestCase):
"""
Test the Document Model
"""
def setUp(self):
self.root_space = Space.objects.get(name=Space.ROOT_SPACE_NAME)
self.space = Space.objects.create(name='My Space!', path='mine')
self.space_other = Space.objects.create(
name='Another space', path='other')
self.user = get_user_model().objects.create_user(
username='bob',
email='<EMAIL>',
password='<PASSWORD>')
# Document hierarchy
self.doc_foo = Document.objects.create(
title='Foo',
path='foo',
space=self.space)
self.doc_bar = Document.objects.create(
title='Bar',
path='bar',
parent=self.doc_foo)
self.doc_baz = Document.objects.create(
title='Baz',
path='baz',
parent=self.doc_bar)
# Create hierarchy by full path
self.doc_uri = Document.objects.create(
title='Quick Fox',
path='quick/brown/fox',
space=self.space)
# Other document
self.doc_other = Document.objects.create(
title='Other document',
path='other-doc',
space=self.space_other)
def test_create_document_without_a_space(self):
""" All documents belong in a space. """
with self.assertRaises(ValidationError):
Document.objects.create(
title='Orphan', path='annie')
def test_cannot_create_existing_document(self):
"""
Cannot create a document with the same path under the same parent.
"""
with self.assertRaises(ValidationError):
Document.objects.create(
title='Bar',
path='bar',
parent=self.doc_foo)
def test_auto_save_parent_documents(self):
"""
If parent documents are not saved, then save them automatically
"""
d1 = Document(title='p1', path='p1', parent=self.doc_foo)
d2 = Document(title='p2', path='p2', parent=d1)
d3 = Document(title='p3', path='p3', parent=d2)
d3.save()
doc = Document.objects.get_by_path('mine/foo/p1/p2/p3')
def test_can_edit_root_document(self):
"""
The document attached to __ROOT__ can be edited
"""
doc = Document.objects.get_by_path('', create=True)
doc.title = "Root Doc"
doc.save()
def test_no_document_past_root(self):
"""
No hierarchy is allowed under the __ROOT__ space
"""
with self.assertRaises(ValidationError):
Document.objects.create(
title="Nested doc",
path="hello",
space=self.root_space)
def test_can_create_same_under_other_parent(self):
""" Can create a document with the same path under another parent """
Document.objects.create(
title='Bar',
path='bar',
parent=self.doc_bar)
def test_root_space_doc_path(self):
"""
Get the root document of a space by path
"""
doc = Document.objects.get_by_path('mine')
self.assertEqual(doc.space.path, self.space.path)
self.assertEqual(doc.path, '')
self.assertEqual(doc.parent, None)
def test_path_query_finder(self):
""" Find document by full path """
doc = Document.objects.get_by_path('mine/foo/bar/baz')
self.assertEqual(doc, self.doc_baz)
def test_path_with_extra_slashes(self):
"""
Extra slashes in a path should be ignored when searching
"""
doc = Document.objects.get_by_path('mine/foo/bar///baz/')
self.assertEqual(doc, self.doc_baz)
def test_empty_path_matches_root(self):
"""
If a path is empty, it matches the root space
"""
doc = Document.objects.get_by_path('')
self.assertEqual(doc.space, self.root_space)
def test_space_in_inferred_from_parent(self):
"""
If creating a document without a space, it's assumed from the parent
"""
self.assertEqual(self.doc_bar.space.path, self.space.path)
def test_cannot_have_parent_in_another_space(self):
"""
A document cannot have a parent that belongs to another space
"""
with self.assertRaises(ValidationError):
Document.objects.create(
title='Wrong parent',
path='wrong',
parent=self.doc_other,
space=self.space)
def test_create_with_full_path(self):
"""
Create a document with full path
"""
uri = 'foo/bar/baz/boo/foo'
doc = Document.objects.create(
title='Foo', path=uri, space=self.space)
self.assertEqual(doc.path, 'foo')
self.assertEqual(doc.full_path(), "%s/%s" % (self.space.path, uri))
def test_get_full_path(self):
"""
Check that a document generates it's full path correctly.
"""
self.assertEqual(self.doc_baz.full_path(), 'mine/foo/bar/baz')
def test_first_doc_cannot_match_space(self):
"""
No document immediate under the space, can share the space name
"""
with self.assertRaises(ValidationError):
doc = Document.objects.create(
title='Wrong',
path=self.space.path,
space=self.space)
def test_delete_document_in_path(self):
"""
When deleting a document in the middle of the path,
all children should be assigned to the parent above
"""
self.doc_bar.delete()
baz = Document.objects.get(path="baz")
self.assertEqual(baz.parent.path, self.doc_foo.path)
def test_delete_all_in_path(self):
"""
        Delete a document and all its children with the `with_children` flag
"""
self.doc_bar.delete(with_children=True)
with self.assertRaises(ObjectDoesNotExist):
Document.objects.get_by_path('mine/foo/bar/baz')
def test_cannot_delete_root_document(self):
""" The root document of a space cannot be deleted. """
doc = self.space.get_root_document()
with self.assertRaises(ValidationError):
doc.delete()
def test_special_characters_in_path(self):
"""
Path elements should have special characters parsed out
"""
path = "it's alive. bang!!bang! hash#hash"
expected = "its-alive-bangbang-hashhash"
doc = Document.objects.create(
title='Test Path',
path=path,
space=self.space)
self.assertEqual(doc.path, expected)
def test_convert_path_to_slug(self):
""" Convert special characters in path """
space = Space.objects.create(name='Test Space', path=' this-is / a !$ test ')
self.assertEqual(space.path, 'this-is-a-test')
class UserSpaceTestCase(TestCase):
"""
A User Space, is a special space reserved for a user
"""
def setUp(self):
self.space = Space.objects.get(name=Space.USER_SPACE_NAME)
def test_space_path(self):
"""
Ensure that documents cannot be put in the user root path.
That part of the path is reserved for the username:
/user/<username>/
"""
with self.assertRaises(ObjectDoesNotExist):
doc = Document.objects.create(
title='Bad',
path='user/not_a_user',
space=self.space)
class RevisionTestCase(TestCase):
"""
Test Document Revision Models
"""
def setUp(self):
space = Space.objects.create(name='My Space!', path='mine')
user = get_user_model().objects.create_user(
username='bob',
email='<EMAIL>',
password='<PASSWORD>')
# Document with 2 revisions
self.doc = Document.objects.create(
title='Foo', path='foo', space=space)
rev = Revision.objects.create(
content='Lorem ipsum dolor sit amet',
author=user,
doc=self.doc)
rev.content = 'Sed dignissim lacinia nunc.'
rev.save()
def test_multiple_revisions(self):
""" If a revision is saved, another revision will be created """
self.assertEqual(self.doc.revision_set.count(), 2)
def test_correct_revision(self):
""" A document should always reference the latest revision """
self.assertEqual(self.doc.latest.content, 'Sed dignissim lacinia nunc.')
def test_save_no_change_same_rev(self):
"""
If saving a revision that is the same as the last,
it does not create a new one
"""
initialRevCount = self.doc.revision_set.count()
self.doc.latest.save()
self.assertEqual(self.doc.revision_set.count(), initialRevCount)
def test_delete_revisions(self):
"""
Deleting a document should remove all revisions
"""
self.doc.delete()
self.assertEqual(Revision.objects.count(), 0)
```
#### File: Spaces/spaces/utils.py
```python
import re
import os
import uuid
from datetime import date
from django.conf import settings
def normalize_path(path):
"""
Normalizes a path:
* Removes extra and trailing slashes
* Slugifies each path element (spaces become dashes, special characters are removed)
"""
if path is None:
return ""
path = re.sub(r'/+', '/', path) # repeated slash
path = re.sub(r'/*$', '', path) # trailing slash
path = [to_slug(p) for p in path.split(os.sep)]
return os.sep.join(path) # preserves leading slash
def to_slug(value):
""" Convert a string to a URL slug. """
value = value.lower()
# Space to dashes
value = re.sub(r'[\s_]+', '-', value)
# Special characters
value = re.sub(r'[^a-z0-9\-]+', '', value, flags=re.I)
# Extra dashes
value = re.sub(r'\-{2,}', '-', value)
value = re.sub(r'(^\-)|(\-$)', '', value)
return value
def upload_file(f):
""" Upload a file and return the URL to it. """
# Create path under media root
name, ext = os.path.splitext(f.name)
name = "%s%s" % (str(uuid.uuid4()), ext)
path = date.today().strftime("%Y")
# Create base directory
filepath = os.path.join(settings.MEDIA_ROOT, path)
if not os.path.exists(filepath):
os.makedirs(filepath)
# Write file
filepath = os.path.join(filepath, name)
with open(filepath, 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
# Return URL
return os.path.join(settings.MEDIA_URL, path, name)
```
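A minimal usage sketch for the helpers above (not part of the repository; the import path and expected outputs are inferred from the functions and the accompanying tests):
```python
from spaces.utils import normalize_path, to_slug  # assumed import path

# Repeated and trailing slashes are collapsed, and each element is slugified.
print(normalize_path("mine//foo/bar/"))  # -> "mine/foo/bar"

# Titles become URL-safe slugs: lowercase, dashes, no punctuation.
print(to_slug("It's Alive!"))            # -> "its-alive"
```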
|
{
"source": "jgillies/dbt-helper",
"score": 2
}
|
#### File: integration/007_open_test/test_open.py
```python
from test.integration.base import DBTIntegrationTest
class OpenTest(DBTIntegrationTest):
"""
Want to test that this works for each file type, and for installed packages
"""
@property
def models(self):
return "test/integration/007_open_test/models"
@property
def packages_config(self):
return {
"packages": [{"local": "test/integration/007_open_test/local_dependency"}]
}
def check_model_file_opened(self, args):
result = self.run_dbthelper(["open"] + args)
return result == 0
def test_open(self):
self.run_dbt(["deps"])
self.run_dbt(["run"])
self.assertTrue(self.check_model_file_opened(["my_model"]))
self.assertTrue(self.check_model_file_opened(["my_model", "--compiled"]))
self.assertTrue(self.check_model_file_opened(["my_model", "--run"]))
self.assertTrue(self.check_model_file_opened(["my_model", "--source"]))
self.assertTrue(self.check_model_file_opened(["my_package_model"]))
self.assertTrue(self.check_model_file_opened(["my_package_model", "-c"]))
self.assertTrue(self.check_model_file_opened(["my_package_model", "-r"]))
self.assertTrue(self.check_model_file_opened(["my_package_model", "-s"]))
```
|
{
"source": "jgillis/delocate",
"score": 2
}
|
#### File: delocate/tests/test_delocating.py
```python
from __future__ import division, print_function
import os
import shutil
import subprocess
from collections import namedtuple
from os.path import basename, dirname
from os.path import join as pjoin
from os.path import realpath, relpath, splitext
from typing import Any, Callable, Dict, Iterable, List, Set, Text, Tuple
import pytest
from ..delocating import (
DelocationError,
bads_report,
check_archs,
copy_recurse,
delocate_path,
delocate_tree_libs,
filter_system_libs,
)
from ..libsana import (
search_environment_for_lib,
tree_libs,
tree_libs_from_directory,
)
from ..tmpdirs import InTemporaryDirectory
from ..tools import get_install_names, set_install_name
from .env_tools import TempDirWithoutEnvVars
from .pytest_tools import assert_equal, assert_raises
from .test_install_names import EXT_LIBS, LIBA, LIBB, LIBC, TEST_LIB, _copy_libs
from .test_tools import (
ARCH_32,
ARCH_64,
ARCH_BOTH,
ARCH_M1,
LIB64,
LIB64A,
LIBBOTH,
LIBM1,
)
LibtreeLibs = namedtuple(
"LibtreeLibs", ("liba", "libb", "libc", "test_lib", "slibc", "stest_lib")
)
def _make_libtree(out_path: str) -> LibtreeLibs:
liba, libb, libc, test_lib = _copy_libs(
[LIBA, LIBB, LIBC, TEST_LIB], out_path
)
sub_path = pjoin(out_path, "subsub")
slibc, stest_lib = _copy_libs([libc, test_lib], sub_path)
# Set execute permissions
for exe in (test_lib, stest_lib):
os.chmod(exe, 0o744)
# Check test-lib doesn't work because of relative library paths
with pytest.raises(subprocess.CalledProcessError):
subprocess.run([test_lib], check=True)
with pytest.raises(subprocess.CalledProcessError):
subprocess.run([stest_lib], check=True)
# Fixup the relative path library names by setting absolute paths
for fname, using, path in (
(libb, "liba.dylib", out_path),
(libc, "liba.dylib", out_path),
(libc, "libb.dylib", out_path),
(test_lib, "libc.dylib", out_path),
(slibc, "liba.dylib", out_path),
(slibc, "libb.dylib", out_path),
(stest_lib, "libc.dylib", sub_path),
):
set_install_name(fname, using, pjoin(path, using))
# Check scripts now execute correctly
subprocess.run([test_lib], check=True)
subprocess.run([stest_lib], check=True)
return LibtreeLibs(liba, libb, libc, test_lib, slibc, stest_lib)
def without_system_libs(obj):
# Until Big Sur, we could copy system libraries. Now:
# https://developer.apple.com/documentation/macos-release-notes/macos-big-sur-11_0_1-release-notes
# - nearly all the system libraries are in a dynamic linker cache and
# do not exist on the filesystem. We're obliged to use
# `filter_system_libs` to avoid trying to copy these files.
out = [e for e in obj if filter_system_libs(e)]
if isinstance(obj, dict):
out = {k: obj[k] for k in out}
return out
@pytest.mark.filterwarnings("ignore:tree_libs:DeprecationWarning")
@pytest.mark.parametrize(
"tree_libs_func", [tree_libs, tree_libs_from_directory]
)
def test_delocate_tree_libs(
tree_libs_func: Callable[[str], Dict[Text, Dict[Text, Text]]]
) -> None:
# Test routine to copy library dependencies into a local directory
with InTemporaryDirectory() as tmpdir:
# Copy libs into a temporary directory
subtree = pjoin(tmpdir, "subtree")
all_local_libs = _make_libtree(subtree)
liba, libb, libc, test_lib, slibc, stest_lib = all_local_libs
copy_dir = "dynlibs"
os.makedirs(copy_dir)
# First check that a missing out-of-tree library causes an error.
sys_lib = EXT_LIBS[0]
lib_dict = without_system_libs(tree_libs_func(subtree))
lib_dict.update({"/unlikely/libname.dylib": {}})
with pytest.raises(DelocationError):
delocate_tree_libs(lib_dict, copy_dir, subtree)
lib_dict = without_system_libs(tree_libs_func(subtree))
copied = delocate_tree_libs(lib_dict, copy_dir, subtree)
# There are no out-of-tree libraries, nothing gets copied
assert len(copied) == 0
# Make an out-of-tree library to test against.
os.makedirs("out_of_tree")
fake_lib = realpath(pjoin("out_of_tree", "libfake.dylib"))
shutil.copyfile(liba, fake_lib)
set_install_name(liba, sys_lib, fake_lib)
lib_dict = without_system_libs(tree_libs_func(subtree))
copied = delocate_tree_libs(lib_dict, copy_dir, subtree)
# Out-of-tree library copied.
assert copied == {fake_lib: {realpath(liba): fake_lib}}
assert os.listdir(copy_dir) == [basename(fake_lib)]
# Library using the copied library now has an
# install name starting with @loader_path, then
# pointing to the copied library directory
pathto_copies = relpath(realpath(copy_dir), dirname(realpath(liba)))
lib_inames = without_system_libs(get_install_names(liba))
new_link = f"@loader_path/{pathto_copies}/{basename(fake_lib)}"
assert [new_link] <= lib_inames
# Libraries now have a relative loader_path to their corresponding
# in-tree libraries
for requiring, using, rel_path in (
(libb, "liba.dylib", ""),
(libc, "liba.dylib", ""),
(libc, "libb.dylib", ""),
(test_lib, "libc.dylib", ""),
(slibc, "liba.dylib", "../"),
(slibc, "libb.dylib", "../"),
(stest_lib, "libc.dylib", ""),
):
loader_path = "@loader_path/" + rel_path + using
not_sys_req = without_system_libs(get_install_names(requiring))
assert loader_path in not_sys_req
# Another copy to delocate, now without faked out-of-tree dependency.
subtree = pjoin(tmpdir, "subtree1")
out_libs = _make_libtree(subtree)
lib_dict = without_system_libs(tree_libs_func(subtree))
copied = delocate_tree_libs(lib_dict, copy_dir, subtree)
# Now no out-of-tree libraries, nothing copied.
assert copied == {}
# Check test libs still work
subprocess.run([out_libs.test_lib], check=True)
subprocess.run([out_libs.stest_lib], check=True)
# Check case where all local libraries are out of tree
subtree2 = pjoin(tmpdir, "subtree2")
liba, libb, libc, test_lib, slibc, stest_lib = _make_libtree(subtree2)
copy_dir2 = "dynlibs2"
os.makedirs(copy_dir2)
# Trying to delocate where all local libraries appear to be
# out-of-tree will raise an error because of duplicate library names
# (libc and slibc both named <something>/libc.dylib)
lib_dict2 = without_system_libs(tree_libs_func(subtree2))
with pytest.raises(DelocationError):
delocate_tree_libs(lib_dict2, copy_dir2, "/fictional")
# Rename a library to make this work
new_slibc = pjoin(dirname(slibc), "libc2.dylib")
os.rename(slibc, new_slibc)
# Tell test-lib about this
set_install_name(stest_lib, slibc, new_slibc)
slibc = new_slibc
# Confirm new test-lib still works
subprocess.run([test_lib], check=True)
subprocess.run([stest_lib], check=True)
# Delocation now works
lib_dict2 = without_system_libs(tree_libs_func(subtree2))
copied2 = delocate_tree_libs(lib_dict2, copy_dir2, "/fictional")
local_libs = [liba, libb, libc, slibc, test_lib, stest_lib]
rp_liba, rp_libb, rp_libc, rp_slibc, rp_test_lib, rp_stest_lib = [
realpath(L) for L in local_libs
]
exp_dict = {
rp_libc: {rp_test_lib: libc},
rp_slibc: {rp_stest_lib: slibc},
rp_libb: {rp_slibc: libb, rp_libc: libb},
rp_liba: {rp_slibc: liba, rp_libc: liba, rp_libb: liba},
}
assert copied2 == exp_dict
ext_local_libs = {liba, libb, libc, slibc}
assert set(os.listdir(copy_dir2)) == {
basename(lib) for lib in ext_local_libs
}
# Libraries using the copied libraries now have an install name starting
# with @loader_path, then pointing to the copied library directory
for lib in (liba, libb, libc, test_lib, slibc, stest_lib):
pathto_copies = relpath(realpath(copy_dir2), dirname(realpath(lib)))
lib_inames = get_install_names(lib)
new_links = [
f"@loader_path/{pathto_copies}/{basename(elib)}"
for elib in copied
]
assert set(new_links) <= set(lib_inames)
def _copy_fixpath(files: Iterable[str], directory: str) -> List[str]:
new_fnames = []
for fname in files:
shutil.copy2(fname, directory)
new_fname = pjoin(directory, basename(fname))
for name in get_install_names(fname):
if name.startswith("lib"):
set_install_name(new_fname, name, pjoin(directory, name))
new_fnames.append(new_fname)
return new_fnames
def _copy_to(fname: str, directory: str, new_base: str) -> str:
new_name = pjoin(directory, new_base)
shutil.copy2(fname, new_name)
return new_name
@pytest.mark.filterwarnings("ignore:tree_libs:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:copy_recurse:DeprecationWarning")
def test_copy_recurse() -> None:
# Function to find / copy needed libraries recursively
with InTemporaryDirectory():
# Get some fixed up libraries to play with
os.makedirs("libcopy")
test_lib, liba, libb, libc = _copy_fixpath(
[TEST_LIB, LIBA, LIBB, LIBC], "libcopy"
)
# Set execute permissions
os.chmod(test_lib, 0o744)
# Check system finds libraries
subprocess.run(["./libcopy/test-lib"], check=True)
# One library, depends only on system libs, system libs filtered
def filt_func(libname: str) -> bool:
return not libname.startswith("/usr/lib")
os.makedirs("subtree")
_copy_fixpath([LIBA], "subtree")
# Nothing copied therefore
assert copy_recurse("subtree", copy_filt_func=filt_func) == {}
assert set(os.listdir("subtree")) == {"liba.dylib"}
# shortcut
_rp = realpath
# An object that depends on a library that depends on two libraries
# test_lib depends on libc, libc depends on liba and libb. libc gets
# copied first, then liba, libb
def _st(fname: str) -> str:
return _rp(pjoin("subtree2", basename(fname)))
os.makedirs("subtree2")
shutil.copy2(test_lib, "subtree2")
assert copy_recurse("subtree2", filt_func) == {
_rp(libc): {_st(test_lib): libc},
_rp(libb): {_rp(libc): libb},
_rp(liba): {_rp(libb): liba, _rp(libc): liba},
}
assert set(os.listdir("subtree2")) == {
"liba.dylib",
"libb.dylib",
"libc.dylib",
"test-lib",
}
# A circular set of libraries
os.makedirs("libcopy2")
libw = _copy_to(LIBA, "libcopy2", "libw.dylib")
libx = _copy_to(LIBA, "libcopy2", "libx.dylib")
liby = _copy_to(LIBA, "libcopy2", "liby.dylib")
libz = _copy_to(LIBA, "libcopy2", "libz.dylib")
# targets and dependencies. A copy of libw starts in the directory,
# first pass should install libx and liby (dependencies of libw),
# second pass should install libz, libw (dependencies of liby, libx
# respectively)
t_dep1_dep2 = (
(libw, libx, liby), # libw depends on libx, liby
(libx, libw, liby), # libx depends on libw, liby
(liby, libw, libz), # liby depends on libw, libz
(libz, libw, libx),
) # libz depends on libw, libx
for tlib, dep1, dep2 in t_dep1_dep2:
set_install_name(tlib, EXT_LIBS[0], dep1)
set_install_name(tlib, EXT_LIBS[1], dep2)
os.makedirs("subtree3")
seed_path = pjoin("subtree3", "seed")
shutil.copy2(libw, seed_path)
assert copy_recurse("subtree3") == { # not filtered
# First pass, libx, liby get copied
_rp(libx): {
_rp(seed_path): libx,
_rp(libw): libx,
_rp(libz): libx,
},
_rp(liby): {
_rp(seed_path): liby,
_rp(libw): liby,
_rp(libx): liby,
},
_rp(libw): {_rp(libx): libw, _rp(liby): libw, _rp(libz): libw},
_rp(libz): {_rp(liby): libz},
}
assert set(os.listdir("subtree3")) == {
"seed",
"libw.dylib",
"libx.dylib",
"liby.dylib",
"libz.dylib",
}
for tlib, dep1, dep2 in t_dep1_dep2:
out_lib = pjoin("subtree3", basename(tlib))
assert set(get_install_names(out_lib)) == {
"@loader_path/" + basename(dep1),
"@loader_path/" + basename(dep2),
}
# Check case of not-empty copied_libs
os.makedirs("subtree4")
shutil.copy2(libw, "subtree4")
copied_libs = {
_rp(libw): {_rp(libx): libw, _rp(liby): libw, _rp(libz): libw}
}
copied_copied = copied_libs.copy()
assert copy_recurse("subtree4", None, copied_libs) == {
_rp(libw): {_rp(libx): libw, _rp(liby): libw, _rp(libz): libw},
_rp(libx): {_rp(libw): libx, _rp(libz): libx},
_rp(liby): {_rp(libw): liby, _rp(libx): liby},
_rp(libz): {_rp(liby): libz},
}
# Not modified in-place
assert copied_libs == copied_copied
@pytest.mark.filterwarnings("ignore:tree_libs:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:copy_recurse:DeprecationWarning")
def test_copy_recurse_overwrite():
# type: () -> None
# Check that copy_recurse won't overwrite pre-existing libs
with InTemporaryDirectory():
# Get some fixed up libraries to play with
os.makedirs("libcopy")
test_lib, liba, libb, libc = _copy_fixpath(
[TEST_LIB, LIBA, LIBB, LIBC], "libcopy"
)
# Filter system libs
def filt_func(libname):
# type: (Text) -> bool
return not libname.startswith("/usr/lib")
os.makedirs("subtree")
# libb depends on liba
shutil.copy2(libb, "subtree")
# If liba is already present, barf
shutil.copy2(liba, "subtree")
assert_raises(DelocationError, copy_recurse, "subtree", filt_func)
# Works if liba not present
os.unlink(pjoin("subtree", "liba.dylib"))
copy_recurse("subtree", filt_func)
def test_delocate_path() -> None:
# Test high-level path delocator script
with InTemporaryDirectory():
# Make a tree; use realpath for OSX /private/var - /var
_, _, _, test_lib, slibc, stest_lib = _make_libtree(realpath("subtree"))
# Check it fixes up correctly
assert delocate_path("subtree", "deplibs") == {}
assert len(os.listdir("deplibs")) == 0
subprocess.run([test_lib], check=True)
subprocess.run([stest_lib], check=True)
# Make a fake external library to link to
os.makedirs("fakelibs")
fake_lib = realpath(_copy_to(LIBA, "fakelibs", "libfake.dylib"))
_, _, _, test_lib, slibc, stest_lib = _make_libtree(
realpath("subtree2")
)
set_install_name(slibc, EXT_LIBS[0], fake_lib)
# shortcut
_rp = realpath
# Check fake library gets copied and delocated
slc_rel = pjoin("subtree2", "subsub", "libc.dylib")
assert delocate_path("subtree2", "deplibs2") == {
_rp(fake_lib): {_rp(slc_rel): fake_lib}
}
assert os.listdir("deplibs2") == ["libfake.dylib"]
assert "@loader_path/../../deplibs2/libfake.dylib" in get_install_names(
slibc
)
# Unless we set the filter otherwise
_, _, _, test_lib, slibc, stest_lib = _make_libtree(
realpath("subtree3")
)
set_install_name(slibc, EXT_LIBS[0], fake_lib)
def filt(libname: str) -> bool:
return not (libname.startswith("/usr") or "libfake" in libname)
assert delocate_path("subtree3", "deplibs3", None, filt) == {}
assert len(os.listdir("deplibs3")) == 0
# Test tree names filtering works
_, _, _, test_lib, slibc, stest_lib = _make_libtree(
realpath("subtree4")
)
set_install_name(slibc, EXT_LIBS[0], fake_lib)
def lib_filt(filename: str) -> bool:
return not filename.endswith("subsub/libc.dylib")
assert delocate_path("subtree4", "deplibs4", lib_filt) == {}
assert len(os.listdir("deplibs4")) == 0
# Check can use already existing directory
os.makedirs("deplibs5")
_, _, _, test_lib, slibc, stest_lib = _make_libtree(
realpath("subtree5")
)
assert delocate_path("subtree5", "deplibs5") == {}
assert len(os.listdir("deplibs5")) == 0
# Check invalid string
with pytest.raises(TypeError):
delocate_path("subtree5", "deplibs5", lib_filt_func="invalid-str")
def _make_bare_depends():
# type: () -> Tuple[Text, Text]
# Copy:
# * liba.dylib to 'libs' dir, which is a dependency of libb.dylib
# * libb.dylib to 'subtree' dir, as 'libb' (no extension).
#
# This is for testing delocation when the depending file does not have a
# dynamic library file extension.
(libb,) = _copy_libs([LIBB], "subtree")
(liba,) = _copy_libs([LIBA], "libs")
bare_b, _ = splitext(libb)
os.rename(libb, bare_b)
# use realpath for OSX /private/var - /var
set_install_name(bare_b, "liba.dylib", realpath(liba))
return liba, bare_b
def test_delocate_path_dylibs():
# type: () -> None
# Test options for delocating everything, or just dynamic libraries
_rp = realpath # shortcut
with InTemporaryDirectory():
# With 'dylibs-only' - does not inspect non-dylib files
liba, bare_b = _make_bare_depends()
assert_equal(
delocate_path("subtree", "deplibs", lib_filt_func="dylibs-only"),
{},
)
assert_equal(len(os.listdir("deplibs")), 0)
# None - does inspect non-dylib files
assert_equal(
delocate_path("subtree", "deplibs", None),
{_rp(pjoin("libs", "liba.dylib")): {_rp(bare_b): _rp(liba)}},
)
assert_equal(os.listdir("deplibs"), ["liba.dylib"])
with InTemporaryDirectory():
# Callable, dylibs only, does not inspect
liba, bare_b = _make_bare_depends()
def func(fn):
# type: (Text) -> bool
return fn.endswith(".dylib")
assert_equal(delocate_path("subtree", "deplibs", func), {})
def func(fn):
# type: (Text) -> bool
return fn.endswith("libb")
assert_equal(
delocate_path("subtree", "deplibs", None),
{_rp(pjoin("libs", "liba.dylib")): {_rp(bare_b): _rp(liba)}},
)
def test_check_archs():
# type: () -> None
# Test utility to check architectures in copied_libs dict
# No libs always OK
s0 = set() # type: Set[Any]
assert_equal(check_archs({}), s0)
# One lib to itself OK
lib_M1_M1 = {LIBM1: {LIBM1: "install_name"}}
lib_64_64 = {LIB64: {LIB64: "install_name"}}
assert_equal(check_archs(lib_M1_M1), s0)
assert_equal(check_archs(lib_64_64), s0)
# OK matching to another static lib of same arch
assert_equal(check_archs({LIB64A: {LIB64: "install_name"}}), s0)
# Or two libs
two_libs = {
LIB64A: {LIB64: "install_name"},
LIBM1: {LIBM1: "install_name"},
}
assert_equal(check_archs(two_libs), s0)
# Same as empty sequence required_args argument
assert_equal(check_archs(lib_M1_M1, ()), s0)
assert_equal(check_archs(lib_64_64, ()), s0)
assert_equal(check_archs(two_libs, ()), s0)
assert_equal(check_archs(two_libs, []), s0)
assert_equal(check_archs(two_libs, set()), s0)
# bads if we require more archs than present
for in_libs, exp_arch, missing in (
(lib_M1_M1, ARCH_64, ARCH_64),
(lib_M1_M1, ARCH_BOTH, ARCH_64),
(lib_M1_M1, "x86_64", ARCH_64),
(lib_M1_M1, "universal2", ARCH_64),
(lib_64_64, ARCH_M1, ARCH_M1),
(lib_64_64, ARCH_BOTH, ARCH_M1),
(lib_64_64, "arm64", ARCH_M1),
(lib_64_64, "intel", ARCH_32),
(lib_64_64, "universal2", ARCH_M1),
):
ded, value = list(in_libs.items())[0]
ding, _ = list(value.items())[0]
arch_check = check_archs(in_libs, exp_arch)
assert_equal(arch_check, {(ding, missing)})
# Two libs
assert_equal(check_archs(two_libs, ARCH_M1), {(LIB64, ARCH_M1)})
assert_equal(check_archs(two_libs, ARCH_64), {(LIBM1, ARCH_64)})
assert_equal(
check_archs(two_libs, ARCH_BOTH), {(LIB64, ARCH_M1), (LIBM1, ARCH_64)}
)
# Libs must match architecture with second arg of None
assert_equal(
check_archs({LIB64: {LIBM1: "install_name"}}),
{(LIB64, LIBM1, ARCH_M1)},
)
assert_equal(
check_archs(
{
LIB64A: {LIB64: "install_name"},
LIBM1: {LIBM1: "install_name"},
LIB64: {LIBM1: "install_name"},
}
),
{(LIB64, LIBM1, ARCH_M1)},
)
# For single archs depending, dual archs in depended is OK
assert check_archs({LIBBOTH: {LIB64A: "install_name"}}) == s0
# For dual archs in depending, both must be present
assert_equal(check_archs({LIBBOTH: {LIBBOTH: "install_name"}}), s0)
assert_equal(
check_archs({LIB64A: {LIBBOTH: "install_name"}}),
{(LIB64A, LIBBOTH, ARCH_M1)},
)
# More than one bad
in_dict = {
LIB64A: {LIBBOTH: "install_name"},
LIB64: {LIBM1: "install_name"},
}
exp_res = {(LIB64A, LIBBOTH, ARCH_M1), (LIB64, LIBM1, ARCH_M1)}
assert_equal(check_archs(in_dict), exp_res)
# Check stop_fast flag; can't predict return, but there should only be one
stopped = check_archs(in_dict, (), True)
assert_equal(len(stopped), 1)
# More than one bad in dependings
assert_equal(
check_archs(
{
LIB64A: {LIBBOTH: "install_name", LIBM1: "install_name"},
LIB64: {LIBM1: "install_name"},
}
),
{
(LIB64A, LIBBOTH, ARCH_M1),
(LIB64A, LIBM1, ARCH_M1),
(LIB64, LIBM1, ARCH_M1),
},
)
def test_bads_report():
# type: () -> None
# Test bads_report of architecture errors
# No bads, no report
assert_equal(bads_report(set()), "")
fmt_str_2 = "Required arch arm64 missing from {0}"
fmt_str_3 = "{0} needs arch arm64 missing from {1}"
# One line report
assert_equal(
bads_report({(LIB64, LIBM1, ARCH_M1)}), fmt_str_3.format(LIBM1, LIB64)
)
# One line report applying path stripper
assert_equal(
bads_report({(LIB64, LIBM1, ARCH_M1)}, dirname(LIB64)),
fmt_str_3.format(basename(LIBM1), basename(LIB64)),
)
# Multi-line report
report = bads_report(
{
(LIB64A, LIBBOTH, ARCH_M1),
(LIB64A, LIBM1, ARCH_M1),
(LIB64, LIBM1, ARCH_M1),
}
)
expected = {
fmt_str_3.format(LIBM1, LIB64A),
fmt_str_3.format(LIBM1, LIB64),
fmt_str_3.format(LIBBOTH, LIB64A),
}
# Set ordering undefined.
assert_equal(set(report.splitlines()), expected)
# Two tuples and three tuples
report2 = bads_report(
{(LIB64A, LIBBOTH, ARCH_M1), (LIB64, ARCH_M1), (LIBM1, ARCH_M1)}
)
expected2 = {
fmt_str_3.format(LIBBOTH, LIB64A),
fmt_str_2.format(LIB64),
fmt_str_2.format(LIBM1),
}
assert_equal(set(report2.splitlines()), expected2)
# Tuples must be length 2 or 3
assert_raises(
ValueError,
bads_report,
{(LIB64A, LIBBOTH, ARCH_M1), (LIB64,), (LIBM1, ARCH_M1)},
)
# Tuples must be length 2 or 3
assert_raises(
ValueError,
bads_report,
{
(LIB64A, LIBBOTH, ARCH_M1),
(LIB64, LIB64, ARCH_M1, ARCH_64),
(LIBM1, ARCH_M1),
},
)
def test_dyld_library_path_lookups() -> None:
# Test that DYLD_LIBRARY_PATH can be used to find libs during
# delocation
with TempDirWithoutEnvVars("DYLD_LIBRARY_PATH") as tmpdir:
# Copy libs into a temporary directory
subtree = pjoin(tmpdir, "subtree")
all_local_libs = _make_libtree(subtree)
liba, libb, libc, test_lib, slibc, stest_lib = all_local_libs
# move libb and confirm that test_lib doesn't work
hidden_dir = "hidden"
os.mkdir(hidden_dir)
new_libb = os.path.join(hidden_dir, os.path.basename(LIBB))
shutil.move(libb, new_libb)
with pytest.raises(subprocess.CalledProcessError):
subprocess.run([test_lib], check=True)
# Update DYLD_LIBRARY_PATH and confirm that we can now
# successfully delocate test_lib
os.environ["DYLD_LIBRARY_PATH"] = hidden_dir
delocate_path("subtree", "deplibs")
subprocess.run([test_lib], check=True)
def test_dyld_library_path_beats_basename():
# type: () -> None
# Test that we find libraries on DYLD_LIBRARY_PATH before basename
with TempDirWithoutEnvVars("DYLD_LIBRARY_PATH") as tmpdir:
# Copy libs into a temporary directory
subtree = pjoin(tmpdir, "subtree")
all_local_libs = _make_libtree(subtree)
liba, libb, libc, test_lib, slibc, stest_lib = all_local_libs
# Copy liba into a subdirectory
subdir = os.path.join(subtree, "subdir")
os.mkdir(subdir)
new_libb = os.path.join(subdir, os.path.basename(LIBB))
shutil.copyfile(libb, new_libb)
# Without updating the environment variable, we find the lib normally
predicted_lib_location = search_environment_for_lib(libb)
# tmpdir can end up in /var, and that can be symlinked to
# /private/var, so we'll use realpath to resolve the two
assert_equal(predicted_lib_location, os.path.realpath(libb))
# Updating shows us the new lib
os.environ["DYLD_LIBRARY_PATH"] = subdir
predicted_lib_location = search_environment_for_lib(libb)
assert_equal(predicted_lib_location, realpath(new_libb))
def test_dyld_fallback_library_path_loses_to_basename():
# type: () -> None
# Test that we find libraries on basename before DYLD_FALLBACK_LIBRARY_PATH
with TempDirWithoutEnvVars("DYLD_FALLBACK_LIBRARY_PATH") as tmpdir:
# Copy libs into a temporary directory
subtree = pjoin(tmpdir, "subtree")
all_local_libs = _make_libtree(subtree)
liba, libb, libc, test_lib, slibc, stest_lib = all_local_libs
# Copy liba into a subdirectory
subdir = "subdir"
os.mkdir(subdir)
new_libb = os.path.join(subdir, os.path.basename(LIBB))
shutil.copyfile(libb, new_libb)
os.environ["DYLD_FALLBACK_LIBRARY_PATH"] = subdir
predicted_lib_location = search_environment_for_lib(libb)
# tmpdir can end up in /var, and that can be symlinked to
# /private/var, so we'll use realpath to resolve the two
assert_equal(predicted_lib_location, os.path.realpath(libb))
```
#### File: subpkg/tests/test_fakepkg.py
```python
from namespace.subpkg.module2 import func2, func3 # type: ignore
def test_fakepkg():
assert func2() == 2
assert func3() == 3
```
#### File: fakepkg/tests/test_fakepkg.py
```python
from delocate.tests.pytest_tools import assert_equal
from ..module1 import func1
from ..subpkg.module2 import func2, func3
def test_fakepkg():
assert_equal(func1(), 1)
assert_equal(func2(), 2)
assert_equal(func3(), 3)
```
|
{
"source": "jgillis/optimization-engine",
"score": 2
}
|
#### File: opengen/config/build_config.py
```python
from opengen.config.tcp_server_config import TcpServerConfiguration
import random
import string
class BuildConfiguration:
"""Build configuration
Configuration for the code generator
"""
def __init__(self, build_dir="."):
"""
Construct an instance of BuildConfiguration
:param build_dir: Target directory, defaults to the current directory
:return: A new instance of BuildConfiguration
"""
random_string = ''.join(random.choice(string.ascii_letters) for _i in range(20))
self.__target_system = None
self.__build_mode = 'release'
self.__id = random_string
self.__cost_function_name = 'phi_' + random_string
self.__grad_cost_function_name = 'grad_phi_' + random_string
self.__constraint_penalty_function = 'mapping_f2_' + random_string
self.__alm_constraints_mapping_f1 = 'mapping_f1_' + random_string
self.__rebuild = False
self.__build_dir = build_dir
self.__open_version = None
self.__build_c_bindings = False
self.__tcp_interface_config = None
# ---------- GETTERS ---------------------------------------------
@property
def rebuild(self):
"""Whether to re-build the optimizer from scratch"""
return self.__rebuild
@property
def id(self):
"""Unique identifier of build configuration"""
return self.__id
@property
def cost_function_name(self):
return self.__cost_function_name
@property
def grad_function_name(self):
return self.__grad_cost_function_name
@property
def constraint_penalty_function_name(self):
return self.__constraint_penalty_function
@property
def alm_mapping_f1_function_name(self):
return self.__alm_constraints_mapping_f1
@property
def target_system(self):
"""Target system"""
return self.__target_system
@property
def build_mode(self):
"""Build mode (release or debug)"""
return self.__build_mode
@property
def build_dir(self):
"""Directory in which the auto-generated optimizer will be stored"""
return self.__build_dir
@property
def open_version(self):
"""
OpEn version used with the auto-generated solver
:return: The method returns either a specific version of OpEn,
which will be used with the auto-generated optimizer, or `None`,
in which case, the latest version will be used. You may set your
preferred version of OpEn with `with_open_version`
"""
return self.__open_version
@property
def build_c_bindings(self):
return self.__build_c_bindings
@property
def tcp_interface_config(self):
return self.__tcp_interface_config
# ---------- SETTERS ---------------------------------------------
def with_rebuild(self, do_rebuild):
"""
Whether to clean and rebuild the code generator, if it already exists
:param do_rebuild: if set to True, the target code generator
will be cleaned and rebuilt from scratch
:return: current instance of BuildConfiguration
"""
self.__rebuild = do_rebuild
return self
def with_target_system(self, target_system):
"""
Specify the target system
:param target_system: target system as string (e.g., use
"arm-unknown-linux-gnueabihf" or "rpi" for Raspberry Pi).
Note that you must have installed the target using `rustup`
if you need to cross-compile.
:return: current instance of BuildConfiguration
"""
if target_system.lower() == "rpi":
self.__target_system = "arm-unknown-linux-gnueabihf"
else:
self.__target_system = target_system
return self
def with_build_mode(self, build_mode):
"""
Set the build mode (debug/release)
:param build_mode: Choose either 'debug' or 'release'; the former
compiles quickly but is suboptimal, while the latter may take a while
to compile but produces a significantly faster binary
:return: current instance of BuildConfiguration
"""
self.__build_mode = build_mode
return self
def with_build_directory(self, build_dir):
"""
Specify the build directory
:param build_dir: build directory as string
:return: current instance of BuildConfiguration
"""
self.__build_dir = build_dir
return self
def with_open_version(self, open_version):
"""
Specify the version of OpEn to link to
:param open_version: OpEn version
:return: current instance of BuildConfiguration
"""
self.__open_version = open_version
return self
def with_build_c_bindings(self, build_c_bindings=True):
"""
If activated, OpEn will generate C/C++ bindings for the
auto-generated solver
:param build_c_bindings: whether to build C/C++ bindings for
auto-generated solver; default: `True`, i.e., it suffices
to call `build_config.with_build_c_bindings()` instead of
`build_config.with_build_c_bindings(True)`
:return: current instance of BuildConfiguration
"""
self.__build_c_bindings = build_c_bindings
return self
def with_tcp_interface_config(self, tcp_interface_config=TcpServerConfiguration()):
"""
Specify a TCP server configuration object
:param tcp_interface_config: Custom TCP server configuration
:return: current instance of BuildConfiguration
"""
self.__tcp_interface_config = tcp_interface_config
return self
```
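A hedged sketch of the fluent configuration pattern above; the import path mirrors the file location and is an assumption:
```python
from opengen.config.build_config import BuildConfiguration  # assumed import path

# Chain the setters; "rpi" is expanded to the Raspberry Pi target triple.
cfg = (BuildConfiguration(build_dir="my_optimizers")
       .with_build_mode("release")
       .with_target_system("rpi")
       .with_build_c_bindings())

print(cfg.build_mode)        # release
print(cfg.target_system)     # arm-unknown-linux-gnueabihf
print(cfg.build_c_bindings)  # True
```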
|
{
"source": "jgillis/piwheels",
"score": 2
}
|
#### File: piwheels/audit/__init__.py
```python
import os
import sys
import hashlib
import logging
import argparse
from pathlib import Path
from queue import Queue, Empty
from html.parser import HTMLParser
from .. import __version__, terminal, const
def main(args=None):
"""
This is the main function for the :program:`piw-audit` script. It relies
on nothing from the master application as this is intended to be used
offline or on backups of the master.
"""
sys.excepthook = terminal.error_handler
terminal.error_handler[OSError] = (
terminal.error_handler.exc_message, 1)
logging.getLogger().name = 'audit'
parser = terminal.configure_parser("""\
The piw-audit script is intended to verify that the indexes generated for the
"simple" index are valid, i.e. that the directories and files pointed to all
exist and optionally that the hashes recorded in the sub-indexes match the
files on disk. Note that the script is intended to be run offline; i.e. the
master should preferably be shut down during operation of this script. If the
master is active, deletions may cause false negatives.
""")
parser.add_argument(
'-o', '--output-path', metavar='PATH', default=const.OUTPUT_PATH,
help="The path under which the website has been written; must be "
"readable by the current user")
parser.add_argument(
'-e', '--extraneous', metavar='FILE', type=argparse.FileType('w'),
help="If specified, the path of a file to which all extraneous "
"filenames (files which shouldn't exist, but do) will be written")
parser.add_argument(
'-m', '--missing', metavar='FILE', type=argparse.FileType('w'),
help="If specified, the path of a file to which all missing "
"filenames (files which should exist, but don't) will be written")
parser.add_argument(
'-b', '--broken', metavar='FILE', type=argparse.FileType('w'),
help="If specified, the path of a file to which all filenames of "
"corrupted wheels will be written; warning: this is an extremely "
"slow operation on a full index which is avoided if this option is "
"not specified")
config = parser.parse_args(args)
terminal.configure_logging(config.log_level, config.log_file)
logging.info("PiWheels Audit version %s", __version__)
config.output_path = Path(os.path.expanduser(config.output_path))
check_simple_index(config)
def check_simple_index(config):
logging.info('checking simple index')
path = config.output_path / 'simple'
index = path / 'index.html'
try:
for href, text in parse_links(index):
check_package_index(config, href)
except OSError:
report_missing(config, 'simple index', index)
def check_package_index(config, package):
logging.info('checking %s', package)
path = config.output_path / 'simple' / package
index = path / 'index.html'
try:
all_files = set(path.iterdir())
except OSError:
report_missing(config, 'package dir', path)
else:
try:
all_files.remove(index)
except KeyError:
report_missing(config, 'package index', index)
else:
for href, text in parse_links(index):
filename, filehash = href.rsplit('#', 1)
try:
all_files.remove(path / filename)
except KeyError:
report_missing(config, 'wheel', path / filename)
else:
if config.broken:
check_wheel_hash(config, package, filename, filehash)
for filename in all_files:
report_extra(config, 'file', path / filename)
def check_wheel_hash(config, package, filename, filehash):
logging.info('checking %s/%s', package, filename)
algorithm, filehash = filehash.rsplit('=', 1)
try:
state = {
'md5': hashlib.md5,
'sha256': hashlib.sha256,
}[algorithm]()
except KeyError:
logging.error('invalid algorithm %s in package index %s',
algorithm, package)
else:
wheel = config.output_path / 'simple' / package / filename
with wheel.open('rb') as f:
while True:
buf = f.read(4096)
if not buf:
break
state.update(buf)
if state.hexdigest().lower() != filehash.lower():
report_broken(config, 'wheel', wheel)
# TODO Test JSON data
# TODO Test project dirs
# TODO Test wheel metadata?
def report(file, prefix, label, path):
logging.error('%s %s %s', prefix, label, path)
if file:
file.write(str(path))
file.write('\n')
def report_missing(config, label, path):
report(config.missing, 'missing', label, path)
def report_extra(config, label, path):
report(config.extraneous, 'extraneous', label, path)
def report_broken(config, label, path):
report(config.broken, 'corrupted', label, path)
class IndexParser(HTMLParser):
def __init__(self, queue):
super().__init__()
self.queue = queue
self.href = None
self.data = None
def handle_starttag(self, tag, attrs):
if tag == 'a':
for name, value in attrs:
if name == 'href':
self.href = value
break
def handle_data(self, data):
if self.href is not None:
if self.data is None:
self.data = data
else:
self.data += data
def handle_endtag(self, tag):
if tag == 'a' and self.href is not None and self.data is not None:
self.queue.put((self.href, self.data))
self.href = None
self.data = None
def parse_links(path):
with path.open('r') as f:
q = Queue()
parser = IndexParser(q)
while True:
buf = f.read(4096)
if not buf:
break
parser.feed(buf)
while True:
try:
yield q.get(block=False)
except Empty:
break
```
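A small sketch of how `parse_links` walks anchor tags in an index page; the HTML content and file path here are illustrative only:
```python
from pathlib import Path
from piwheels.audit import parse_links  # assumed import path

index = Path("index.html")
index.write_text('<a href="foo/">foo</a><a href="bar/">bar</a>')

# Yields (href, text) pairs for each <a> element found in the page.
for href, text in parse_links(index):
    print(href, text)  # "foo/ foo", then "bar/ bar"
```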
#### File: piwheels/monitor/states.py
```python
from datetime import datetime, timedelta, timezone
from collections import deque
from ..info import get_pi_info
from ..states import SlaveStats, MasterStats
UTC = timezone.utc
class MasterState:
"""
Class for tracking the state of the master via messages sent over the
monitor PUB socket.
"""
# pylint: disable=too-many-instance-attributes
def __init__(self):
self.killed = False
self.stats = deque(maxlen=100)
self.first_seen = None
self.last_seen = None
self.status = 'Doing whatever the master does' # TODO
self.label = ''
self.os_name = '-'
self.os_version = '-'
self.board_revision = '-'
self.board_info = ''
self.board_serial = '-'
def update(self, timestamp, msg, data):
"""
Update the master's state from an incoming status message.
:param datetime.datetime timestamp:
The time at which the message was originally sent.
:param str msg:
The message itself.
:param data:
Any data sent with the message.
"""
self.last_seen = timestamp
if msg == 'HELLO':
(
self.first_seen,
self.label,
self.os_name,
self.os_version,
self.board_revision,
self.board_serial,
) = data
self.board_info = format_revision(self.board_revision)
self.stats.clear()
elif msg == 'STATS':
self.stats.append(MasterStats.from_message(data))
else:
assert False, 'unexpected message'
@property
def slave_id(self):
return None
@property
def sort_key(self):
return '', ''
@property
def state(self):
if self.first_seen is not None:
if datetime.now(tz=UTC) - self.last_seen > timedelta(seconds=30):
return 'alert'
if self.stats:
latest = self.stats[-1]
if latest.builds_last_hour == 0:
return 'alert'
elif latest.downloads_last_hour == 0:
return 'alert'
elif latest.disk_free < latest.disk_size * 0.1:
return 'alert'
elif latest.mem_free < latest.mem_size * 0.1:
return 'alert'
elif latest.swap_size and (latest.swap_free < latest.swap_size * 0.5):
return 'alert'
elif latest.load_average > 4.0:
return 'alert'
elif latest.cpu_temp > 70.0:
return 'alert'
if self.killed:
return 'dead'
return 'okay'
class SlaveState:
"""
Class for tracking the state of a single build slave via messages sent
over the monitor PUB socket.
"""
# pylint: disable=too-many-instance-attributes
def __init__(self, slave_id):
self.killed = False
self.slave_id = slave_id
self.stats = deque(maxlen=100)
self.last_msg = ''
self.build_timeout = None
self.busy_timeout = None
self.py_version = '-'
self.abi = '-'
self.platform = '-'
self.label = ''
self.os_name = '-'
self.os_version = '-'
self.board_revision = '-'
self.board_info = ''
self.board_serial = '-'
self.build_start = None
self.first_seen = None
self.last_seen = None
self.clock_skew = None
self.status = ''
def update(self, timestamp, msg, data):
"""
Update the slave's state from an incoming status message.
:param datetime.datetime timestamp:
The time at which the message was originally sent.
:param str msg:
The message itself.
:param data:
Any data sent with the message.
"""
self.last_msg = msg
self.last_seen = timestamp
if msg == 'HELLO':
self.status = 'Initializing'
self.first_seen = timestamp
(
self.build_timeout,
self.busy_timeout,
self.py_version,
self.abi,
self.platform,
self.label,
self.os_name,
self.os_version,
self.board_revision,
self.board_serial,
) = data
self.board_info = format_revision(self.board_revision)
self.stats.clear()
elif msg == 'STATS':
data = SlaveStats.from_message(data)
self.clock_skew = self.last_seen - data.timestamp
self.stats.append(data)
elif msg == 'SLEEP':
self.status = 'Waiting for jobs'
elif msg == 'DIE':
self.status = 'Terminating'
self.killed = True
elif msg == 'BUILD':
self.status = 'Building {} {}'.format(data[0], data[1])
self.build_start = timestamp
elif msg == 'SEND':
self.status = 'Transferring file'
elif msg == 'DONE':
self.status = 'Cleaning up after build'
self.build_start = None
elif msg in ('CONT', 'ACK'):
pass
else:
assert False, 'unexpected message'
@property
def sort_key(self):
return self.abi, self.label
@property
def state(self):
"""
Calculate a simple state indicator for the slave, used to color the
initial "*" on the entry.
"""
now = datetime.now(tz=UTC)
if self.first_seen is not None:
if now - self.last_seen > self.busy_timeout:
return 'dead'
elif now - self.last_seen > self.busy_timeout / 2:
return 'alert'
elif self.stats:
latest = self.stats[-1]
if latest.disk_free < latest.disk_size * 0.1:
return 'alert'
elif latest.mem_free < latest.mem_size * 0.1:
return 'alert'
elif latest.swap_size and (latest.swap_free < latest.swap_size * 0.5):
return 'alert'
elif latest.load_average > 4.0:
return 'alert'
elif latest.cpu_temp > 78.0:
return 'alert'
if self.last_msg == 'DONE':
return 'cleaning'
elif self.last_msg == 'SEND':
return 'sending'
elif self.build_start is not None:
return 'building'
if self.killed:
return 'dead'
return 'idle'
def format_revision(revision):
try:
return (
'Pi {i.model} rev{i.pcb_revision} {i.memory}'.format(
i=get_pi_info(revision)))
except ValueError:
return ''
```
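An illustrative sketch of feeding a HELLO status message into MasterState; the timestamp and payload values are made up, and the tuple layout is taken from the update method above:
```python
from datetime import datetime, timezone
from piwheels.monitor.states import MasterState  # assumed import path

master = MasterState()
now = datetime.now(tz=timezone.utc)

# HELLO carries (first_seen, label, os_name, os_version, revision, serial).
master.update(now, 'HELLO',
              (now, 'master', 'Raspbian', '10', 'a02082', '0000000012345678'))

# With no stats yet and a recent last_seen, the state should read as healthy.
print(master.label, master.os_name, master.state)  # e.g. "master Raspbian okay"
```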
#### File: monitor/tui/dialogs.py
```python
"Defines the dialogs used in the monitor application"
from collections import namedtuple
from piwheels import widgets as wdg
class HelpDialog(wdg.Dialog):
def __init__(self):
ok_button = wdg.FixedButton(wdg.format_hotkey('_OK'))
body = wdg.Text([
"Welcome to the ", ('bold', "piwheels"), " monitor "
"application. When run on the same node as the "
"master, this should automatically connect and "
"display its status, along with the state of any "
"connected build slaves.\n"
"\n",
"The following keys can be used within the monitor:\n"
"\n",
('bold', "j / down"), " - Move down the list of machines\n",
('bold', "k / up"), " - Move up the list of machines\n",
('bold', "enter"), " - Perform an action on the selected machine\n",
('bold', "h"), " - Display this help\n",
('bold', "q"), " - Quit the application",
])
super().__init__(title='Help', body=body, buttons=[ok_button])
wdg.connect_signal(ok_button, 'click', lambda btn: self._emit('close'))
self.width = ('relative', 50)
self.min_width = 60
self.height = ('relative', 20)
self.min_height = 16
def keypress(self, size, key):
if key == 'o':
self._emit('close')
else:
return super().keypress(size, key)
Action = namedtuple('Action', ('result', 'title', 'help'))
class ActionsDialog(wdg.Dialog):
title = 'Action!'
actions = [] # list of Action instances
def __init__(self, state):
ok_button = wdg.FixedButton(wdg.format_hotkey('_OK'))
cancel_button = wdg.FixedButton(wdg.format_hotkey('_Cancel'))
choices = []
self.state = state
self.actions = {
wdg.RadioButton(choices, wdg.format_hotkey(action.title),
on_state_change=self.action_picked,
user_data=action): action
for action in self.actions
}
self.help_text = wdg.Text('')
super().__init__(
title=self.title,
body=wdg.Columns([
(20, wdg.Pile(choices)),
self.help_text
]),
buttons=[ok_button, cancel_button])
self.result = None
for radio, action in self.actions.items():
if radio.state:
self.help_text.set_text(action.help)
wdg.connect_signal(ok_button, 'click', self.ok)
wdg.connect_signal(cancel_button, 'click', self.cancel)
self.width = 50
self.min_width = 20
self.height = 12
def ok(self, btn=None):
for radio, action in self.actions.items():
if radio.state:
self.result = action.result
break
self._emit('close')
def cancel(self, btn=None):
self._emit('close')
def default(self, btn=None):
# cancel if focused on cancel button, ok otherwise
pass
def action_picked(self, radio, new_state, action):
if new_state:
self.help_text.set_text(action.help)
def keypress(self, size, key):
try:
{
'enter': self.default,
'o': self.ok,
'c': self.cancel,
}[key]()
except KeyError:
for radio in self.actions:
if key == wdg.find_hotkey(*radio._label.get_text()).lower():
radio.set_state(True)
self.set_focus(radio)
return
return super().keypress(size, key)
class MasterDialog(ActionsDialog):
title = 'Master Control'
actions = [
Action('sleep', "_Pause",
"Stops new builds from being sent to build slaves, but waits "
"for existing builds to finish first. Useful for installing "
"new build dependencies across the cluster without shutting "
"everything down."),
Action('sleep_now', "_Halt",
"Immediately halt existing builds and stop new builds from "
"being sent to the slaves. Useful for installing new build "
"dependencies across the cluster."),
Action('wake', "_Resume",
"Resumes sending builds to slaves; the opposite to the 'Pause' "
"and 'Halt' actions."),
Action('kill_slaves', "_Stop Slaves",
"Shuts down all build slaves after each has completed its "
"existing build. Use this before 'Stop Master' to stop "
"everything when upgrading the entire cluster."),
Action('kill_slaves_now', "_Kill Slaves",
"Cancels all existing builds and immediately shuts down all "
"build slaves. Use this before 'Stop Master' to stop "
"everything when upgrading the entire cluster quickly."),
Action('kill_master', "Stop _Master",
"Cancels all active builds and shuts down the master service "
"but leaves all slaves running. Useful for upgrading just the "
"master and/or performing database maintenance."),
]
class SlaveDialog(ActionsDialog):
title = 'Slave Control'
actions = [
Action('skip_now', "Sk_ip",
"Stops the current build and moves onto the next (if there is "
"one). Useful for skipping a build which is known to fail or "
"is obviously failing."),
Action('sleep', "_Pause",
"Stops new builds from being sent to the selected slave, but "
"waits the existing build to finish first. Useful for "
"maintaining dependencies on the slave."),
Action('sleep_now', "_Halt",
"Immediately halt the existing build and stop new builds from "
"being sent to the selected slave. Useful for maintaining "
"dependencies on the slave."),
Action('wake', "_Resume",
"Resumes sending builds to the selected slave; the opposite to "
"the 'Pause' and 'Halt' actions."),
Action('kill_slave', "_Stop Slave",
"Shuts down the build slave after the current build has "
"finished. Useful for maintaining the slave installation."),
Action('kill_slave_now', "_Kill Slave",
"Cancels the current build and immediately shuts down the "
"build slave. Useful for maintaining the slave installation."),
]
```
#### File: piwheels/piwheels/transport.py
```python
import logging
import ipaddress as ip
import datetime as dt
from binascii import hexlify
import zmq
from voluptuous import Invalid
import cbor2
from .protocols import Protocol, NoData
PUSH = zmq.PUSH
PULL = zmq.PULL
REQ = zmq.REQ
REP = zmq.REP
PUB = zmq.PUB
SUB = zmq.SUB
ROUTER = zmq.ROUTER
DEALER = zmq.DEALER
NOBLOCK = zmq.NOBLOCK
POLLIN = zmq.POLLIN
POLLOUT = zmq.POLLOUT
SUBSCRIBE = zmq.SUBSCRIBE
UNSUBSCRIBE = zmq.UNSUBSCRIBE
Error = zmq.ZMQError
Again = zmq.error.Again
def default_encoder(encoder, value):
if isinstance(value, dt.timedelta):
encoder.encode(
cbor2.CBORTag(2001, (
value.days, value.seconds, value.microseconds)))
elif value is NoData:
encoder.encode(cbor2.CBORTag(2002, None))
else:
raise cbor2.CBOREncodeError(
'cannot serialize type %s' % value.__class__.__name__)
def default_decoder(decoder, tag):
if tag.tag == 2001:
days, seconds, microseconds = tag.value
return dt.timedelta(
days=days, seconds=seconds, microseconds=microseconds)
elif tag.tag == 2002:
return NoData
return tag
class Context:
"""
Wrapper for 0MQ :class:`zmq.Context`. This extends the :meth:`socket`
method to include parameters for the socket's protocol and logger.
"""
def __init__(self):
self._context = zmq.Context.instance()
def socket(self, sock_type, *, protocol=None, logger=None):
return Socket(self._context.socket(sock_type), protocol, logger)
def close(self, linger=1):
self._context.destroy(linger=linger * 1000)
self._context.term()
class Socket:
"""
Wrapper for :class:`zmq.Socket`. This extends 0MQ's sockets to include a
protocol which will be used to validate messages that are sent and received
(via a voluptuous schema), and a logger which can be used to debug socket
behaviour.
"""
def __init__(self, socket, protocol=None, logger=None):
if logger is None:
logger = logging.getLogger()
if protocol is None:
protocol = Protocol()
self._logger = logger
self._socket = socket
self._protocol = protocol
self._socket.ipv6 = True
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def _dump_msg(self, msg, data=NoData):
try:
schema = self._protocol.send[msg]
except KeyError:
raise IOError('unknown message: %s' % msg)
if data is NoData:
if schema is not NoData:
raise IOError('data must be specified for %s' % msg)
return cbor2.dumps(msg, default=default_encoder)
else:
if schema is NoData:
raise IOError('no data expected for %s' % msg)
try:
data = schema(data)
except Invalid as e:
raise IOError('invalid data for %s: %r' % (msg, data))
try:
return cbor2.dumps((msg, data), default=default_encoder)
except cbor2.CBOREncodeError as e:
raise IOError('unable to serialize data')
def _load_msg(self, buf):
try:
msg = cbor2.loads(buf, tag_hook=default_decoder)
except cbor2.CBORDecodeError as e:
raise IOError('unable to deserialize data')
if isinstance(msg, str):
try:
schema = self._protocol.recv[msg]
except KeyError:
raise IOError('unknown message: %s' % msg)
if schema is NoData:
return msg, None
raise IOError('missing data for: %s' % msg)
else:
try:
msg, data = msg
except (TypeError, ValueError):
raise IOError('invalid message structure received')
try:
schema = self._protocol.recv[msg]
except KeyError:
raise IOError('unknown message: %s' % msg)
if schema is NoData:
raise IOError('data not expected for: %s' % msg)
try:
return msg, schema(data)
except Invalid as e:
raise IOError('invalid data for %s: %r' % (msg, data))
@property
def hwm(self):
"""
The high-water mark of the socket, i.e. the number of messages that can
be queued before the socket blocks (or drops messages, depending on the
socket type).
"""
return self._socket.hwm
@hwm.setter
def hwm(self, value):
self._socket.hwm = value
def bind(self, address):
"""
Binds the socket to listen on the specified *address*.
"""
return self._socket.bind(address)
def connect(self, address):
"""
Connects the socket to the listening socket at *address*.
"""
return self._socket.connect(address)
def close(self, linger=None):
"""
Closes the socket. If *linger* is specified, it is the number of
seconds to wait for pending messages to be flushed.
"""
return self._socket.close(
linger=linger if linger is None else linger * 1000)
def subscribe(self, topic):
"""
Subscribes SUB type sockets to the specified *topic* (a string prefix).
"""
self._socket.setsockopt_string(SUBSCRIBE, topic)
def unsubscribe(self, topic):
"""
Unsubscribes SUB type sockets from the specified *topic* (a string
prefix).
"""
self._socket.setsockopt_string(UNSUBSCRIBE, topic)
def poll(self, timeout=None, flags=POLLIN):
"""
Polls the socket for pending data (by default, when *flags* is POLLIN).
If no data is available after *timeout* seconds, returns False.
Otherwise returns True.
If *flags* is POLLOUT instead, tests whether the socket has available
slots for queueing new messages.
"""
return self._socket.poll(
timeout if timeout is None else timeout * 1000, flags)
def send(self, buf, flags=0):
"""
Send *buf* (a :class:`bytes` string).
"""
self._logger.debug('>> %s', buf)
return self._socket.send(buf, flags)
def recv(self, flags=0):
"""
Receives the next message as a :class:`bytes` string.
"""
buf = self._socket.recv(flags)
self._logger.debug('<< %s', buf)
return buf
def drain(self):
"""
Receives all pending messages in the queue and discards them. This
is typically useful during shutdown routines or for testing.
"""
while self.poll(0):
self.recv()
def send_multipart(self, msg_parts, flags=0):
"""
Send *msg_parts*, a list of :class:`bytes` strings as a multi-part
message which can be received intact with :meth:`recv_multipart`.
"""
self._logger.debug('>>' + (' %s' * len(msg_parts)), *msg_parts)
return self._socket.send_multipart(msg_parts, flags)
def recv_multipart(self, flags=0):
"""
Receives a multi-part message, returning its content as a list of
:class:`bytes` strings.
"""
msg_parts = self._socket.recv_multipart(flags)
self._logger.debug('<<' + (' %s' * len(msg_parts)), *msg_parts)
return msg_parts
def send_msg(self, msg, data=NoData, flags=0):
"""
Send the unicode string *msg* with its associated *data* as a
CBOR-encoded message. This is the primary method used in piwheels for
sending information between tasks.
The message, and its associated data, must validate against the
:attr:`protocol` associated with the socket on construction.
"""
self._logger.debug('>> %s %r', msg, data)
return self._socket.send(self._dump_msg(msg, data), flags)
def recv_msg(self, flags=0):
"""
Receive a CBOR-encoded message, returning a tuple of the unicode
message string and its associated data. This is the primary method used
in piwheels for receiving information into a task.
The message, and its associated data, will be validated against the
:attr:`protocol` associated with the socket on construction.
"""
msg, data = self._load_msg(self._socket.recv(flags))
self._logger.debug('<< %s %r', msg, data)
return msg, data
def send_addr_msg(self, addr, msg, data=NoData, flags=0):
"""
Send a CBOR-encoded message (and associated data) to *addr*, a
:class:`bytes` string.
"""
self._logger.debug('>> %s %s %r',
hexlify(addr).decode('ascii'), msg, data)
self._socket.send_multipart([addr, b'', self._dump_msg(msg, data)],
flags)
def recv_addr_msg(self, flags=0):
"""
Receive a CBOR-encoded message (and associated data) along with the
address it came from (represented as a :class:`bytes` string).
"""
try:
addr, empty, buf = self._socket.recv_multipart(flags)
except ValueError:
raise IOError('invalid message structure received')
msg, data = self._load_msg(buf)
self._logger.debug('<< %s %s %r',
hexlify(addr).decode('ascii'), msg, data)
return addr, msg, data
class Poller:
"""
Wrapper for 0MQ :class:`zmq.Poller`. This simply tweaks 0MQ's poller to use
seconds for timeouts, and to return a :class:`dict` by default from
:meth:`poll`.
"""
def __init__(self):
self._poller = zmq.Poller()
self._map = {}
def register(self, sock, flags=POLLIN | POLLOUT):
"""
Register *sock* with the poller, watching for events as specified by
*flags* (which defaults to POLLIN and POLLOUT events).
"""
if isinstance(sock, Socket):
self._map[sock._socket] = sock
return self._poller.register(sock._socket, flags)
else:
return self._poller.register(sock, flags)
def unregister(self, sock):
"""
Unregister *sock* from the poller. After this, calls to :meth:`poll`
will never return references to *sock*.
"""
if isinstance(sock, Socket):
self._poller.unregister(sock._socket)
del self._map[sock._socket]
else:
self._poller.unregister(sock)
def poll(self, timeout=None):
"""
Poll all registered sockets for the events they were registered with,
for *timeout* seconds. Returns a dictionary mapping sockets to events
or an empty dictionary if the *timeout* elapsed with no events
occurring.
"""
return {
self._map.get(sock, sock): event
for sock, event in self._poller.poll(
timeout if timeout is None else timeout * 1000)
}
```
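A brief sketch of the CBOR hooks defined above: timedelta values round-trip through the custom tag 2001. The import path is assumed from the file location:
```python
import datetime as dt
import cbor2
from piwheels.transport import default_encoder, default_decoder  # assumed path

delta = dt.timedelta(days=1, seconds=30, microseconds=500)

# Encode with the custom hook (tag 2001), then decode it back with the tag hook.
buf = cbor2.dumps(delta, default=default_encoder)
restored = cbor2.loads(buf, tag_hook=default_decoder)
assert restored == delta
```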
#### File: tests/master/test_cloud_gazer.py
```python
from unittest import mock
import pytest
from queue import Queue
from datetime import datetime, timezone
from conftest import MockTask
from piwheels import const, protocols, transport
from piwheels.master.cloud_gazer import CloudGazer
UTC = timezone.utc
def dt(s):
return datetime.strptime(s, '%Y-%m-%d %H:%M:%S').replace(tzinfo=UTC)
@pytest.fixture()
def mock_events(request):
with mock.patch('piwheels.master.cloud_gazer.PyPIEvents') as pypi_events:
pypi_events().serial = 0
source = []
def events_iter():
for index, event in enumerate(source, start=pypi_events().serial + 1):
pypi_events().serial = index
yield event
pypi_events().__iter__.side_effect = events_iter
yield source
@pytest.fixture(scope='function')
def task(request, db_queue, web_queue, skip_queue, master_config):
task = CloudGazer(master_config)
yield task
task.close()
db_queue.check()
def test_init(mock_events, db_queue, task):
db_queue.expect('ALLPKGS')
db_queue.send('OK', {"foo"})
db_queue.expect('GETPYPI')
db_queue.send('OK', 1)
task.once()
db_queue.check()
assert task.packages == {"foo"}
assert task.serial == 1
def test_new_pkg(mock_events, db_queue, web_queue, task):
assert task.skip_default == ''
db_queue.expect('ALLPKGS')
db_queue.send('OK', set())
db_queue.expect('GETPYPI')
db_queue.send('OK', 0)
task.once()
db_queue.check()
mock_events[:] = [
('foo', None, dt('2018-07-11 16:43:08'), 'create', 'some description'),
]
db_queue.expect('NEWPKG', ['foo', '', 'some description'])
db_queue.send('OK', True)
db_queue.expect('NEWPKGNAME', ['foo', 'foo', dt('2018-07-11 16:43:08')])
db_queue.send('OK', None)
web_queue.expect('BOTH', 'foo')
web_queue.send('DONE')
db_queue.expect('NEWPKGNAME', ['foo', 'foo', dt('2018-07-11 16:43:08')])
db_queue.send('OK', None)
db_queue.expect('SETPYPI', 1)
db_queue.send('OK', None)
task.poll(0)
db_queue.check()
web_queue.check()
assert task.packages == {"foo"}
assert task.serial == 1
def test_dev_mode(dev_mode, mock_events, db_queue, web_queue, task):
assert task.skip_default == 'development mode'
db_queue.expect('ALLPKGS')
db_queue.send('OK', set())
db_queue.expect('GETPYPI')
db_queue.send('OK', 0)
task.once()
db_queue.check()
mock_events[:] = [
('foo', None, dt('2018-07-11 16:43:08'), 'create', 'some description'),
]
db_queue.expect('NEWPKG', ['foo', 'development mode', 'some description'])
db_queue.send('OK', True)
db_queue.expect('NEWPKGNAME', ['foo', 'foo', dt('2018-07-11 16:43:08')])
db_queue.send('OK', None)
web_queue.expect('BOTH', 'foo')
web_queue.send('DONE')
db_queue.expect('NEWPKGNAME', ['foo', 'foo', dt('2018-07-11 16:43:08')])
db_queue.send('OK', None)
db_queue.expect('SETPYPI', 1)
db_queue.send('OK', None)
task.poll(0)
db_queue.check()
web_queue.check()
assert task.packages == {"foo"}
assert task.serial == 1
def test_new_pkg_non_canon_name(mock_events, db_queue, web_queue, task):
assert task.skip_default == ''
db_queue.expect('ALLPKGS')
db_queue.send('OK', set())
db_queue.expect('GETPYPI')
db_queue.send('OK', 0)
task.once()
db_queue.check()
mock_events[:] = [
('Foo', None, dt('2018-07-11 16:43:08'), 'create', 'some description'),
]
db_queue.expect('NEWPKG', ['foo', '', 'some description'])
db_queue.send('OK', True)
db_queue.expect('NEWPKGNAME', ['foo', 'foo', dt('1970-01-01 00:00:00')])
db_queue.send('OK', None)
db_queue.expect('NEWPKGNAME', ['foo', 'Foo', dt('2018-07-11 16:43:08')])
db_queue.send('OK', None)
web_queue.expect('BOTH', 'foo')
web_queue.send('DONE')
db_queue.expect('NEWPKGNAME', ['foo', 'foo', dt('1970-01-01 00:00:00')])
db_queue.send('OK', None)
db_queue.expect('NEWPKGNAME', ['foo', 'Foo', dt('2018-07-11 16:43:08')])
db_queue.send('OK', None)
db_queue.expect('SETPYPI', 1)
db_queue.send('OK', None)
task.poll(0)
db_queue.check()
web_queue.check()
assert task.packages == {"foo"}
assert task.serial == 1
def test_existing_ver(mock_events, db_queue, web_queue, task):
db_queue.expect('ALLPKGS')
db_queue.send('OK', {'foo'})
db_queue.expect('GETPYPI')
db_queue.send('OK', 0)
task.once()
db_queue.check()
mock_events[:] = [
('foo', None, dt('2018-07-11 16:43:08'), 'create', 'some description'),
('foo', '0.2', dt('2018-07-11 16:43:09'), 'create', 'some description'),
]
# event 1
db_queue.expect('NEWPKGNAME', ['foo', 'foo', dt('2018-07-11 16:43:08')])
db_queue.send('OK', None)
# event 2
db_queue.expect('NEWPKGNAME', ['foo', 'foo', dt('2018-07-11 16:43:09')])
db_queue.send('OK', None)
db_queue.expect('NEWVER', ['foo', '0.2', dt('2018-07-11 16:43:09'), 'binary only'])
db_queue.send('OK', False)
web_queue.expect('BOTH', 'foo')
web_queue.send('DONE')
db_queue.expect('SETPYPI', 2)
db_queue.send('OK', None)
task.poll(0)
db_queue.check()
web_queue.check()
assert task.packages == {"foo"}
assert task.serial == 2
def test_new_ver(mock_events, db_queue, web_queue, task):
db_queue.expect('ALLPKGS')
db_queue.send('OK', {"foo"})
db_queue.expect('GETPYPI')
db_queue.send('OK', 2)
task.once()
db_queue.check()
mock_events[:] = [
('bar', None, dt('2018-07-11 16:43:07'), 'create', 'some description'),
('bar', '1.0', dt('2018-07-11 16:43:09'), 'source', 'some description'),
]
# event 1
db_queue.expect('NEWPKG', ['bar', '', 'some description'])
db_queue.send('OK', True)
db_queue.expect('NEWPKGNAME', ['bar', 'bar', dt('2018-07-11 16:43:07')])
db_queue.send('OK', None)
db_queue.expect('NEWPKGNAME', ['bar', 'bar', dt('2018-07-11 16:43:07')])
db_queue.send('OK', None)
web_queue.expect('BOTH', 'bar')
web_queue.send('DONE')
db_queue.expect('NEWPKGNAME', ['bar', 'bar', dt('2018-07-11 16:43:09')])
db_queue.send('OK', None)
# event 2
db_queue.expect('NEWVER', ['bar', '1.0', dt('2018-07-11 16:43:09'), ''])
db_queue.send('OK', True)
db_queue.expect('SETDESC', ['bar', 'some description'])
db_queue.send('OK', True)
web_queue.expect('BOTH', 'bar')
web_queue.send('DONE')
db_queue.expect('SETPYPI', 4)
db_queue.send('OK', None)
task.poll(0)
db_queue.check()
web_queue.check()
assert task.packages == {"foo", "bar"}
assert task.serial == 4
def test_new_ver_non_canon_pkg_name(mock_events, db_queue, web_queue, task):
db_queue.expect('ALLPKGS')
db_queue.send('OK', {"foo"})
db_queue.expect('GETPYPI')
db_queue.send('OK', 2)
task.once()
db_queue.check()
mock_events[:] = [
('Bar', None, dt('2018-07-11 16:43:07'), 'create', 'some description'),
('Bar', '1.0', dt('2018-07-11 16:43:09'), 'source', 'some description'),
]
# event 1
db_queue.expect('NEWPKG', ['bar', '', 'some description'])
db_queue.send('OK', True)
db_queue.expect('NEWPKGNAME', ['bar', 'bar', dt('1970-01-01 00:00:00')])
db_queue.send('OK', None)
db_queue.expect('NEWPKGNAME', ['bar', 'Bar', dt('2018-07-11 16:43:07')])
db_queue.send('OK', None)
web_queue.expect('BOTH', 'bar')
web_queue.send('DONE')
db_queue.expect('NEWPKGNAME', ['bar', 'bar', dt('1970-01-01 00:00:00')])
db_queue.send('OK', None)
db_queue.expect('NEWPKGNAME', ['bar', 'Bar', dt('2018-07-11 16:43:07')])
db_queue.send('OK', None)
# event 2
db_queue.expect('NEWPKGNAME', ['bar', 'bar', dt('1970-01-01 00:00:00')])
db_queue.send('OK', None)
db_queue.expect('NEWPKGNAME', ['bar', 'Bar', dt('2018-07-11 16:43:09')])
db_queue.send('OK', None)
db_queue.expect('NEWVER', ['bar', '1.0', dt('2018-07-11 16:43:09'), ''])
db_queue.send('OK', True)
db_queue.expect('SETDESC', ['bar', 'some description'])
db_queue.send('OK', True)
web_queue.expect('BOTH', 'bar')
web_queue.send('DONE')
db_queue.expect('SETPYPI', 4)
db_queue.send('OK', None)
task.poll(0)
db_queue.check()
web_queue.check()
assert task.packages == {"foo", "bar"}
assert task.serial == 4
def test_new_ver_deleted(mock_events, db_queue, web_queue, task):
db_queue.expect('ALLPKGS')
db_queue.send('OK', {"foo"})
db_queue.expect('GETPYPI')
db_queue.send('OK', 2)
task.once()
db_queue.check()
mock_events[:] = [
# bar is already deleted, so description is None
('bar', None, dt('2018-07-11 16:43:07'), 'create', None),
('bar', '1.0', dt('2018-07-11 16:43:09'), 'source', None),
]
# event 1
db_queue.expect('NEWPKG', ['bar', '', ''])
db_queue.send('OK', True)
db_queue.expect('NEWPKGNAME', ['bar', 'bar', dt('2018-07-11 16:43:07')])
db_queue.send('OK', None)
db_queue.expect('NEWPKGNAME', ['bar', 'bar', dt('2018-07-11 16:43:07')])
db_queue.send('OK', None)
web_queue.expect('BOTH', 'bar')
web_queue.send('DONE')
# event 2
db_queue.expect('NEWPKGNAME', ['bar', 'bar', dt('2018-07-11 16:43:09')])
db_queue.send('OK', None)
db_queue.expect('NEWVER', ['bar', '1.0', dt('2018-07-11 16:43:09'), ''])
db_queue.send('OK', True)
web_queue.expect('BOTH', 'bar')
web_queue.send('DONE')
db_queue.expect('SETPYPI', 4)
db_queue.send('OK', None)
task.poll(0)
db_queue.check()
web_queue.check()
assert task.packages == {"foo", "bar"}
assert task.serial == 4
def test_remove_ver(mock_events, db_queue, web_queue, skip_queue, task):
db_queue.expect('ALLPKGS')
db_queue.send('OK', {"foo"})
db_queue.expect('GETPYPI')
db_queue.send('OK', 2)
task.once()
db_queue.check()
mock_events[:] = [
('bar', None, dt('2018-07-11 16:43:07'), 'create', 'some description'),
('bar', '1.0', dt('2018-07-11 16:43:09'), 'source', 'some description'),
('bar', '1.0', dt('2018-07-11 16:43:11'), 'remove', None),
]
# event 1
db_queue.expect('NEWPKG', ['bar', '', 'some description'])
db_queue.send('OK', True)
db_queue.expect('NEWPKGNAME', ['bar', 'bar', dt('2018-07-11 16:43:07')])
db_queue.send('OK', None)
web_queue.expect('BOTH', 'bar')
web_queue.send('DONE')
db_queue.expect('NEWPKGNAME', ['bar', 'bar', dt('2018-07-11 16:43:07')])
db_queue.send('OK', None)
# event 2
db_queue.expect('NEWPKGNAME', ['bar', 'bar', dt('2018-07-11 16:43:09')])
db_queue.send('OK', None)
db_queue.expect('NEWVER', ['bar', '1.0', dt('2018-07-11 16:43:09'), ''])
db_queue.send('OK', True)
db_queue.expect('SETDESC', ['bar', 'some description'])
db_queue.send('OK', True)
web_queue.expect('BOTH', 'bar')
web_queue.send('DONE')
# event 3
db_queue.expect('SKIPVER', ['bar', '1.0', 'deleted'])
db_queue.send('OK', True)
web_queue.expect('DELVER', ['bar', '1.0'])
web_queue.send('DONE')
skip_queue.expect('DELVER', ['bar', '1.0'])
skip_queue.send('OK')
db_queue.expect('DELVER', ['bar', '1.0'])
db_queue.send('OK', None)
db_queue.expect('SETPYPI', 5)
db_queue.send('OK', None)
task.poll(0)
db_queue.check()
web_queue.check()
assert task.packages == {"foo", "bar"}
assert task.serial == 5
def test_remove_pkg(mock_events, db_queue, web_queue, skip_queue, task):
db_queue.expect('ALLPKGS')
db_queue.send('OK', {"foo"})
db_queue.expect('GETPYPI')
db_queue.send('OK', 2)
task.once()
db_queue.check()
mock_events[:] = [
('bar', None, dt('2018-07-11 16:43:07'), 'create', 'some description'),
('bar', '1.0', dt('2018-07-11 16:43:09'), 'source', 'some description'),
('bar', None, dt('2018-07-11 16:43:11'), 'remove', None),
]
# event 1
db_queue.expect('NEWPKG', ['bar', '', 'some description'])
db_queue.send('OK', True)
db_queue.expect('NEWPKGNAME', ['bar', 'bar', dt('2018-07-11 16:43:07')])
db_queue.send('OK', None)
web_queue.expect('BOTH', 'bar')
web_queue.send('DONE')
db_queue.expect('NEWPKGNAME', ['bar', 'bar', dt('2018-07-11 16:43:07')])
db_queue.send('OK', None)
# event 2
db_queue.expect('NEWPKGNAME', ['bar', 'bar', dt('2018-07-11 16:43:09')])
db_queue.send('OK', None)
db_queue.expect('NEWVER', ['bar', '1.0', dt('2018-07-11 16:43:09'), ''])
db_queue.send('OK', True)
db_queue.expect('SETDESC', ['bar', 'some description'])
db_queue.send('OK', True)
web_queue.expect('BOTH', 'bar')
web_queue.send('DONE')
# event 3
db_queue.expect('SKIPPKG', ['bar', 'deleted'])
db_queue.send('OK', True)
web_queue.expect('DELPKG', 'bar')
web_queue.send('DONE')
skip_queue.expect('DELPKG', 'bar')
skip_queue.send('OK')
db_queue.expect('DELPKG', 'bar')
db_queue.send('OK', None)
db_queue.expect('SETPYPI', 5)
db_queue.send('OK', None)
task.poll(0)
db_queue.check()
web_queue.check()
assert task.packages == {"foo"}
assert task.serial == 5
def test_remove_pkg_no_insert(mock_events, db_queue, web_queue, skip_queue, task):
db_queue.expect('ALLPKGS')
db_queue.send('OK', {"foo"})
db_queue.expect('GETPYPI')
db_queue.send('OK', 3)
task.once()
db_queue.check()
mock_events[:] = [
('bar', None, dt('2018-07-11 16:43:09'), 'remove', None),
]
db_queue.expect('SKIPPKG', ['bar', 'deleted'])
db_queue.send('OK', True)
web_queue.expect('DELPKG', 'bar')
web_queue.send('DONE')
skip_queue.expect('DELPKG', 'bar')
skip_queue.send('OK')
db_queue.expect('DELPKG', 'bar')
db_queue.send('OK', None)
db_queue.expect('SETPYPI', 4)
db_queue.send('OK', None)
task.poll(0)
db_queue.check()
web_queue.check()
assert task.packages == {"foo"}
assert task.serial == 4
def test_remove_pkg_before_insert(mock_events, db_queue, web_queue, skip_queue,
task):
db_queue.expect('ALLPKGS')
db_queue.send('OK', {"foo"})
db_queue.expect('GETPYPI')
db_queue.send('OK', 2)
task.once()
db_queue.check()
mock_events[:] = [
('bar', None, dt('2018-07-11 16:43:08'), 'remove', None),
('bar', None, dt('2018-07-11 16:43:09'), 'create', 'some description'),
('bar', '1.0', dt('2018-07-11 16:43:09'), 'source', 'some description'),
]
# event 1
db_queue.expect('SKIPPKG', ['bar', 'deleted'])
db_queue.send('OK', None)
web_queue.expect('DELPKG', 'bar')
web_queue.send('DONE')
skip_queue.expect('DELPKG', 'bar')
skip_queue.send('OK')
db_queue.expect('DELPKG', 'bar')
db_queue.send('OK', None)
# event 2
db_queue.expect('NEWPKG', ['bar', '', 'some description'])
db_queue.send('OK', True)
db_queue.expect('NEWPKGNAME', ['bar', 'bar', dt('2018-07-11 16:43:09')])
db_queue.send('OK', None)
web_queue.expect('BOTH', 'bar')
web_queue.send('DONE')
db_queue.expect('NEWPKGNAME', ['bar', 'bar', dt('2018-07-11 16:43:09')])
db_queue.send('OK', None)
# event 3
db_queue.expect('NEWPKGNAME', ['bar', 'bar', dt('2018-07-11 16:43:09')])
db_queue.send('OK', None)
db_queue.expect('NEWVER', ['bar', '1.0', dt('2018-07-11 16:43:09'), ''])
db_queue.send('OK', True)
db_queue.expect('SETDESC', ['bar', 'some description'])
db_queue.send('OK', True)
web_queue.expect('BOTH', 'bar')
web_queue.send('DONE')
db_queue.expect('SETPYPI', 5)
db_queue.send('OK', None)
task.poll(0)
db_queue.check()
web_queue.check()
assert task.packages == {"foo", "bar"}
assert task.serial == 5
def test_enable_ver(mock_events, db_queue, web_queue, task):
db_queue.expect('ALLPKGS')
db_queue.send('OK', {"foo"})
db_queue.expect('GETPYPI')
db_queue.send('OK', 3)
task.once()
db_queue.check()
mock_events[:] = [
('foo', '1.0', dt('2018-07-11 16:43:09'), 'create', 'some description'),
('foo', '1.0', dt('2018-07-11 16:43:11'), 'source', 'some description'),
]
# event 1
db_queue.expect('NEWPKGNAME', ['foo', 'foo', dt('2018-07-11 16:43:09')])
db_queue.send('OK', None)
db_queue.expect('NEWVER', ['foo', '1.0', dt('2018-07-11 16:43:09'), 'binary only'])
db_queue.send('OK', True)
db_queue.expect('SETDESC', ['foo', 'some description'])
db_queue.send('OK', True)
web_queue.expect('BOTH', 'foo')
web_queue.send('DONE')
# event 2
db_queue.expect('NEWPKGNAME', ['foo', 'foo', dt('2018-07-11 16:43:11')])
db_queue.send('OK', None)
db_queue.expect('NEWVER', ['foo', '1.0', dt('2018-07-11 16:43:11'), ''])
db_queue.send('OK', False)
db_queue.expect('GETSKIP', ['foo', '1.0'])
db_queue.send('OK', 'binary only')
db_queue.expect('SKIPVER', ['foo', '1.0', ''])
db_queue.send('OK', None)
web_queue.expect('PROJECT', 'foo')
web_queue.send('DONE')
db_queue.expect('SETPYPI', 5)
db_queue.send('OK', None)
task.poll(0)
db_queue.check()
web_queue.check()
assert task.packages == {"foo"}
assert task.serial == 5
def test_yank_ver(mock_events, db_queue, web_queue, task):
db_queue.expect('ALLPKGS')
db_queue.send('OK', {"foo"})
db_queue.expect('GETPYPI')
db_queue.send('OK', 3)
task.once()
db_queue.check()
mock_events[:] = [
('foo', '1.0', dt('2018-07-11 16:43:11'), 'yank', None),
]
db_queue.expect('NEWPKGNAME', ['foo', 'foo', dt('2018-07-11 16:43:11')])
db_queue.send('OK', None)
db_queue.expect('YANKVER', ['foo', '1.0'])
db_queue.send('OK', True)
web_queue.expect('BOTH', 'foo')
web_queue.send('DONE')
db_queue.expect('SETPYPI', 4)
db_queue.send('OK', None)
task.poll(0)
db_queue.check()
web_queue.check()
assert task.packages == {"foo"}
assert task.serial == 4
def test_unyank_ver(mock_events, db_queue, web_queue, task):
db_queue.expect('ALLPKGS')
db_queue.send('OK', {"foo"})
db_queue.expect('GETPYPI')
db_queue.send('OK', 3)
task.once()
db_queue.check()
mock_events[:] = [
('foo', '1.0', dt('2018-07-11 16:43:11'), 'unyank', None),
]
db_queue.expect('NEWPKGNAME', ['foo', 'foo', dt('2018-07-11 16:43:11')])
db_queue.send('OK', None)
db_queue.expect('UNYANKVER', ['foo', '1.0'])
db_queue.send('OK', True)
web_queue.expect('BOTH', 'foo')
web_queue.send('DONE')
db_queue.expect('SETPYPI', 4)
db_queue.send('OK', None)
task.poll(0)
db_queue.check()
web_queue.check()
assert task.packages == {"foo"}
assert task.serial == 4
def test_yank_unyank_ver(mock_events, db_queue, web_queue, task):
db_queue.expect('ALLPKGS')
db_queue.send('OK', {"foo"})
db_queue.expect('GETPYPI')
db_queue.send('OK', 3)
task.once()
db_queue.check()
mock_events[:] = [
('foo', '1.0', dt('2018-07-11 16:43:11'), 'yank', None),
('foo', '1.0', dt('2018-07-11 16:43:12'), 'unyank', None),
]
# event 1
db_queue.expect('NEWPKGNAME', ['foo', 'foo', dt('2018-07-11 16:43:11')])
db_queue.send('OK', None)
db_queue.expect('YANKVER', ['foo', '1.0'])
db_queue.send('OK', True)
web_queue.expect('BOTH', 'foo')
web_queue.send('DONE')
# event 2
db_queue.expect('NEWPKGNAME', ['foo', 'foo', dt('2018-07-11 16:43:12')])
db_queue.send('OK', None)
db_queue.expect('UNYANKVER', ['foo', '1.0'])
db_queue.send('OK', True)
web_queue.expect('BOTH', 'foo')
web_queue.send('DONE')
db_queue.expect('SETPYPI', 5)
db_queue.send('OK', None)
task.poll(0)
db_queue.check()
web_queue.check()
assert task.packages == {"foo"}
assert task.serial == 5
```
#### File: tests/remove/test_remove.py
```python
import os
from unittest import mock
from threading import Thread
import pytest
from conftest import find_message
from piwheels import __version__, protocols, transport
from piwheels.remove import main
@pytest.fixture()
def import_queue_name(request, tmpdir):
yield 'ipc://' + str(tmpdir.join('import-queue'))
@pytest.fixture()
def import_queue(request, mock_context, import_queue_name, tmpdir):
queue = mock_context.socket(transport.REP, protocol=protocols.mr_chase)
queue.hwm = 1
queue.bind(import_queue_name)
yield queue
queue.close()
class RemoveThread(Thread):
def __init__(self, args):
super().__init__(target=self.capture_exc, args=(args,), daemon=True)
self.exception = None
self.exitcode = None
def capture_exc(self, args):
try:
self.exitcode = main(args)
except Exception as e:
self.exception = e
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.join(10)
assert not self.is_alive()
def test_help(capsys):
with pytest.raises(SystemExit):
main(['--help'])
out, err = capsys.readouterr()
assert out.startswith('usage:')
assert '--yes' in out
assert '--skip' in out
def test_version(capsys):
with pytest.raises(SystemExit):
main(['--version'])
out, err = capsys.readouterr()
assert out.strip() == __version__
def test_abort(caplog):
with mock.patch('piwheels.terminal.yes_no_prompt') as prompt_mock:
prompt_mock.return_value = False
assert main(['foo', '0.1']) == 2
assert find_message(caplog.records, message='User aborted removal')
def test_remove_package(mock_context, import_queue_name, import_queue):
with mock.patch('piwheels.terminal.yes_no_prompt') as prompt_mock:
prompt_mock.return_value = True
with RemoveThread(['--import-queue', import_queue_name, 'foo']) as thread:
assert import_queue.recv_msg() == ('REMOVE', ['foo', None, ''])
import_queue.send_msg('DONE')
thread.join(10)
assert thread.exitcode == 0
def test_remove_version(mock_context, import_queue_name, import_queue):
with mock.patch('piwheels.terminal.yes_no_prompt') as prompt_mock:
prompt_mock.return_value = True
with RemoveThread(['--import-queue', import_queue_name, 'foo', '0.1']) as thread:
assert import_queue.recv_msg() == ('REMOVE', ['foo', '0.1', ''])
import_queue.send_msg('DONE')
thread.join(10)
assert thread.exitcode == 0
def test_remove_and_skip(mock_context, import_queue_name, import_queue):
with mock.patch('piwheels.terminal.yes_no_prompt') as prompt_mock:
prompt_mock.return_value = True
with RemoveThread(['--import-queue', import_queue_name, 'foo', '0.1', '--skip', 'legal']) as thread:
assert import_queue.recv_msg() == ('REMOVE', ['foo', '0.1', 'legal'])
import_queue.send_msg('DONE')
thread.join(10)
assert thread.exitcode == 0
def test_failure(mock_context, import_queue_name, import_queue):
with RemoveThread(['--import-queue', import_queue_name, 'foo', '0.1', '--yes']) as thread:
assert import_queue.recv_msg() == ('REMOVE', ['foo', '0.1', ''])
import_queue.send_msg('ERROR', 'Package foo does not exist')
thread.join(10)
assert isinstance(thread.exception, RuntimeError)
assert 'Package foo does not exist' in str(thread.exception)
def test_unexpected(mock_context, import_queue_name, import_queue):
with RemoveThread(['--import-queue', import_queue_name, 'foo', '0.1', '--yes']) as thread:
assert import_queue.recv_msg() == ('REMOVE', ['foo', '0.1', ''])
import_queue.send_msg('SEND', 'foo.whl')
thread.join(10)
assert isinstance(thread.exception, RuntimeError)
assert 'Unexpected response from master' in str(thread.exception)
```
#### File: piwheels/tests/test_ranges.py
```python
from piwheels.ranges import (
consolidate,
split,
exclude,
intersect,
)
def test_consolidate():
assert list(consolidate([range(5), range(4, 10)])) == [range(10)]
assert list(consolidate([range(5), range(5, 10)])) == [range(10)]
assert list(consolidate([range(5), range(6, 10)])) == [range(5), range(6, 10)]
def test_split():
assert list(split([range(10)], 5)) == [range(5), range(5, 10)]
assert list(split([range(10)], 0)) == [range(10)]
assert list(split([range(10)], 20)) == [range(10)]
def test_exclude():
assert list(exclude([range(10)], range(2))) == [range(2, 10)]
assert list(exclude([range(10)], range(2, 4))) == [range(2), range(4, 10)]
def test_intersect():
assert intersect(range(10), range(5)) == range(5)
assert intersect(range(10), range(10, 2)) is None
assert intersect(range(10), range(2, 5)) == range(2, 5)
```
#### File: piwheels/tests/test_tasks.py
```python
import logging
from unittest import mock
from datetime import datetime, timedelta
from time import sleep
import pytest
from piwheels import protocols, transport, tasks
class CounterTask(tasks.PauseableTask):
# A trivial task purely for test purposes, with a very rapid poll cycle
name = 'counter'
def __init__(self, config, control_protocol=protocols.task_control,
delay=timedelta(microseconds=1)):
super().__init__(config, control_protocol)
self.every(delay, self.loop)
self.count = 0
def loop(self):
self.count += 1
def poll(self, timeout=0.1):
return super().poll(timeout)
def foo(self):
pass
class SimpleTask(tasks.Task):
name = 'simple'
class BrokenTask(tasks.Task):
# A trivial task which instantly breaks
name = 'broken'
def __init__(self, config, control_protocol=protocols.task_control):
super().__init__(config, control_protocol)
self.every(timedelta(microseconds=1), self.loop)
def loop(self):
raise Exception("Don't panic!")
def test_task_quits(master_config):
task = tasks.Task(master_config)
try:
task.start()
task.quit()
task.join(10)
assert not task.is_alive()
finally:
task.close()
def test_task_runs(master_config):
task = CounterTask(master_config)
try:
task.start()
task.quit()
task.join(10)
assert task.count > 0
finally:
task.close()
def test_task_force(master_config):
task = CounterTask(master_config, delay=timedelta(seconds=1))
try:
task.start()
start = datetime.utcnow()
while task.count == 0:
sleep(0.01)
assert datetime.utcnow() - start < timedelta(seconds=1)
start = datetime.utcnow()
task.force(task.loop)
while task.count == 1:
sleep(0.01)
assert datetime.utcnow() - start < timedelta(seconds=1)
task.quit()
task.join(10)
assert task.count > 0
with pytest.raises(ValueError):
task.force(task.foo)
finally:
task.close()
def test_task_pause(master_config):
task = CounterTask(master_config)
try:
task.start()
task.pause()
sleep(0.01)
current = task.count
sleep(0.01)
assert task.count == current
task.resume()
task.quit()
task.join(10)
assert task.count > current
finally:
task.close()
def test_task_pause_resume_idempotent(master_config):
task = CounterTask(master_config)
try:
task.start()
task.pause()
task.pause()
task.resume()
task.resume()
task.quit()
task.join(10)
assert not task.is_alive()
finally:
task.close()
def test_task_quit_while_paused(master_config):
task = CounterTask(master_config)
try:
task.start()
task.pause()
task.quit()
task.join(10)
assert not task.is_alive()
finally:
task.close()
def test_task_resume_while_not_paused(master_config):
task = CounterTask(master_config)
try:
task.logger = mock.Mock()
task.start()
task.resume()
task.quit()
task.join(10)
assert not task.is_alive()
assert task.logger.warning.call_count == 1
finally:
task.close()
def test_broken_control(master_config, caplog):
protocol = protocols.Protocol(recv={
'FOO': protocols.NoData,
'QUIT': protocols.NoData,
})
task = CounterTask(master_config, control_protocol=protocol)
try:
task.start()
task._ctrl('FOO')
task.quit()
task.join(10)
assert not task.is_alive()
assert caplog.record_tuples == [
('counter', logging.INFO, 'starting'),
('counter', logging.INFO, 'started'),
('counter', logging.ERROR, 'unhandled exception in %r' % task),
('counter', logging.INFO, 'stopped'),
]
finally:
task.close()
caplog.clear()
task = SimpleTask(master_config, control_protocol=protocol)
try:
task.start()
task._ctrl('FOO')
task.quit()
task.join(10)
assert not task.is_alive()
assert caplog.record_tuples == [
('simple', logging.INFO, 'starting'),
('simple', logging.INFO, 'started'),
('simple', logging.ERROR, 'unhandled exception in %r' % task),
('simple', logging.INFO, 'stopped'),
]
finally:
task.close()
def test_bad_control(master_config, caplog):
task = CounterTask(master_config)
try:
task.start()
sock = task.ctx.socket(
transport.PUSH, protocol=reversed(task.control_protocol),
logger=task.logger)
sock.connect('inproc://ctrl-counter')
sock.send(b'FOO')
sock.close()
task.quit()
task.join(10)
assert not task.is_alive()
assert caplog.record_tuples == [
('counter', logging.INFO, 'starting'),
('counter', logging.INFO, 'started'),
('counter', logging.ERROR, 'unable to deserialize data'),
('counter', logging.INFO, 'stopping'),
('counter', logging.INFO, 'stopped'),
]
finally:
task.close()
caplog.clear()
task = SimpleTask(master_config)
try:
task.start()
sock = task.ctx.socket(
transport.PUSH, protocol=reversed(task.control_protocol),
logger=task.logger)
sock.connect('inproc://ctrl-simple')
sock.send(b'FOO')
sock.close()
task.quit()
task.join(10)
assert not task.is_alive()
assert caplog.record_tuples == [
('simple', logging.INFO, 'starting'),
('simple', logging.INFO, 'started'),
('simple', logging.ERROR, 'unable to deserialize data'),
('simple', logging.INFO, 'stopping'),
('simple', logging.INFO, 'stopped'),
]
finally:
task.close()
def test_broken_task_quits(master_config, master_control_queue):
task = BrokenTask(master_config)
try:
task.start()
task.join(10)
assert not task.is_alive()
# Ensure the broken task tells the master to quit
assert master_control_queue.recv_msg() == ('QUIT', None)
finally:
task.close()
```
#### File: piwheels/tests/test_terminal.py
```python
import sys
import logging
from unittest import mock
import pytest
import configargparse
from piwheels.terminal import (
_CONSOLE,
configure_parser,
configure_logging,
error_handler,
yes_no_prompt,
ErrorHandler,
)
def test_configure_parser():
p = configure_parser('foo', log_params=False)
assert p.description == 'foo'
with pytest.raises(SystemExit):
p.parse_args(['--version'])
with pytest.raises(SystemExit):
p.parse_args(['-h'])
with pytest.raises(configargparse.ArgumentError):
p.parse_args(['--log-file', 'foo.log'])
c = p.parse_args([])
assert c.configuration is None
def test_configure_parser_with_logging(tmpdir):
p = configure_parser('foo', log_params=True)
assert p.description == 'foo'
with pytest.raises(SystemExit):
p.parse_args(['--version'])
with pytest.raises(SystemExit):
p.parse_args(['-h'])
c = p.parse_args(['--log-file', str(tmpdir.join('/foo.log'))])
assert c.log_level == logging.WARNING
assert c.log_file == str(tmpdir.join('/foo.log'))
mock_logger = logging.getLogger('mock')
with mock.patch('logging.getLogger') as m:
m.return_value = mock_logger
configure_logging(c.log_level, c.log_file)
assert len(m.return_value.handlers) == 2
assert m.return_value.handlers[0] is _CONSOLE
assert m.return_value.handlers[0].level == logging.WARNING
assert isinstance(m.return_value.handlers[1], logging.FileHandler)
assert m.return_value.handlers[1].baseFilename == str(tmpdir.join('/foo.log'))
assert m.return_value.handlers[1].level == logging.INFO
assert m.return_value.level == logging.INFO
def test_error_handler():
with mock.patch('piwheels.terminal.logging') as logging:
assert error_handler(SystemExit, 0, None) == 0
assert error_handler(KeyboardInterrupt, 'Ctrl+C pressed', None) == 2
assert logging.critical.call_count == 0
assert error_handler(configargparse.ArgumentError, 'foo', None) == 2
assert logging.critical.call_args_list == [
mock.call('foo'),
mock.call('Try the --help option for more information.'),
]
logging.reset_mock()
assert error_handler(IOError, 'File not found', None) == 1
assert logging.critical.call_args == mock.call('File not found')
logging.reset_mock()
with mock.patch('traceback.format_exception') as fmt_exc:
fmt_exc.side_effect = lambda t, v, tb: [v]
assert error_handler(ValueError, 'Foo%bar', None) == 1
assert logging.critical.call_args == mock.call('Foo%%bar')
def test_configure_error_handler():
e = ErrorHandler()
l = len(e)
assert RuntimeError not in e
try:
raise RuntimeError('Error communicating with master')
except RuntimeError:
assert e(*sys.exc_info()) == 1
e[RuntimeError] = (None, 2)
assert len(e) == l + 1
assert e[RuntimeError] == (None, 2)
try:
raise RuntimeError('Error communicating with master')
except RuntimeError:
assert e(*sys.exc_info()) == 2
def test_yes_no_prompt(capsys):
with mock.patch('builtins.input') as _input:
_input.return_value = ''
assert yes_no_prompt('Foo') == True
assert _input.call_args == mock.call('Foo [Y/n] ')
out, err = capsys.readouterr()
assert out == '\n'
_input.side_effect = ['foo', 'NO']
assert yes_no_prompt('Bar') == False
out, err = capsys.readouterr()
assert out == '\nInvalid response\n'
```
#### File: piwheels/tests/test_transport.py
```python
import os
import sys
import ipaddress as ip
import datetime as dt
from unittest import mock
import cbor2
import pytest
from voluptuous import Any
from piwheels.protocols import Protocol, NoData
from piwheels.transport import *
def test_ipaddress_roundtrip():
protocol = Protocol(recv={'FOO': Any(ip.IPv4Address, ip.IPv6Address)})
ctx = Context()
pull = ctx.socket(PULL, protocol=protocol)
push = ctx.socket(PUSH, protocol=reversed(protocol))
pull.bind('inproc://foo')
push.connect('inproc://foo')
address4 = ip.IPv4Address('192.168.0.1')
address6 = ip.IPv6Address('::1')
push.send_msg('FOO', address4)
assert pull.recv_msg() == ('FOO', address4)
push.send_msg('FOO', address6)
assert pull.recv_msg() == ('FOO', address6)
push.close()
pull.close()
def test_timedelta_roundtrip():
protocol = Protocol(recv={'FOO': dt.timedelta})
ctx = Context()
pull = ctx.socket(PULL, protocol=protocol)
push = ctx.socket(PUSH, protocol=reversed(protocol))
pull.bind('inproc://foo')
push.connect('inproc://foo')
delta = dt.timedelta(minutes=5)
push.send_msg('FOO', delta)
assert pull.recv_msg() == ('FOO', delta)
push.close()
pull.close()
def test_encoding_unknown_type():
protocol = Protocol(recv={'FOO': Exception})
ctx = Context()
pull = ctx.socket(PULL, protocol=protocol)
push = ctx.socket(PUSH, protocol=reversed(protocol))
pull.bind('inproc://foo')
push.connect('inproc://foo')
with pytest.raises(IOError):
push.send_msg('FOO', NotImplementedError())
push.close()
pull.close()
def test_decoding_unknown_type():
ctx = Context()
pull = ctx.socket(PULL)
push = ctx.socket(PUSH)
pull.bind('inproc://foo')
push.connect('inproc://foo')
push.send(cbor2.dumps(cbor2.CBORTag(4000, None)))
with pytest.raises(IOError):
pull.recv_msg()
push.close()
pull.close()
def test_recv_invalid_addr_msg_structure():
ctx = Context()
pull = ctx.socket(PULL)
push = ctx.socket(PUSH)
pull.bind('inproc://foo')
push.connect('inproc://foo')
push.send_multipart([b'foo', b'', b'', b''])
with pytest.raises(IOError):
pull.recv_addr_msg()
def test_send_data_for_pure_msg():
protocol = Protocol(recv={'FOO': NoData})
ctx = Context()
pull = ctx.socket(PULL, protocol=protocol)
push = ctx.socket(PUSH, protocol=reversed(protocol))
pull.bind('inproc://foo')
push.connect('inproc://foo')
with pytest.raises(IOError):
push.send_msg('FOO', 1)
push.close()
pull.close()
def test_send_no_data_for_msg():
protocol = Protocol(recv={'FOO': int})
ctx = Context()
pull = ctx.socket(PULL, protocol=protocol)
push = ctx.socket(PUSH, protocol=reversed(protocol))
pull.bind('inproc://foo')
push.connect('inproc://foo')
with pytest.raises(IOError):
push.send_msg('FOO')
push.close()
pull.close()
def test_send_bad_data_for_msg():
protocol = Protocol(recv={'FOO': int})
ctx = Context()
pull = ctx.socket(PULL, protocol=protocol)
push = ctx.socket(PUSH, protocol=reversed(protocol))
pull.bind('inproc://foo')
push.connect('inproc://foo')
with pytest.raises(IOError):
push.send_msg('FOO', 'bar')
push.close()
pull.close()
def test_recv_bad_data_from_msg():
ctx = Context()
pull = ctx.socket(PULL, protocol=Protocol(recv={'FOO': int}))
push = ctx.socket(PUSH, protocol=Protocol(send={'FOO': str}))
pull.bind('inproc://foo')
push.connect('inproc://foo')
push.send_msg('FOO', 'bar')
with pytest.raises(IOError):
pull.recv_msg()
push.close()
pull.close()
def test_recv_no_data_from_msg():
ctx = Context()
pull = ctx.socket(PULL, protocol=Protocol(recv={'FOO': int}))
push = ctx.socket(PUSH, protocol=Protocol(send={'FOO': NoData}))
pull.bind('inproc://foo')
push.connect('inproc://foo')
push.send_msg('FOO')
with pytest.raises(IOError):
pull.recv_msg()
push.close()
pull.close()
def test_recv_unknown_msg():
ctx = Context()
pull = ctx.socket(PULL)
push = ctx.socket(PUSH, protocol=Protocol(send={'FOO': int}))
pull.bind('inproc://foo')
push.connect('inproc://foo')
push.send_msg('FOO', 1)
with pytest.raises(IOError):
pull.recv_msg()
push.close()
pull.close()
def test_recv_unexpected_data():
ctx = Context()
pull = ctx.socket(PULL, protocol=Protocol(recv={'FOO': NoData}))
push = ctx.socket(PUSH, protocol=Protocol(send={'FOO': int}))
pull.bind('inproc://foo')
push.connect('inproc://foo')
push.send_msg('FOO', 1)
with pytest.raises(IOError):
pull.recv_msg()
push.close()
pull.close()
def test_hwm_attr():
ctx = Context()
sock = ctx.socket(PULL)
sock.hwm = 10
assert sock.hwm == 10
sock.close()
def test_subscribe():
ctx = Context()
pub = ctx.socket(PUB, protocol=Protocol(send={'FOO': int}))
sub = ctx.socket(SUB, protocol=Protocol(recv={'FOO': int}))
pub.bind('inproc://foo')
sub.connect('inproc://foo')
sub.subscribe('')
pub.send_msg('FOO', 1)
assert sub.recv_msg() == ('FOO', 1)
sub.unsubscribe('')
pub.send_msg('FOO', 2)
assert not sub.poll(0.5)
sub.close()
pub.close()
def test_poll_fd(tmpdir):
r, w = os.pipe()
p = Poller()
p.register(r)
assert not p.poll(0.1)
os.write(w, b'foo')
assert p.poll(0.1)
p.unregister(r)
os.write(w, b'bar')
assert not p.poll(0.1)
os.close(w)
os.close(r)
```
|
{
"source": "jgindin/core",
"score": 3
}
|
#### File: components/wallbox/sensor.py
```python
from homeassistant.components.sensor import SensorEntity
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
CONF_CONNECTIONS,
DOMAIN,
SENSOR_TYPES,
WallboxSensorEntityDescription,
)
CONF_STATION = "station"
UPDATE_INTERVAL = 30
async def async_setup_entry(hass, config, async_add_entities):
"""Create wallbox sensor entities in HASS."""
coordinator = hass.data[DOMAIN][CONF_CONNECTIONS][config.entry_id]
async_add_entities(
[
WallboxSensor(coordinator, config, description)
for ent in coordinator.data
if (description := SENSOR_TYPES[ent])
]
)
class WallboxSensor(CoordinatorEntity, SensorEntity):
"""Representation of the Wallbox portal."""
entity_description: WallboxSensorEntityDescription
def __init__(
self, coordinator, config, description: WallboxSensorEntityDescription
):
"""Initialize a Wallbox sensor."""
super().__init__(coordinator)
self.entity_description = description
self._attr_name = f"{config.title} {description.name}"
@property
def native_value(self):
"""Return the state of the sensor."""
return self.coordinator.data[self.entity_description.key]
```
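The comprehension in `async_setup_entry` uses an assignment expression so each key in the coordinator data is looked up once and bound for the entity constructor; the same pattern in isolation (with a plain dict standing in for `SENSOR_TYPES`):
```python
# Assignment-expression-in-comprehension pattern from async_setup_entry,
# shown with made-up data instead of SENSOR_TYPES / coordinator.data.
descriptions = {'charging_power': 'Charging Power', 'added_energy': 'Added Energy'}
coordinator_data = ['charging_power', 'added_energy']
entities = [desc for key in coordinator_data if (desc := descriptions[key])]
assert entities == ['Charging Power', 'Added Energy']
```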
|
{
"source": "jginesclavero/ros2-tf-pose-estimation",
"score": 2
}
|
#### File: tf_pose_estimation/tf_pose_estimation/tf_pose_estimation_node.py
```python
import time
import os
import sys
import ast
from threading import Lock
from numpy import double
import numpy as np
import cv2
import rclpy
from rclpy.node import Node
from rclpy.parameter import Parameter
from rcl_interfaces.msg import ParameterDescriptor
from rclpy.executors import MultiThreadedExecutor
from rclpy.callback_groups import ReentrantCallbackGroup
from ament_index_python.packages import get_package_share_directory
from cv_bridge import CvBridge, CvBridgeError
from std_msgs.msg import String, Header
from sensor_msgs.msg import Image, CameraInfo
from tf_pose_estimation_msgs.msg import Frame, Person, BodyPart, Pixel
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import model_wh, get_graph_path
class TFPoseEstimationNode(Node):
def __init__(self, last_contexts=None):
"""Initialize all params and load data."""
""" Constants and params """
super().__init__('tf_pose_estimation_node')
parameters = [
('image_topic', '', ParameterDescriptor()),
('depth_image_topic', '', ParameterDescriptor()),
('camera_info_topic', '', ParameterDescriptor()),
('model', 'cmu', ParameterDescriptor()),
('resolution', '432x368', ParameterDescriptor()),
('resize_out_ratio', 4.0, ParameterDescriptor()),
]
self.declare_parameters('', parameters)
image_topic = self.get_parameter_or('image_topic',
Parameter('image_topic', type_ = Parameter.Type.STRING, value = ""))._value
depth_image_topic = self.get_parameter_or('depth_image_topic',
Parameter('depth_image_topic', type_ = Parameter.Type.STRING, value = ""))._value
camera_info_topic = self.get_parameter_or('camera_info_topic',
Parameter('camera_info_topic', type_ = Parameter.Type.STRING, value = ""))._value
model = self.get_parameter_or('model',
Parameter('model', type_ = Parameter.Type.STRING, value = ""))._value
resolution = self.get_parameter_or('resolution',
Parameter('resolution', type_ = Parameter.Type.STRING, value = ''))._value
self._resize_out_ratio = self.get_parameter_or('resize_out_ratio',
Parameter('resize_out_ratio', type_ = Parameter.Type.DOUBLE, value = 4.0))._value
self._tf_lock = Lock()
self.DISTANCE_INFO = False
if not image_topic:
self.get_logger().error('Parameter \'image_topic\' is not provided.')
sys.exit(-1)
try:
w, h = model_wh(resolution)
graph_path = get_graph_path(model)
graph_path = os.path.join(get_package_share_directory('tf_pose_estimation'), graph_path)
except Exception as e:
self.get_logger().error('invalid model: %s, e=%s' % (model, e))
sys.exit(-1)
self._image_msg = Image()
self._pose_estimator = TfPoseEstimator(graph_path, target_size=(w, h))
self._cv_bridge = CvBridge()
self._pose_pub = self.create_publisher(Frame, "pose", 1)
self._image_sub = self.create_subscription(Image, image_topic, self.image_cb, 10)
if depth_image_topic and camera_info_topic:
self._depth_image_sub = self.create_subscription(
Image, depth_image_topic, self.depth_image_cb, 1)
self._camera_info_sub = self.create_subscription(
CameraInfo, camera_info_topic, self.camera_info_cb, 1)
self._depth_image_msg = Image()
self.DISTANCE_INFO = True
self._camera_info_ready = False
self.get_logger().info("Subscribe to " + image_topic)
self.get_logger().info("Ready!")
def destroy(self):
super().destroy_node()
def camera_info_cb(self, msg):
self.fx = msg.k[0]
self.fy = msg.k[4]
self.cx = msg.k[2]
self.cy = msg.k[5]
self._camera_info_ready = True
def depth_image_cb(self, msg):
self._depth_image_msg = msg
def image_cb(self, msg):
self._image_msg = msg
def humans_to_msg(self, humans):
frame = Frame()
for human in humans:
person = Person()
for k in human.body_parts:
body_part = human.body_parts[k]
if body_part.score > 0.55:
body_part_msg = BodyPart()
pixel = Pixel()
body_part_msg.id = str(body_part.part_idx)
pixel.x = int(body_part.x * self._image_msg.width + 0.5)
pixel.y = int(body_part.y * self._image_msg.height + 0.5)
body_part_msg.pixel = pixel
body_part_msg.score = body_part.score
person.body_parts.append(body_part_msg)
frame.persons.append(person)
return frame
def get_2d_pose(self):
msg = Frame()
try:
cv_image = self._cv_bridge.imgmsg_to_cv2(self._image_msg, "bgr8")
except CvBridgeError as e:
self.get_logger().error('[tf-pose-estimation] Converting Image Error. ' + str(e))
return msg
acquired = self._tf_lock.acquire(False)
if not acquired:
return msg
try:
humans = self._pose_estimator.inference(
cv_image, resize_to_default=True, upsample_size=self._resize_out_ratio)
finally:
self._tf_lock.release()
msg = self.humans_to_msg(humans)
msg.image_w = self._image_msg.width
msg.image_h = self._image_msg.height
msg.header = self._image_msg.header
return msg
def get_3d_pose(self, frame):
output_part = BodyPart()
o_frame = Frame()
try:
cv_image = self._cv_bridge.imgmsg_to_cv2(self._depth_image_msg, "32FC1") #16UC1
#cv_image_norm = cv2.normalize(cv_image, cv_image, 0, 1, cv2.NORM_MINMAX)
except CvBridgeError as e:
self.get_logger().error('[tf-pose-estimation] Converting Depth Image Error. ' + str(e))
return o_frame
if self._camera_info_ready:
for person in frame.persons:
parts = []
for part in person.body_parts:
output_part = part
depth = float(cv_image[part.pixel.x, part.pixel.y])
#center_coordinates = (part.pixel.x, part.pixel.y)
#cv_image_norm = cv2.circle(cv_image_norm, center_coordinates, 2, (255, 0, 0), 1)
output_part.point.x = (
((part.pixel.x - self.cx) * depth) / self.fx)
output_part.point.y = (
((part.pixel.y - self.cy) * depth) / self.fy)
output_part.point.z = depth
parts.append(output_part)
person.body_parts = parts
o_frame.persons.append(person)
#cv2.imshow("Image window", cv_image)
#cv2.waitKey(1)
o_frame.image_w = frame.image_w
o_frame.image_h = frame.image_h
o_frame.header = frame.header
return o_frame
def publish_pose(self):
msg = Frame()
self.frame_2d_msg = self.get_2d_pose()
msg = self.frame_2d_msg
if self.DISTANCE_INFO:
msg = self.get_3d_pose(self.frame_2d_msg)
self._pose_pub.publish(msg)
def start(self):
"""Start the tf_pose_estimation node"""
self.get_logger().info("Spinning...")
while rclpy.ok():
self.publish_pose()
rclpy.spin_once(self, timeout_sec=0.033)
def main(args=None):
rclpy.init(args=args)
node = TFPoseEstimationNode()
try:
node.start()
except KeyboardInterrupt:
pass
node.destroy()
rclpy.shutdown()
if __name__ == '__main__':
main()
```
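`get_3d_pose` back-projects each body-part pixel into camera coordinates with the standard pinhole model (x = (u - cx) * d / fx, y = (v - cy) * d / fy, z = d); the standalone arithmetic below uses made-up intrinsics, not values from the node.
```python
# Pinhole back-projection as in get_3d_pose; every number here is hypothetical.
fx, fy, cx, cy = 615.0, 615.0, 320.0, 240.0   # assumed camera intrinsics
u, v, depth = 400.0, 300.0, 2.5               # pixel coordinates and depth in metres
x = (u - cx) * depth / fx
y = (v - cy) * depth / fy
z = depth
print(round(x, 3), round(y, 3), z)            # 0.325 0.244 2.5
```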
|
{
"source": "jginsberg3/ThinkBayes2",
"score": 3
}
|
#### File: ThinkBayes2/jgfiles/chapter2.py
```python
from thinkbayes2 import Pmf
# instattiate a Pmf object
pmf = Pmf()
# assign 6 values to the pmf (the integers 1-6)
# the probability of each will be 1/6
for x in [1,2,3,4,5,6]:
pmf.Set(x, 1/6)
# normalize to ensure all the probabilities add up to 1
# in the example above they already do, but you could make examples that didn't
pmf.Normalize()
# then you can check the probability for any object in the pmf like:
pmf[1]
## the cookie problem
pmf = Pmf()
pmf.Set('Bowl 1', 0.5)
pmf.Set('Bowl 2', 0.5)
# these are the priors
# update the distribution based on new data
# to do this, multiply each prior by the corresponding likelihood
pmf.Mult('Bowl 1', 0.75)
pmf.Mult('Bowl 2', 0.5)
# after this, the distribution is no longer normalized so do that again
pmf.Normalize()
# then you can check the new probabilities
pmf.Prob('Bowl 1')
## the bayesian framework
# create a class that inherets from Pmf
class Cookie(Pmf):
def __init__(self, hypos):
Pmf.__init__(self)
for hypo in hypos:
self.Set(hypo, 1)
self.Normalize()
def Update(self, data):
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
self.Normalize()
mixes = {
'Bowl 1': {'vanilla': 0.75, 'chocolate': 0.25},
'Bowl 2': {'vanilla': 0.5, 'chocolate': 0.5}
}
def Likelihood(self, data, hypo):
mix = self.mixes[hypo]
like = mix[data]
return like
hypos = ['Bowl 1', 'Bowl 2']
pmf = Cookie(hypos)
# draw cookies like:
#pmf.Update('chocolate')
#pmf.Update('vanilla')
# and see how the probabilities change
#for hypo, prob in pmf.Items():
# print(hypo,prob)
### the monty hall problem
class Monty(Pmf):
def __init__(self, hypos):
Pmf.__init__(self)
for hypo in hypos:
self.Set(hypo, 1)
self.Normalize()
# update is the same as cookies
def Update(self, data):
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
self.Normalize()
# likelihood is different though
# no need to define separate mixes object this time
def Likelihood(self, data, hypo):
if hypo == data:
return 0
elif hypo == 'A':
return 0.5
else:
return 1
# you can choose any of the three doors
hypos = 'ABC'
pmf = Monty(hypos)
# the update process is similar:
data = 'B'
pmf.Update(data)
# encapsulating framework
from thinkbayes2 import Suite
class Monty(Suite):
def Likelihood(self, data, hypo):
if hypo == data:
return 0
        elif hypo == 'A':
return 0.5
else:
return 1
suite = Monty('ABC')
suite.Update('B')
suite.Print()
### the M&M problem
class MnM(Suite):
mix94 = dict(brown=30,
yellow=20,
red=20,
green=10,
orange=10,
tan=10)
mix96 = dict(blue=24,
green=20,
orange=16,
yellow=14,
red=13,
brown=13)
hypoA = dict(bag1=mix94, bag2=mix96)
hypoB = dict(bag1=mix96, bag2=mix94)
hypotheses = dict(A=hypoA, B=hypoB)
def Likelihood(self, data, hypo):
bag, color = data
mix = self.hypotheses[hypo][bag]
like = mix[color]
return like
# create the suite like:
suite = MnM('AB')
# update it like:
#suite.Update(('bag1','yellow'))
# see the results like:
#suite.Print()
```
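As a sanity check on the cookie update above (not code from the book), the posterior for Bowl 1 after one vanilla draw can be computed by hand and matches the 0.6 that `pmf.Prob('Bowl 1')` reports:
```python
# Hand computation of the cookie posterior after one vanilla cookie.
prior = 0.5
like_bowl1, like_bowl2 = 0.75, 0.5            # P(vanilla | bowl)
unnorm = (prior * like_bowl1, prior * like_bowl2)
posterior_bowl1 = unnorm[0] / sum(unnorm)
assert abs(posterior_bowl1 - 0.6) < 1e-9      # same value as pmf.Prob('Bowl 1')
```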
|
{
"source": "jgirardet/apistar-jwt",
"score": 2
}
|
#### File: apistar-jwt/apistar_jwt/decorators.py
```python
def anonymous_allowed(fn):
fn.authenticated = False
return fn
def authentication_required(fn):
fn.authenticated = True
return fn
```
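These decorators only set an `authenticated` flag on the handler for the JWT component to inspect; a hypothetical pair of handlers showing how they might be applied (the view names are made up):
```python
# Hypothetical handlers marked with the decorators above; enforcement itself
# happens elsewhere, these calls just set fn.authenticated.
from apistar_jwt.decorators import anonymous_allowed, authentication_required

@authentication_required
def private_view() -> dict:
    return {'message': 'authenticated users only'}   # private_view.authenticated == True

@anonymous_allowed
def public_view() -> dict:
    return {'message': 'open to everyone'}           # public_view.authenticated == False
```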
#### File: apistar-jwt/example/app.py
```python
import datetime
from apistar import App, Route, exceptions, types, validators
from apistar_jwt.token import JWT, JWTUser
# Fake user database
USERS_DB = {'id': 1, 'email': '<EMAIL>', 'password': 'password'}
class UserData(types.Type):
email = validators.String()
password = validators.String()
def welcome(user: JWTUser) -> dict:
message = f'Welcome {user.username}#{user.id}, your login expires at {user.token["exp"]}'
return {'message': message}
def login(data: UserData, jwt: JWT) -> dict:
# do some check with your database here to see if the user is authenticated
if data.email != USERS_DB['email'] or data.password != USERS_DB['password']:
raise exceptions.Forbidden('Incorrect username or password.')
payload = {
'id': USERS_DB['id'],
'username': USERS_DB['email'],
'iat': datetime.datetime.utcnow(),
'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60) # ends in 60 minutes
}
token = jwt.encode(payload)
if token is None:
# encoding failed, handle error
raise exceptions.BadRequest()
return {'token': token}
routes = [
Route('/', method='GET', handler=welcome),
Route('/login', method='POST', handler=login),
]
components = [
JWT({
'JWT_SECRET': '<KEY>',
}),
]
app = App(routes=routes, components=components)
if __name__ == '__main__':
app.serve('127.0.0.1', 8080, use_debugger=True, use_reloader=True)
```
|
{
"source": "jgirardet/RustPython",
"score": 3
}
|
#### File: tests/snippets/builtin_dir.py
```python
class A:
def test():
pass
a = A()
assert "test" in dir(a), "test not in a"
assert "test" in dir(A), "test not in A"
class B(A):
def __dir__(self):
return ('q', 'h')
# Gets sorted and turned into a list
assert ['h', 'q'] == dir(B())
# This calls type.__dir__ so isn't changed (but inheritance works)!
assert 'test' in dir(A)
import socket
assert "AF_INET" in dir(socket)
```
#### File: tests/snippets/dict.py
```python
def dict_eq(d1, d2):
return (all(k in d2 and d1[k] == d2[k] for k in d1)
and all(k in d1 and d1[k] == d2[k] for k in d2))
assert dict_eq(dict(a=2, b=3), {'a': 2, 'b': 3})
assert dict_eq(dict({'a': 2, 'b': 3}, b=4), {'a': 2, 'b': 4})
assert dict_eq(dict([('a', 2), ('b', 3)]), {'a': 2, 'b': 3})
a = {'g': 5}
b = {'a': a, 'd': 9}
c = dict(b)
c['d'] = 3
c['a']['g'] = 2
assert dict_eq(a, {'g': 2})
assert dict_eq(b, {'a': a, 'd': 9})
a.clear()
assert len(a) == 0
a = {'a': 5, 'b': 6}
res = set()
for value in a.values():
res.add(value)
assert res == set([5,6])
count = 0
for (key, value) in a.items():
assert a[key] == value
count += 1
assert count == len(a)
res = set()
for key in a.keys():
res.add(key)
assert res == set(['a','b'])
```
|
{
"source": "JGirdwood/AeroSAM_logger",
"score": 3
}
|
#### File: AeroSAM_logger/pix/__init__.py
```python
from pymavlink import mavutil
import time
class MavlinkConnection(object):
"""
An object to represent a connection with an FC via the MAVLINK protocol
"""
def __init__(self, port, baudrate):
self.start_date = []
self.start_time = []
self.all_data_received = 0
self.got_system_time = 0
self.got_global_position_int = 0
self.got_scaled_pressure = 0
self.master = mavutil.mavlink_connection(port, baud=baudrate)
self.wait_for_connection()
self.master.wait_heartbeat()
self.lat = 0
self.lon = 0
self.alt_m = 0
self.vz_ms = 0
self.press_hPa = 0
self.epoch_time = 0
self.boot_time = 0
self.master.mav.request_data_stream_send(self.master.target_system, self.master.target_component,
mavutil.mavlink.MAV_DATA_STREAM_ALL, 1, 1)
def wait_for_connection(self):
msg = None
while not msg:
self.master.mav.ping_send(time.time(), 0, 0, 0)
msg = self.master.recv_match()
time.sleep(0.5)
def data_packet_handler(self):
wait = True
msg = []
while wait:
msg = self.master.recv_match(blocking=False)
if msg:
break
msg_type = msg.get_type()
if msg_type == "GLOBAL_POSITION_INT":
self.got_global_position_int = 1
self.lat = msg.lat
self.lon = msg.lon
self.alt_m = msg.alt
self.vz_ms = msg.vz
if msg_type == "SCALED_PRESSURE":
self.got_scaled_pressure = 1
self.press_hPa = msg.press_abs
if msg_type == "SYSTEM_TIME":
self.got_system_time = 1
self.boot_time = msg.time_boot_ms
self.epoch_time = msg.time_unix_usec
def fill_info_buffer(self):
timeout = 0
while True:
timeout = timeout+1
self.data_packet_handler()
check = self.got_system_time * self.got_scaled_pressure * self.got_global_position_int
if check == 1:
self.all_data_received = 1
self.got_global_position_int = 0
self.got_scaled_pressure = 0
self.got_system_time = 0
break
elif timeout == 60:
self.all_data_received = 0
break
else:
time.sleep(0.01)
def get_date_time(self):
while True:
self.fill_info_buffer()
if self.epoch_time != 0:
break
epoch_sec = self.epoch_time/1000000
date_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(epoch_sec)))
date_time = date_time.split()
self.start_date = date_time[0]
self.start_time = date_time[1]
```
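`get_date_time` converts the SYSTEM_TIME epoch, which MAVLink reports in microseconds, into a date string and a time string; the same conversion in isolation, with an invented timestamp:
```python
# Stand-alone illustration of the epoch conversion in get_date_time();
# the timestamp value is made up for the example.
import time

epoch_time_us = 1600000000000000               # microseconds since the Unix epoch
epoch_sec = epoch_time_us // 1000000
stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(epoch_sec))
start_date, start_time = stamp.split()
print(start_date, start_time)                  # e.g. 2020-09-13 12:26:40 (local time)
```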
|
{
"source": "jgirgle/Mapping-Mambas",
"score": 3
}
|
#### File: Mapping-Mambas/flask app/app.py
```python
from flask import Flask, jsonify, render_template
import pandas as pd
from sqlalchemy import create_engine
#Create the SQLite engine
engine = create_engine("sqlite:///static/data/mappingmambas.sqlite")
#Reflect database into new model
# Base = automap_base()
#Reflect the tables and pass in the engine
# Base.prepare(engine, reflect=True)
#Label tables from classes
# Championships = Base.classes.championships
#Create a session and bind it to the engine
# session = Session(engine)
################
#Flask Setup
################
#Create an app for Flask setup
app = Flask(__name__)
################
#Flask Routes
################
#List all available api routes
@app.route("/")
def welcome():
return render_template("index.html")
@app.route("/championshipdata")
def championshipdata():
championshipdata = pd.read_sql("select * from championships",engine)
championshipjson = championshipdata.to_dict(orient="records")
return jsonify(championshipjson)
@app.route("/championships")
def championships():
return render_template("barchart_race.html")
@app.route("/bubblechart")
def bubblechart():
return render_template("bubblechart.html")
#Define main behavior
if __name__ == "__main__":
app.run(host="localhost", port=5000, debug=True)
```
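A hypothetical client for the `/championshipdata` route above, assuming the app is running locally on port 5000:
```python
# Not part of the app: a quick client-side check of the JSON endpoint.
import requests

rows = requests.get('http://localhost:5000/championshipdata').json()
print(rows[:2])   # first two championship records as dicts
```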
|
{
"source": "jgirgle/Mars-Web-Scrape",
"score": 3
}
|
#### File: jgirgle/Mars-Web-Scrape/scrape_mars.py
```python
from splinter import Browser
from bs4 import BeautifulSoup
import pandas as pd
from webdriver_manager.chrome import ChromeDriverManager
from urllib.parse import urljoin, urldefrag
import time
# Get NASA news
# Import Splinter, BeautifulSoup, and Pandas
def scrape():
# Path to chromedriver
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=True)
# Visit the mars nasa news site
url = 'https://mars.nasa.gov/news/'
browser.visit(url)
# Optional delay for loading the page
time.sleep(1)
# Convert the browser html to a soup object and then quit the browser
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
# .find() the content title and save it as `news_title`
news_title = soup.find_all('div', class_='content_title')[1].find(target="_self").text
# .find() the paragraph text
para_texts = soup.find_all('div', class_="article_teaser_body")[1].text
# quit the browser so it doesn't stay open
# Get JPL space image
# This library enables us to join relative urls to a root url to create an absolute url
# Visit JPL space images Mars URL
url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html'
browser.visit(url)
time.sleep(1)
# Find the more info button and click that.
try:
browser.links.find_by_partial_text('FULL IMAGE').click()
html_image = browser.html
except:
print("Scraping Complete")
# Parse the resulting html with soup
soup_image = BeautifulSoup(html_image, 'html.parser')
# find the relative image url
rel_url = soup_image.find_all('img', class_='fancybox-image')[0].attrs['src']
# Use the base url to create an absolute url. Use urldefrag and urljoin to create a url and remove any extra folders in the filepath. Then select the first element in the resulting list
featured_pic = urldefrag(urljoin(url, rel_url))[0]
# Import Mars facts
# Create a dataframe from the space-facts.com mars page
url = 'https://space-facts.com/mars/'
tables = pd.read_html(url)
mars_df = tables[1].set_index("Mars - Earth Comparison")
# clean the dataframe and export to HTML
mars_df.replace('\n', '')
html_tables = mars_df.to_html(None,index=False)
# Get hemisphere data
    # visit the USGS astrogeology page for hemisphere data from Mars
url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
time.sleep(1)
html = browser.html
soup = BeautifulSoup(html, 'lxml')
# Build the list of enhanced urls
hemi_image_urls = []
enhanced_url_list = []
hemi_all = soup.find_all('div', class_='item')
for hemi in hemi_all:
title = hemi.find('h3').text
enhanced_url = hemi.a['href']
enhanced_url = f'https://astrogeology.usgs.gov{enhanced_url}'
enhanced_url_list.append(enhanced_url)
# Build a list of dictionaries with the title and images
for enh_url in enhanced_url_list:
browser.visit(enh_url)
time.sleep(1)
html = browser.html
soup = BeautifulSoup(html, "html.parser")
org_hemi_url = soup.find('div', class_='downloads').a['href']
hemi_image_urls.append({"title": title, "img_url": org_hemi_url})
scrape_dict = {
"news_title": news_title,
"para_texts": para_texts,
"featured_pic": featured_pic,
"html_tables": html_tables,
"hemi_image_urls": hemi_image_urls
}
browser.quit()
return scrape_dict
```
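`scrape()` returns a plain dict, so a caller might look like the sketch below (hypothetical; running it launches a headless browser and hits the live sites):
```python
# Hypothetical caller; the keys mirror the scrape_dict built above.
import scrape_mars

data = scrape_mars.scrape()
print(data['news_title'])
print(len(data['hemi_image_urls']), 'hemisphere image urls')
```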
|
{
"source": "jgissend10/django-easy-eddie",
"score": 2
}
|
#### File: management/commands/fetch_cloud_watch_log_events.py
```python
from django.core.management.base import BaseCommand
from easy_eddie.report_fetchers import fetch_logs_of_unprocessed_sms_events
class Command(BaseCommand):
help = 'Fetches log data from CloudWatch for all unprocessed SMS events.'
def handle(self, *args, **options):
fetch_logs_of_unprocessed_sms_events()
```
#### File: django-easy-eddie/easy_eddie/report_fetchers.py
```python
import json
from datetime import datetime
import pytz
from django.utils.timezone import now
from typing import List
from easy_eddie.clients import get_boto_client
from easy_eddie.helpers import get_cloud_watch_filter_pattern, get_cloud_watch_time_tuple_for_sms_events
from easy_eddie.models import SMSEvent
from easy_eddie.settings import CLOUD_WATCH_LOG_GROUP_NAMES
def fetch_log_events(sms_events: List[SMSEvent], log_group_names: List[str]):
if not sms_events:
return False
client = get_boto_client(service_name='logs')
start, end = get_cloud_watch_time_tuple_for_sms_events(sms_events=sms_events)
sms_events_dict = {sms_event.sns_message_id: sms_event for sms_event in sms_events}
# The filter pattern used to fetch the events must be less than 1024 chars. So we have to split our events into
# chunks before creating the request.
sms_event_chunks = get_sms_events_as_chunks(sms_events=sms_events, chunk_size=10)
for sms_event_chunk in sms_event_chunks:
for log_group_name in log_group_names:
paginator = client.get_paginator('filter_log_events')
response_iterator = paginator.paginate(
logGroupName=log_group_name,
startTime=start,
endTime=end,
filterPattern=get_cloud_watch_filter_pattern(sms_events=sms_event_chunk),
interleaved=True,
)
for page in response_iterator:
for event in page.get('events', []):
message = json.loads(event['message'])
message_id = message['notification']['messageId']
sms_event = sms_events_dict.get(message_id)
if sms_event:
delivery = message.get('delivery', {})
sms_event.fetched_cloud_watch_log = now()
sms_event.sns_dwell_time = delivery.get('dwellTimeMs')
sms_event.sns_dwell_time_until_device_acknowledgement = delivery.get(
'dwellTimeMsUntilDeviceAck')
sms_event.sns_mcc = delivery.get('mcc')
sms_event.sns_mnc = delivery.get('mnc')
sms_event.sns_phone_carrier = delivery.get('phoneCarrier', '')
sms_event.sns_price = delivery.get('priceInUSD')
sms_event.sns_provider_response = delivery.get('providerResponse')
sms_event.sns_sms_type = delivery.get('smsType')
sms_event.sns_status = message['status']
# Format timestamp
timestamp = message['notification']['timestamp']
naive_datetime = datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S.%f')
time_zone_aware_timestamp = pytz.utc.localize(naive_datetime)
sms_event.sns_timestamp = time_zone_aware_timestamp
sms_event.save()
def fetch_logs_of_unprocessed_sms_events():
unprocessed_sms_events = SMSEvent.objects.unprocessed()
fetch_log_events(sms_events=unprocessed_sms_events, log_group_names=CLOUD_WATCH_LOG_GROUP_NAMES)
def get_sms_events_as_chunks(sms_events: List[SMSEvent], chunk_size: int):
for i in range(0, len(sms_events), chunk_size):
yield sms_events[i:i + chunk_size]
```
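The chunking above is driven by `get_sms_events_as_chunks`, a plain generator, so the batching behaviour can be checked without Django or AWS. A minimal sketch with stand-in message ids instead of real `SMSEvent` rows:
```python
def get_as_chunks(items, chunk_size):
    # same slicing pattern as get_sms_events_as_chunks above
    for i in range(0, len(items), chunk_size):
        yield items[i:i + chunk_size]

fake_ids = [f"message-{n}" for n in range(25)]
for chunk in get_as_chunks(fake_ids, chunk_size=10):
    print(len(chunk))  # 10, 10, 5 -> each batch keeps the CloudWatch filter pattern short
```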
#### File: django-easy-eddie/easy_eddie/reports.py
```python
import csv
from io import StringIO
from django.utils.translation import ugettext_lazy as _
from typing import List
from easy_eddie.models import SMSEvent
def get_sms_event_report(sms_events: List[SMSEvent]) -> StringIO:
csv_file = StringIO()
csv_writer = csv.writer(csv_file)
# Write column headlines
csv_writer.writerow([
_('Timestamp'),
_('Message ID'),
_('Status'),
_('Phone Carrier'),
_('Destination'),
_('Price (USD)'),
_('Dwell Time'),
_('Dwell Time Until Device Acknowledgement'),
_('SMS Type'),
_('Provider Response'),
_('MCC'),
_('MNC'),
])
for sms_event in sms_events:
csv_writer.writerow([
sms_event.sns_timestamp.isoformat() if sms_event.sns_timestamp else '',
sms_event.sns_message_id,
sms_event.sns_status,
sms_event.sns_phone_carrier,
sms_event.sns_destination,
sms_event.sns_price,
sms_event.sns_dwell_time,
sms_event.sns_dwell_time_until_device_acknowledgement,
sms_event.sns_sms_type,
sms_event.sns_provider_response,
sms_event.sns_mcc,
sms_event.sns_mnc,
])
# Seek back to the beginning of the file
csv_file.seek(0)
return csv_file
```
|
{
"source": "jgj9883/Mask-Detector",
"score": 2
}
|
#### File: jgj9883/Mask-Detector/mask_detector-1.py
```python
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.models import load_model
import numpy as np
import cv2
import matplotlib.pyplot as plt
import os
import matplotlib.font_manager as fm
from flask import Flask, render_template
import RPi.GPIO as GPIO
import glob
import time
GPIO.setmode(GPIO.BCM)
GPIO.setup(17, GPIO.OUT)
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
path = 'NanumBarunGothic.ttf'
fontprop =fm.FontProperties(fname=path, size=18)
facenet = cv2.dnn.readNet('model/deploy.prototxt', 'model/res10_300x300_ssd_iter_140000.caffemodel')
model = load_model('model/mask_detector.model')
#model.summary()
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp():
lines = read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
return temp_c
def mask_predict():
img = cv2.imread('./static/img/capture.jpg')
h, w = img.shape[:2]
plt.figure(figsize=(8, 5))
plt.imshow(img[:, :, ::-1])
blob = cv2.dnn.blobFromImage(img, scalefactor=1., size=(300, 300), mean=(104., 177., 123.))
facenet.setInput(blob)
dets = facenet.forward()
faces = []
for i in range(dets.shape[2]):
confidence = dets[0, 0, i, 2]
if confidence < 0.5:
continue
x1 = int(dets[0, 0, i, 3] * w)
y1 = int(dets[0, 0, i, 4] * h)
x2 = int(dets[0, 0, i, 5] * w)
y2 = int(dets[0, 0, i, 6] * h)
face = img[y1:y2, x1:x2]
faces.append(face)
#plt.figure(figsize=(16, 6))
for i, face in enumerate(faces):
plt.subplot(1, len(faces), i + 1)
plt.imshow(face[:, :, ::-1])
#plt.figure(figsize=(16, 5))
for i, face in enumerate(faces):
face_input = cv2.resize(face, dsize=(224, 224))
face_input = cv2.cvtColor(face_input, cv2.COLOR_BGR2RGB)
face_input = preprocess_input(face_input)
face_input = np.expand_dims(face_input, axis=0)
mask, nomask = model.predict(face_input).squeeze()
plt.subplot(1, len(faces), i + 1)
plt.imshow(face[:, :, ::-1])
if (mask * 100) < 30 :
plt.title('마스크를 미착용했습니다. %.2f%%' % (100 - mask * 100), fontproperties=fontprop)
print("red_led on")
GPIO.output(17, True)
else :
plt.title('마스크를 착용했습니다. %.2f%%' % (mask * 100),fontproperties=fontprop)
print("red_led off")
GPIO.output(17, False)
#plt.show()
plt.savefig('./static/img/output10.jpg')
print("-------process loading ---------")
app = Flask(__name__)
@app.route('/')
def home():
temp = read_temp()
return render_template('monitering.html', temp=temp, image_file='img/output10.jpg')
if __name__ == '__main__':
try :
cap = cv2.VideoCapture(0)
while(True):
# Capture frame-by-frame
ret,frame = cap.read()
# Display the resulting frame
cv2.imshow('frame',frame)
key = cv2.waitKey(10)
if key == 27:
break
if key == ord(' '):
cv2.imwrite('./static/img/capture.jpg',frame)
mask_predict()
app.run(host='localhost', port=8080)
except :
GPIO.output(17, False)
# When everything done, release the capture
GPIO.cleanup()
cap.release()
cv2.destroyAllWindows()
```
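The per-face preprocessing inside `mask_predict` (resize to 224×224, BGR→RGB, MobileNetV2 scaling, add a batch axis) can be exercised on a dummy crop; this sketch does not load the Caffe face detector or the Keras mask model:
```python
import numpy as np
import cv2
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input

face = np.random.randint(0, 255, (180, 140, 3), dtype=np.uint8)  # stand-in BGR face crop
face_input = cv2.resize(face, dsize=(224, 224))
face_input = cv2.cvtColor(face_input, cv2.COLOR_BGR2RGB)
face_input = preprocess_input(face_input)        # rescales pixel values to [-1, 1]
face_input = np.expand_dims(face_input, axis=0)  # shape becomes (1, 224, 224, 3)
print(face_input.shape, face_input.min(), face_input.max())
```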
|
{
"source": "jgj9883/PrediectPriceUsingRNN",
"score": 3
}
|
#### File: PrediectPriceUsingRNN/RNN/PredictPrice.py
```python
import pandas as pd
from konlpy.tag import Okt
import random
import pickle
from keras.preprocessing import sequence
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import losses
from tensorflow.keras import optimizers
from tensorflow.keras import models
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
def data_load(option):
csv = pd.read_csv('train_data_add.csv')
titles_csv = csv['title']
prices_csv = csv['price']
if option == "titles" :
csv = titles_csv
elif option == 'price' :
csv = prices_csv
return csv
def tokenizer_create(text):
okt = Okt()
text_pos = okt.pos(text, norm=True)
words = []
for word in text_pos:
words.append(word[0])
return words
def words_to_ids(words, word_dict):
ids = []
for word in words:
try:
ids.append(word_dict.index(word))
except Exception as e:
print(e)
return ids
def dictionary_create():
    # expose the structures built here so the pickle/caching code below can save them
    global titles_words, dictionary, titles_ids
okt = Okt()
words_set = set()
titles_words = []
count = 1
for title in data_load("titles"):
title_pos = okt.pos(title, norm=True)
words = []
for word in title_pos:
words_set.add(word[0])
words.append(word[0])
titles_words.append(words)
count += 1
dictionary = list(words_set)
random.shuffle(dictionary)
dictionary = [0] + dictionary
titles_ids = []
count = 1
for title in titles_words:
words_id = words_to_ids(title, dictionary)
titles_ids.append(words_id)
count += 1
def ids_to_words(ids):
words = []
for word_id in ids:
if word_id != 0:
words.append(dictionary[word_id])
return words
def sequence_create(text_ids) :
sequence_np = sequence.pad_sequences([text_ids], maxlen=max_title_len, padding='post')
return sequence_np
try:
with open("titles_words.bin", "rb") as f:
titles_words = pickle.load(f)
with open("dictionary.bin", "rb") as f:
dictionary = pickle.load(f)
with open("titles_ids.bin", "rb") as f:
titles_ids = pickle.load(f)
except Exception as e:
dictionary_create()
with open("titles_words.bin", "wb") as f:
pickle.dump(titles_words, f)
with open("dictionary.bin", "wb") as f:
pickle.dump(dictionary, f)
with open("titles_ids.bin", "wb") as f:
pickle.dump(titles_ids, f)
max_title_len = max(len(title_ids) for title_ids in titles_ids)
# print(max_title_len)
titles_ids_np = sequence.pad_sequences(titles_ids, maxlen=max_title_len, padding='post')
# print(titles_ids_np)
prices_np = np.array([[price] for price in data_load("price")])
# print(prices_np)
index = [i for i in range(len(titles_ids_np))]
random.shuffle(index)
train_len = int(len(index) * 0.9)
train_index = index[:train_len]
test_index = index[train_len:]
# print(len(titles_ids_np))
# print(len(train_index))
# print(len(test_index))
X_train = titles_ids_np[train_index]
X_test = titles_ids_np[test_index]
scaler = MinMaxScaler() # StandardScaler()
scaler.fit(prices_np)
y_scaled = scaler.transform(prices_np)
y_train_scaled = y_scaled[train_index]
y_test_scaled = y_scaled[test_index]
# print(prices_np)
# print(y_scaled)
vocab_size = len(dictionary)
model_name = "baseline_model_data_add.h5"
def model_create() :
model = keras.Sequential([
layers.Embedding(vocab_size, 64),
layers.Bidirectional(layers.LSTM(64, return_sequences=True)),
layers.Bidirectional(layers.LSTM(32)),
layers.Dense(64, activation='relu'),
layers.Dropout(0.5),
layers.Dense(1)
])
model.summary()
model.compile(loss=losses.MeanSquaredError(), optimizer=optimizers.Adam(1e-4), metrics=['mae'])
history = model.fit(X_train, y_train_scaled, epochs=10, validation_data=(X_test, y_test_scaled),
validation_steps=30, verbose=1)
    model.save(model_name)
    return model
try:
model = models.load_model(model_name)
except Exception as e:
print(e)
    model = model_create()
def plot_graphs(history, metric):
plt.plot(history.history[metric])
plt.plot(history.history['val_' + metric], '')
plt.xlabel("Epochs")
plt.ylabel(metric)
plt.legend([metric, 'val_' + metric])
plt.show()
# plot_graphs(history, 'mae')
# plot_graphs(history, 'loss')
# price_predictions = model.predict(X_test)
#
# y_test_inverse = scaler.inverse_transform(y_test_scaled)
# price_predictions_inverse = scaler.inverse_transform(price_predictions)
# for i in range(100):
# print(f"{i}: {ids_to_words(X_test[i])}")
# print(f"{i}: {y_test_inverse[i]} = {price_predictions_inverse[i]}")
# print()
# print(ids_to_words(X_test[5]))
def predict_phone(text) :
text_words = tokenizer_create(text)
print(text_words)
text_ids = words_to_ids(text_words, dictionary)
text_ids_np = sequence_create(text_ids)
text_predictions = model.predict(text_ids_np)
text_predictions_inverse = scaler.inverse_transform(text_predictions)
print(text_predictions_inverse)
return text_predictions_inverse
test1 = "아이폰SE 2세대 128G 59만원에 판매합니다"
test2 = "갤럭시S9 256G 29만원 판매합니다"
predict_phone(test1)
predict_phone(test2)
```
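The title → word-id → padded-sequence flow used for both training and `predict_phone` can be illustrated with a toy dictionary; this sketch skips the Okt tokenizer and the trained model and only shows the shapes involved:
```python
from keras.preprocessing import sequence

toy_dictionary = [0, '아이폰', 'SE', '2세대', '판매']  # index 0 is reserved for padding
words = ['아이폰', 'SE', '판매']
ids = [toy_dictionary.index(w) for w in words]        # [1, 2, 4]
padded = sequence.pad_sequences([ids], maxlen=8, padding='post')
print(padded)  # [[1 2 4 0 0 0 0 0]]
```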
|
{
"source": "jgj9883/Unsupervised-learning",
"score": 3
}
|
#### File: Unsupervised-learning/DataAnalysis/Make the code.py
```python
import pandas as pd
INPUT_PREFIX = './data/TCS_영업소간통행시간_1시간_1개월_2020'
OUTPUT_PREFIX = './data/data_2020'
OUTPUT_EXTENSION = '.csv'
output_dataframes = []
def generateData(month):
month_string = str(month)
length_month_string = len(month_string)
if length_month_string == 1:
month_string = '0' + month_string
input_file = INPUT_PREFIX + month_string
output_file = OUTPUT_PREFIX + month_string + OUTPUT_EXTENSION
print('INPUT :', input_file, ' OUTPUT : ', output_file)
data = pd.read_csv(input_file, sep=",", encoding="euc-kr")
data_clean = data.drop(['Unnamed: 6'], axis='columns')
data_clean = data_clean[data_clean.통행시간 > 0]
df_data = pd.DataFrame(data_clean, columns=['집계일자', '집계시', '출발영업소코드', '도착영업소코드', '통행시간'])
start_from_101 = df_data[df_data.출발영업소코드 == 101]
start_from_101_to_140 = start_from_101[start_from_101['도착영업소코드'].isin([105, 110, 115, 120, 125, 130, 135, 140])]
start_from_101_to_140 = start_from_101_to_140.assign(
요일=pd.to_datetime(start_from_101_to_140['집계일자'], format='%Y%m%d').dt.dayofweek)
    start_from_101_to_140 = start_from_101_to_140.sort_values(by=['집계일자', '집계시'])
start_from_101_to_140.to_csv(output_file, index=None, header=True)
output_dataframes.append(start_from_101_to_140)
for month in range(1, 13):
generateData(month)
output_data = pd.concat(output_dataframes, ignore_index=True, sort=False)
final = OUTPUT_PREFIX + OUTPUT_EXTENSION
output_data.to_csv(final, index=None, header=True)
```
|
{
"source": "jgj98/PuppetGan",
"score": 3
}
|
#### File: jgj98/PuppetGan/syn_weiz_data.py
```python
from glob import glob
from random import shuffle
import cv2
import numpy as np
import subprocess as sp
SIZE = 64 # Size of the real images
def synthPrep(pth):
"""
Prepares synthetic data for training
args:
pth: path to synthetic image
"""
img = cv2.imread(pth)
img = cv2.resize(img, (SIZE, SIZE*3), interpolation = cv2.INTER_AREA)
board = 255*np.ones((SIZE*3, SIZE*3, 3), dtype=np.uint8)
board[:, :SIZE, :] = img
return board
def makeSynGrid(pths, out):
"""
Prepares synthetic data for evaluation in a GRID like in the paper
args:
pths: paths to the synthetic images
out: Folder to store the dataset
"""
for i in range(0, 1000, 10):
imgs = [cv2.imread(pths[k]) for k in range(i, i+10)]
imgs = [cv2.resize(i, (SIZE, SIZE*3), interpolation = cv2.INTER_AREA) for i in imgs]
grid = 255*np.ones((SIZE*10, SIZE*10, 3), dtype=np.uint8)
for j in range(10):
grid[SIZE*j:SIZE*(j+1), 0:SIZE, :] = imgs[j][SIZE:SIZE*2, 0:SIZE, :]
cv2.imwrite(out+f"image_{i}.png", grid)
pths_r = glob('./data/sample/weizman_c/*png') # Glob path to real images
pths_syn = glob('./data/sample/synaction/*.png') # Glob path to synthetic images
shuffle(pths_r), shuffle(pths_syn)
out = './data/syn_weiz_6/' # Output folder to store the dataset
sp.run(['mkdir', '-p', out+'real_/real'])
for pth in pths_r[:-10]:
file = out+'real_/real/'+pth.split('/')[-1]
img = cv2.imread(pth)
cv2.imwrite(file, img)
sp.run(['mkdir', '-p', out+'synth_/synth'])
for pth in pths_syn[:-1000]:
file = out+'synth_/synth/'+pth.split('/')[-1]
img = synthPrep(pth)
cv2.imwrite(file, img)
sp.run(['mkdir', '-p', out+'rows_'])
sp.run(['mkdir', '-p', out+'rows_/real'])
for pth in pths_r[-10:]:
file = out+'rows_/real/'+pth.split('/')[-1]
img = cv2.imread(pth)
cv2.imwrite(file, img)
sp.run(['mkdir', '-p', out+'rows_/synth'])
makeSynGrid(pths_syn[-1000:], out+'rows_/synth/')
```
|
{
"source": "jgjudd/python_exercises_2",
"score": 4
}
|
#### File: jgjudd/python_exercises_2/ex19.py
```python
def cheese_and_crackers(cheese_count, boxes_of_crackers):
print(f"You have {cheese_count} cheeses")
print(f"You have {boxes_of_crackers} boxes of crackers")
print("That's a lot")
print("Get a blanket\n")
print("We can just give the function numbers directly")
cheese_and_crackers(20, 30)
print("Or, we can use variables from our script")
amount_of_cheese = 10
amount_of_crackers = 50
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
print("We can even do math inside too: ")
cheese_and_crackers(10 + 20, 5 + 6)
print("And we can combine the two, variables and math: ")
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
```
|
{
"source": "jgkawell/feedback_cclfd",
"score": 3
}
|
#### File: feedback_cclfd/scripts/teleop.py
```python
import os
import sys
import rospy
import numpy as np
from std_msgs.msg import Bool
""" This class allows for manual triggering of classification """
class teleop():
def __init__(self):
rospy.init_node('teleop', anonymous=True)
# initializing publisher
self.pub_face = rospy.Publisher(
'/classifiers/face', Bool, queue_size=10)
self.pub_motion = rospy.Publisher(
'/classifiers/motion', Bool, queue_size=10)
def main(self):
# Request input from user
rospy.loginfo("TELEOP: Press [enter] to trigger" +
" negative classification...")
raw_input().upper()
        rospy.loginfo("TELEOP: Triggering negative classification!")
# Publish negative classifications
self.pub_face.publish(False)
self.pub_motion.publish(False)
# Exit cleanly
sys.exit(0)
if __name__ == '__main__':
try:
obj = teleop()
obj.main()
rospy.spin()
except rospy.ROSInterruptException:
pass
```
#### File: src/classifiers/synthesizer.py
```python
import rospy
from std_msgs.msg import String
from std_msgs.msg import Bool
from feedback_cclfd.msg import Classification
""" This class synthesizes the various classifications of human
affect/motion to see if the behavior is negative """
class synthesizer():
def __init__(self):
self.face = True
self.motion = True
# initialize pub/sub
rospy.init_node('synthesizer', anonymous=True)
rospy.Subscriber("/classifiers/face", Bool, self.callback_face)
rospy.Subscriber("/classifiers/motion", Bool, self.callback_motion)
self.synthesis_pub = rospy.Publisher(
'/classifiers/synthesis', Classification, queue_size=10)
rospy.loginfo("SYNTHESIZER: Starting...")
def main(self):
# loop checking the labels sent in by the other classifiers
triggered = False
msg = Classification()
while not rospy.is_shutdown():
# TODO: Replace this with a weighted sum based on confidence levels
# if the face is negative or the motion is negative, trigger repair
if (not self.face or not self.motion) and not triggered:
rospy.loginfo("SYNTHESIZER: Recognized a negative response!")
msg.timestamp = rospy.Time.now()
msg.classification = False
self.synthesis_pub.publish(msg)
triggered = True
def callback_face(self, msg):
self.face = msg.data
def callback_motion(self, msg):
self.motion = msg.data
if __name__ == '__main__':
try:
obj = synthesizer()
obj.main()
rospy.spin()
except rospy.ROSInterruptException:
pass
```
#### File: src/planners/constraint_update.py
```python
import rospy
from feedback_cclfd.srv import Constraint
from cairo_lfd_msgs.msg import NodeTime
from std_msgs.msg import UInt8, String
import json
class ConstraintUpdate():
def __init__(self):
rospy.init_node("constraint_updater")
# subscriber to listen to keyframe transitions
self.node_time_sub = rospy.Subscriber("/lfd/node_time",
NodeTime,
self.node_time_callback)
# subscriber to listen for user trigger for bad skill
# assumes trigger is String('True') until false
self.user_trigger_sub = rospy.Subscriber("user_trigger",
UInt8,
self.user_trigger_callback)
# publisher to publish to constraint update topic in
# cairo-cclfd/controllers/study_controller.FeedbackLfDStudyController
self.update_pub = rospy.Publisher('/cairo_lfd/model_update',
String,
queue_size=10)
# list to hold keyframes that occured durring trigger
# NOTE: needs to be reset in between updates
# self.keyframesUpdate = list(range(20)) # NOTE: for testing
self.keyframesUpdate = []
# var to hold trigger status
self.trigger = True
# callback to listen for keyframes while trigger is true
def node_time_callback(self, data):
if self.trigger:
self.keyframesUpdate.append(int(data.cur_node))
self.keyframesUpdate.append(int(data.next_node))
# callback to listen for trigger
def user_trigger_callback(self, data):
self.trigger = data.data
# function to publish to constraint update
def update_constraints(self):
# need to wait for all keyframes to be collected and for constraint
# to update service call to ask for constraint before updating skill
rospy.wait_for_service('add_constraint')
try:
self.add_constraint = rospy.ServiceProxy(
"add_constraint", Constraint) # constraint ID to be updated
resp = self.add_constraint(1)
except rospy.ServiceException:
rospy.logwarn("Service setup failed (add_constraint)")
update_dict = {}
for keyframe in self.keyframesUpdate:
update_dict[keyframe] = {"applied_constraints": [resp.constraint]}
self.update_pub.publish(json.dumps(update_dict))
# clear stored keyframes and constraint
self.keyframesUpdate = []
if __name__ == '__main__':
test = ConstraintUpdate()
test.update_constraints()
```
#### File: src/planners/demonstrator.py
```python
import rospy
import numpy as np
import json
import time
from std_msgs.msg import String
from std_msgs.msg import Bool
from rospy.numpy_msg import numpy_msg
from feedback_cclfd.srv import RequestFeedback
from feedback_cclfd.srv import PerformDemonstration
from feedback_cclfd.msg import ConstraintTypes
from cairo_nlp.srv import TTS, TTSResponse
""" This class is responsible for sampling constraints and
demonstrating them to the user for feedback. """
class Demonstrator():
def __init__(self):
rospy.init_node('demonstrator')
self.finished_first_demo = False
# start pub/sub
rospy.Subscriber("/planners/constraint_types",
numpy_msg(ConstraintTypes),
self.sample_demonstrations)
self.demos_pub = rospy.Publisher(
"/planners/demonstrations", String, queue_size=10)
# set up client for demonstration service
rospy.wait_for_service("feedback_demonstration")
try:
self.feedback_demonstration = rospy.ServiceProxy(
"feedback_demonstration", PerformDemonstration)
except rospy.ServiceException:
rospy.logwarn("Service setup failed (feedback_demonstration)")
# set up client for feedback service
rospy.wait_for_service("request_feedback")
try:
self.request_feedback = rospy.ServiceProxy(
"request_feedback", RequestFeedback)
except rospy.ServiceException:
rospy.logwarn("Service setup failed (request_feedback)")
# Set up client for NLP TTS service
rospy.wait_for_service("/nlp/google/tts")
try:
self.tts_server = rospy.ServiceProxy(
"/nlp/google/tts", TTS)
except rospy.ServiceException:
rospy.logerr("Service setup failed (/nlp/google/tts)")
rospy.loginfo("DEMONSTRATOR: Starting...")
def run(self):
# perform a bad demo to start
rospy.loginfo("DEMONSTRATOR: Starting first skill execution...")
self.tts_server("I am going to hand you the mug.")
finished = self.feedback_demonstration(0) # 0 = negative
if finished.response:
self.finished_first_demo = True
rospy.spin()
def sample_demonstrations(self, constraint_types):
# run until complete
while True:
# don't perform alternative demos until first is finished
if self.finished_first_demo:
num_demos = 2
rospy.loginfo("DEMONSTRATOR: Sampling demonstrations...")
cur_type = constraint_types.data
results = dict()
for i in range(0, num_demos):
# perform a single demonstration
constraint = i
self.tts_server("I am going to try the skill again.")
finished = self.feedback_demonstration(constraint)
if finished.response:
# request feedback about demonstration from user
feedback_type = constraint == 1
msg = self.request_feedback(feedback_type)
key = i
if msg.response:
rospy.loginfo(
"DEMONSTRATOR: Response was POSITIVE!")
results[key] = 1
else:
rospy.loginfo(
"DEMONSTRATOR: Response was NEGATIVE")
results[key] = 0
# save feedback results
rospy.loginfo("DEMONSTRATOR: Saving feedback...")
encoded_data_string = json.dumps(results)
self.demos_pub.publish(encoded_data_string)
# demonstrate what has been learned
rospy.loginfo("DEMONSTRATOR: Showing what has been learned...")
self.tts_server("Let me show you what I have learned.")
for key, value in results.items():
if value:
constraint = key
self.feedback_demonstration(constraint)
break
break
else:
# wait a second
rospy.loginfo(
"DEMONSTRATOR: Waiting for first demo to be finished...")
time.sleep(1)
self.tts_server("Thank you for helping me learn!")
rospy.loginfo("FINISHED!!!")
if __name__ == '__main__':
try:
obj = Demonstrator()
obj.run()
except rospy.ROSInterruptException:
pass
```
#### File: src/visualizations/joint_test.py
```python
import rospy
import numpy as np
from time import sleep
from math import pi, sqrt, sin, cos
from sensor_msgs.msg import JointState
from std_msgs.msg import Header
from geometry_msgs.msg import PoseArray
from geometry_msgs.msg import Pose
from moveit_msgs.msg import DisplayTrajectory
from moveit_msgs.msg import RobotTrajectory
from trajectory_msgs.msg import JointTrajectoryPoint
from rospy_helpers import origin_pose
def circ_angle(index, N):
return 2*pi*(index/N)
class QTester:
""" Exercise the joints """
def __init__(self, refreshRate=300):
# Start the node
rospy.init_node('QTester')
# Set rate
self.heartBeatHz = refreshRate # ----------- Node refresh rate [Hz]
# Best effort to maintain 'heartBeatHz'
# URL: http://wiki.ros.org/rospy/Overview/Time
self.idle = rospy.Rate(self.heartBeatHz)
# Start publishers
self.pub = rospy.Publisher("/viz/ctrl", JointState, queue_size=10)
self.pubX = rospy.Publisher(
"/viz/pointPlans", PoseArray, queue_size=100)
self.pubQ = rospy.Publisher(
"/move_group/display_planned_path",
DisplayTrajectory,
queue_size=100)
# Init vars
self.initTime = rospy.Time.now().to_sec()
self.qCmd = JointState()
self.qCmd.header = Header()
self.qCmd.header.seq = 0
self.qCmd.header.stamp = rospy.get_rostime()
self.qCmd.name = ['head_pan', 'right_j0', 'right_j1',
'right_j2', 'right_j3', 'right_j4',
'right_j5', 'right_j6']
self.qCmd.position = [0.0 for i in range(len(self.qCmd.name))]
self.qCmd.header.frame_id = 'pedestal'
self.Qspeed = pi / 300.0
# Modes available to command arm
# int32 POSITION_MODE = 1
# int32 VELOCITY_MODE = 2
# int32 TORQUE_MODE = 3
# int32 TRAJECTORY_MODE = 4
rospy.loginfo("JOINT TEST: Init completed!")
def test_planned_paths_X(self):
""" Spam the system with dummy poses """
Z = 0.0
rospy.loginfo("JOINT TEST: Publishing pose plans ...")
for i in range(3):
rospy.loginfo("JOINT TEST: %d", i+1)
Z += 0.100
arr = PoseArray()
N = 100
radius = 0.75
ptsList = [[radius*cos(circ_angle(i, N)), radius *
sin(circ_angle(i, N)), Z] for i in range(N)]
for pnt in ptsList:
pose_i = origin_pose()
pose_i.position.x = pnt[0]
pose_i.position.y = pnt[1]
pose_i.position.z = pnt[2]
arr.poses.append(pose_i)
self.pubX.publish(arr)
rospy.loginfo("JOINT TEST: COMPLETE")
def test_planned_paths_Q(self):
""" Spam the system with dummy configurations """
rospy.loginfo("JOINT TEST: Publishing configuration plans")
starts = [0.0, pi/3.0, 2.5*pi/3.0]
span = 1.50*pi/6.0
N = 100
for i in range(3):
rospy.loginfo("JOINT TEST: %d", i+1)
angles = np.linspace(starts[i], starts[i] + span, N)
traj = DisplayTrajectory()
rTrj = RobotTrajectory()
for angle in angles:
q = [angle for j in range(6)]
t_i = JointTrajectoryPoint()
t_i.positions = q
rTrj.joint_trajectory.points.append(t_i)
sleep(1.0)
traj.trajectory.append(rTrj)
self.pubQ.publish(traj)
rospy.loginfo("JOINT TEST: COMPLETE")
def run(self):
if 0:
sleep(2.0)
self.test_planned_paths_X()
if 1:
sleep(2.0)
self.test_planned_paths_Q()
# While ROS is running
while (not rospy.is_shutdown()):
qNu = [self.qCmd.position[i] +
self.Qspeed for i in range(len(self.qCmd.name))]
self.qCmd.position = list(qNu)
self.qCmd.header.seq += 1
self.qCmd.header.stamp = rospy.get_rostime()
self.pub.publish(self.qCmd)
# Wait until the node is supposed to fire next
self.idle.sleep()
# Post-shutdown activities
else:
rospy.loginfo("Node Shutdown after %d seconds.",
rospy.Time.now().to_sec() - self.initTime)
if __name__ == "__main__":
try:
refreshRateHz = rospy.get_param('graphics_refresh_rate', 60)
obj = QTester(refreshRateHz)
obj.run()
except rospy.ROSInterruptException:
pass
```
#### File: src/visualizations/path_display.py
```python
import rospy
import numpy as np
from time import sleep
from math import pi, sin, cos
from geometry_msgs.msg import PoseArray
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Point
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from rospy_helpers import origin_pose, unpack_ROS_xform, \
RollingList, vec_dif_mag, unpack_ROS_pose
from geometry_msgs.msg import TransformStamped
# ~~ Program Constants ~~
_DEFAULTSCALE = 0.01 # Default width of `LINE_STRIP`
_DISTMARGIN = 0.02 # Limit on how close wrist point history can be together
_HISTORYLIMIT = 50 # -- Max number of points of wrist history to display
_PATHNUMLIMIT = 1 # -- Max number of paths to show on the screen
_GREEN = [0/255.0, 255/255.0, 0/255.0]
_BLUE = [66/255.0, 227/255.0, 255/255.0] # 66, 227, 255
_NAMESPACE = "paths"
class PathDisplay:
def add_Pose_arr(self, msg):
""" Add the path to the list of paths """
nuPath = []
for pose in msg.poses:
posn, ornt = unpack_ROS_pose(pose)
nuPath.append(posn)
self.add_path(nuPath, _BLUE)
def update_history(self, msg):
""" Update the mug with the new xform --> pose """
# Get the position from the message
posn, ornt = unpack_ROS_xform(msg)
dist = vec_dif_mag(posn, self.pathList[0][-1])
# If the next point is sufficient distance from the last
if dist >= _DISTMARGIN:
self.pathList[0].append(posn)
def __init__(self, refreshRate=300):
# Start the node
rospy.init_node('PathDisplay')
# Set rate
self.heartBeatHz = refreshRate # ----------- Node refresh rate [Hz]
# Best effort to maintain 'heartBeatHz'
# URL: http://wiki.ros.org/rospy/Overview/Time
self.idle = rospy.Rate(self.heartBeatHz)
# Start subscribers and listeners
rospy.Subscriber("/viz/pointPlans", PoseArray, self.add_Pose_arr)
rospy.Subscriber("/viz/wristXform", TransformStamped,
self.update_history)
# Start publishers
self.pubSngl = rospy.Publisher("/viz/markers", Marker, queue_size=100)
self.pubMany = rospy.Publisher(
"/viz/mrkr_arr", MarkerArray, queue_size=100)
# Init vars
self.initTime = rospy.Time.now().to_sec()
# NOTE: The first element is ALWAYS the path traced by the wrist
self.pathList = [RollingList(_HISTORYLIMIT, initVal=[0, 0, 0])]
self.colrList = [_GREEN]
self.mrkrArr = MarkerArray()
self.mrkrDex = 0
self.hasRun = False
def get_next_index(self):
self.mrkrDex += 1
return self.mrkrDex
def add_path(self, path, color=_GREEN):
""" Add a list of points to the list of paths """
# Add the path and the color
self.pathList.append(path)
self.colrList.append(color)
# If the number of paths have been exceeded
# then erase the first static path (index 1)
if (len(self.pathList) - 1) > _PATHNUMLIMIT:
self.pathList.pop(1)
self.colrList.pop(1)
def del_path(self, index):
""" Remove path from list and the marker array
NOTE: Static path indices begin at 1 """
if index > 0:
self.pathList.pop(index)
self.colrList.pop(index)
def create_line_marker(self, ptsList,
scale=_DEFAULTSCALE, color=_GREEN,
mrkrNS=_NAMESPACE):
""" Return a marker composed of the points """
# Create marker
trace = Marker()
# Populate header
trace.header.stamp = rospy.Time.now()
trace.header.frame_id = "base"
# Set marker info for a persistent marker
trace.ns = mrkrNS
trace.action = trace.ADD
trace.type = trace.LINE_STRIP
trace.id = 200 + self.get_next_index()
# How long the object should last before being automatically deleted.
# 0 means forever
trace.lifetime = rospy.Duration(0)
# Set marker size
# Line strips: Only scale.x is used and it controls
# the width of the line segments.
trace.scale.x = scale
# Set marker color
trace.color.a = 1.0
trace.color.r = color[0]
trace.color.g = color[1]
trace.color.b = color[2]
# Set the marker pose
trace.pose = origin_pose()
# Build the points list
for pnt in ptsList:
mrkrPnt = Point()
mrkrPnt.x = pnt[0]
mrkrPnt.y = pnt[1]
mrkrPnt.z = pnt[2]
trace.points.append(mrkrPnt)
# Return marker
return trace
def publish_all(self):
self.mrkrArr.markers = []
for pDex, path in enumerate(self.pathList):
temp = self.create_line_marker(path, color=self.colrList[pDex])
temp.id = pDex
self.mrkrArr.markers.append(temp)
self.pubMany.publish(self.mrkrArr)
def test_lines(self):
""" Spam some lines to the screen """
rospy.loginfo("PATH DISPLAY: Running the test function")
def circ_angle(index, N):
return 2*pi*(index/N)
N = 100
radius = 0.75
ptsList = [[radius*cos(circ_angle(i, N)), radius *
sin(circ_angle(i, N)), 0.0] for i in range(N)]
mrkr = self.create_line_marker(ptsList, mrkrNS="test")
for i in range(5):
self.pubSngl.publish(mrkr)
sleep(0.1)
def run(self):
""" Publish all of the currently stored paths """
# While ROS is running
while (not rospy.is_shutdown()):
if 0 and (not self.hasRun):
self.test_lines()
self.hasRun = True
self.publish_all()
# Wait until the node is supposed to fire next
self.idle.sleep()
# Post-shutdown activities
else:
rospy.loginfo("Node Shutdown after %d seconds.",
rospy.Time.now().to_sec() - self.initTime)
if __name__ == "__main__":
try:
refreshRateHz = rospy.get_param('graphics_refresh_rate', 60)
obj = PathDisplay(refreshRateHz)
obj.run()
except rospy.ROSInterruptException:
pass
```
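`update_history` only appends a wrist point once it has moved at least `_DISTMARGIN` away from the last recorded one, which keeps the trace from filling up with near-duplicate points. A standalone sketch of that distance gate, with plain lists instead of ROS transforms:
```python
import math

DIST_MARGIN = 0.02
history = [[0.0, 0.0, 0.0]]

def maybe_record(point):
    # append only when the new point is far enough from the previous one
    if math.dist(point, history[-1]) >= DIST_MARGIN:
        history.append(point)

maybe_record([0.005, 0.0, 0.0])  # too close, ignored
maybe_record([0.050, 0.0, 0.0])  # far enough, recorded
print(len(history))              # 2
```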
|
{
"source": "jgke/haapa",
"score": 3
}
|
#### File: haapa/docs/docs.py
```python
from jinja2 import Environment, PackageLoader, FileSystemLoader
from jinja2.exceptions import UndefinedError
from markdown import markdown
from datetime import datetime
import yaml
import argparse
import traceback
def markdown_filter(value, *args, **kwargs):
return markdown(value, *args, **kwargs)
def render(notifier):
data = None
print("Rendering")
with open("CONFIGURATION.yaml") as fp:
data = yaml.safe_load(fp.read())
data["time"] = datetime.utcnow()
with open("docs.html", "w") as fp:
env = Environment(loader=FileSystemLoader("."))
env.filters["markdown"] = markdown_filter
try:
res = env.get_template("template.html").render(data)
fp.write(res)
except UndefinedError:
traceback.print_exc()
def main():
parser = argparse.ArgumentParser(description='Generate documentation.')
parser.add_argument('--watch', action='store_true',
help="Use pyinotify to watch for changes")
args = parser.parse_args()
if args.watch:
import pyinotify
import functools
wm = pyinotify.WatchManager()
notifier = pyinotify.Notifier(wm)
wm.add_watch('CONFIGURATION.yaml', pyinotify.IN_CLOSE_WRITE)
wm.add_watch('template.html', pyinotify.IN_CLOSE_WRITE)
on_loop_func = functools.partial(render)
notifier.loop(daemonize=False, callback=on_loop_func)
else:
render(None)
if __name__ == "__main__":
main()
```
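The only non-standard piece of the renderer is the `markdown` Jinja filter registered on the environment; its behaviour can be checked in isolation with an inline template (no `CONFIGURATION.yaml` or `template.html` needed):
```python
from jinja2 import Environment, BaseLoader
from markdown import markdown

env = Environment(loader=BaseLoader())
env.filters["markdown"] = markdown

template = env.from_string("<section>{{ text | markdown }}</section>")
print(template.render(text="Some **bold** documentation"))
# <section><p>Some <strong>bold</strong> documentation</p></section>
```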
|
{
"source": "jgkelly/FreezerState",
"score": 2
}
|
#### File: freezerstate/notifiers/slack.py
```python
import freezerstate.config
import requests
class SlackSender():
def __init__(self, test_enabled=None, test_slack_server=None):
self.module = '[SLACK]'
self.enabled = freezerstate.CONFIG.SLACK_ENABLED if test_enabled is None else test_enabled
self.webhook_url = freezerstate.CONFIG.SLACK_WEBHOOK_URL if test_slack_server is None else test_slack_server
def notify(self, message):
payload = {
"text": message
}
return self.notify_extended(payload)
def notify_extended(self, payload):
if (self.enabled is False):
print(f'{self.module} - Slack Sender is disabled')
return False
try:
requests.post(self.webhook_url, json=payload, verify=True)
except Exception as e:
print(f'{self.module} - Slack notify failed: {e}')
return False
return True
```
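Because the constructor accepts test overrides, `SlackSender` can be exercised without a populated `freezerstate.CONFIG`; the webhook URL below is a placeholder, not a real endpoint:
```python
from freezerstate.notifiers.slack import SlackSender

sender = SlackSender(
    test_enabled=True,
    test_slack_server='https://hooks.slack.com/services/T000/B000/placeholder')  # hypothetical URL
sender.notify('Freezer temperature back in range')
```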
#### File: jgkelly/FreezerState/freezerstate.py
```python
import os
import io
import sys
import time
import threading
import argparse
import numpy
import humanize
from datetime import datetime
from os import listdir, system
from flask import Flask, jsonify, render_template, make_response
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import freezerstate.config
import freezerstate.conversion
system('modprobe w1-gpio')
system('modprobe w1-therm')
app = Flask('freezerstate')
@app.route("/")
def index():
uptime_delta = datetime.now() - freezerstate.START_TIME
uptime_string = humanize.naturaldelta(uptime_delta.total_seconds())
template_data = {
'location': freezerstate.CONFIG.LOCATION,
'time': freezerstate.GRAPH.last_time(),
'temperature': freezerstate.GRAPH.last_temp(),
'units': freezerstate.CONVERSION.UnitString(),
'min_temperature': freezerstate.CONVERSION.UnitizedTemperature(freezerstate.RANGE_MIN),
'max_temperature': freezerstate.CONVERSION.UnitizedTemperature(freezerstate.RANGE_MAX),
'uptime' : uptime_string
}
return render_template('index.html', **template_data)
@app.route('/plot/temp')
def plot_temp():
self_lock = threading.Lock()
with self_lock:
fig = Figure(figsize=(10, 8), dpi=100)
ys = numpy.array(freezerstate.GRAPH.temperatures())
axis = fig.add_subplot(1, 1, 1)
axis.set_title(f'{freezerstate.CONFIG.LOCATION} Temperature History')
axis.set_ylabel(
f'Temperature ({freezerstate.CONVERSION.UnitString()})')
axis.set_xlabel('Time')
axis.grid(True)
xs = numpy.array(freezerstate.GRAPH.times())
axis.plot(xs, ys)
canvas = FigureCanvas(fig)
output = io.BytesIO()
canvas.print_png(output)
response = make_response(output.getvalue())
response.mimetype = 'image/png'
return response
def raw_temperature():
f = open(find_sensor(), 'r')
lines = f.readlines()
f.close()
return lines
def find_sensor():
devices = listdir(freezerstate.DEVICE_FOLDER)
devices = [device for device in devices if device.startswith('28-')]
if devices:
return freezerstate.DEVICE_FOLDER + devices[0] + freezerstate.DEVICE_SUFFIX
else:
sys.exit("Could not find temperature sensor...")
def get_temperature():
lines = raw_temperature()
    while not lines or len(lines) < 2 or lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = raw_temperature()
output_temperature = lines[1].split('t=')[1]
    if output_temperature:
celsius = round(float(output_temperature) / 1000.0, 1)
return celsius
def main_thread(name):
if hasattr(sys, 'frozen'):
freezerstate.FULL_PATH = os.path.abspath(sys.executable)
else:
freezerstate.FULL_PATH = os.path.abspath(__file__)
freezerstate.PROG_DIR = os.path.dirname(freezerstate.FULL_PATH)
freezerstate.ARGS = sys.argv[1:]
parser = argparse.ArgumentParser(
description='Temperature monitor and alerter')
parser.add_argument('--datadir', help='Alternate data directory')
parser.add_argument('--config', help='Alternate path to config file')
args = parser.parse_args()
if args.datadir:
freezerstate.DATA_DIR = args.datadir
else:
freezerstate.DATA_DIR = freezerstate.PROG_DIR
if args.config:
freezerstate.CONFIG_FILE = args.config
else:
freezerstate.CONFIG_FILE = os.path.join(
freezerstate.DATA_DIR, 'config.ini')
print(f'Loading configuration from: {freezerstate.CONFIG_FILE}')
freezerstate.initialize(freezerstate.CONFIG_FILE)
print('Monitoring temperature...')
freezerstate.NOTIFY.send_startup_message()
while True:
temperature = get_temperature()
print(
f'Time: {datetime.now()} : {freezerstate.CONVERSION.TemperatureString(temperature, True)}')
freezerstate.GRAPH.plot(
datetime.now(), freezerstate.CONVERSION.UnitizedTemperature(temperature))
freezerstate.NOTIFY.update(temperature)
time.sleep(freezerstate.CONFIG.SAMPLE_FREQUENCY)
return
if __name__ == "__main__":
x = threading.Thread(target=main_thread, args=(1,))
x.start()
app.run(host='0.0.0.0', port=80, debug=False)
```
#### File: FreezerState/freezerstate/statusupdate.py
```python
import freezerstate.config
import time
import datetime
class StatusUpdate:
def __init__(self, test_enabled=None, test_times=None):
self.module = '[StatusUpdate]'
self.notification_times = []
notify_times = freezerstate.CONFIG.STATUS_CHECK_TIMES if test_enabled is None else test_times
self.load_times(notify_times)
def should_notify(self, time_value):
test_time = time_value
if (type(time_value) == datetime.datetime):
# time(hour = time_value.hour, minute = time_value.minute)
test_time = time_value.time()
test_text = test_time.strftime('%H:%M')
for x in self.notification_times:
if x.tm_hour == time_value.hour and x.tm_min == time_value.minute:
return True
return False
def load_times(self, times):
if times is None:
return
time_list = times.split(',')
if len(time_list) == 0:
return
for x in time_list:
try:
note_time = time.strptime(x, '%H:%M')
self.notification_times.append(note_time)
except ValueError as ve:
print(f'Time value: {x} is not a valid time - Ignoring')
```
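`should_notify` reduces to comparing hour/minute pairs parsed with `time.strptime('%H:%M')` against the current time; the same check can be sketched without the surrounding config plumbing:
```python
import time
from datetime import datetime

notification_times = [time.strptime(t, '%H:%M') for t in '08:00,12:30,21:15'.split(',')]

def should_notify(moment):
    return any(t.tm_hour == moment.hour and t.tm_min == moment.minute
               for t in notification_times)

print(should_notify(datetime(2023, 1, 1, 12, 30)))  # True
print(should_notify(datetime(2023, 1, 1, 12, 31)))  # False
```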
#### File: freezerstate/updaters/timedstatus.py
```python
import freezerstate.config
import freezerstate.statusupdate
import freezerstate.notifiers.notifiers
import humanize
from datetime import datetime
class TimedStatus():
def __init__(self):
self.module = '[TimedStatus]'
self.alert_frequency = freezerstate.CONFIG.ALERT_FREQUENCY
self.notifiers = freezerstate.notifiers.notifiers.Notifiers()
self.last_notify = datetime.min
self.status_update_times = freezerstate.statusupdate.StatusUpdate()
self.location = freezerstate.CONFIG.LOCATION
def update(self, temperature, current_time):
if self.status_update_times.should_notify(current_time):
self.send_status_update(temperature, current_time)
return False
def send_status_update(self, temperature, current_time):
difference = current_time - self.last_notify
if (difference.total_seconds() <= 60):
print(
f'--- It has been {difference.total_seconds()} seconds since last status update. Skipping status update')
return False
timestring = current_time.strftime(
freezerstate.CONFIG.DATE_TIME_STAMP_FORMAT)
uptime_diff = current_time - freezerstate.START_TIME
uptime = uptime_diff.total_seconds()
uptime_readable = humanize.time.precisedelta(uptime)
message = f'*{self.location}* status update.\n🌡 {freezerstate.CONVERSION.TemperatureString(temperature, True)}\n⏰ {timestring}\n💻 Uptime: {uptime_readable}'
print(f'--- {current_time}: Sending uptime notification')
self.last_notify = current_time
self.notifiers.notify(message)
```
|
{
"source": "jgke/tmc.py",
"score": 2
}
|
#### File: jgke/tmc.py/setup.py
```python
from setuptools import setup
import os
import sys
if sys.version_info < (3, 2, 0):
raise Exception("Only python 3.2+ is supported.")
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
exec(read('tmc/version.py'))
setup(
name='tmc',
version=__version__,
description='TestMyCode client',
long_description=read("README.rst"),
author='<NAME>',
author_email='<EMAIL>',
url='http://github.com/JuhaniImberg/tmc.py/',
license='MIT',
packages=['tmc', 'tmc.exercise_tests', 'tmc.tests', 'tmc.ui'],
entry_points={
'console_scripts': [
'tmc = tmc.__main__:main',
'tmc3 = tmc.__main__:main'
]
},
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Environment :: Console :: Curses",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4"
],
install_requires=[
"requests == 2.5.1",
"argh == 0.26.1",
"peewee == 2.4.5"
],
)
```
#### File: tmc.py/tmc/config.py
```python
import os
from os import path, environ
from configparser import ConfigParser
from collections import OrderedDict
class Config(object):
"""
This class will take care of ConfigParser and writing / reading the
configuration.
TODO: What to do when there are more variables to be configured? Should we
overwrite the users config file with the updated variables if the file is
lacking?
"""
config = None
filename = ""
defaults = None
def __init__(self):
default_path = path.join(path.expanduser("~"), ".config", "tmc.ini")
config_filepath = environ.get("TMC_CONFIGFILE", default_path)
super().__setattr__('filename', config_filepath)
super().__setattr__('config', ConfigParser())
self._update_defaults()
self.config["CONFIGURATION"] = {}
for i in self.defaults:
self.config["CONFIGURATION"][i] = str(self.defaults[i])
if self._exists():
self._load()
self._write()
def _update_defaults(self):
defaults = OrderedDict()
if os.name == "nt":
defaults["use_unicode_characters"] = False
defaults["use_ansi_colors"] = False
else:
defaults["use_unicode_characters"] = True
defaults["use_ansi_colors"] = True
defaults["tests_show_trace"] = False
defaults["tests_show_partial_trace"] = False
defaults["tests_show_time"] = True
defaults["tests_show_successful"] = True
super().__setattr__('defaults', defaults)
def _exists(self):
return path.isfile(self.filename)
def _write(self):
d = os.path.dirname(self.filename)
if not os.path.exists(d):
os.makedirs(d)
with open(self.filename, "w") as fp:
self.config.write(fp)
def _load(self):
with open(self.filename, "r") as fp:
self.config.read_file(fp)
for i in self.config["CONFIGURATION"]:
if i not in self.defaults:
print("Warning: unknown configuration option: " + i)
def __getattr__(self, name):
if isinstance(self.defaults.get(name), bool):
return self.config["CONFIGURATION"].getboolean(name)
return self.config["CONFIGURATION"].get(name)
def __setattr__(self, name, value):
self.config["CONFIGURATION"][name] = str(value)
```
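Since `__getattr__`/`__setattr__` proxy straight into the `CONFIGURATION` section, options read and write like ordinary attributes; a sketch, assuming the class is importable as `tmc.config.Config` and pointing the config file somewhere disposable:
```python
import os
os.environ["TMC_CONFIGFILE"] = "/tmp/tmc-test.ini"  # keep the sketch out of ~/.config

from tmc.config import Config

conf = Config()
print(conf.tests_show_time)   # boolean defaults come back as real bools via getboolean
conf.tests_show_trace = True  # stored as the string "True" in the ini section
conf._write()                 # persist to the file named above
```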
#### File: tmc/tests/__init__.py
```python
import os
import sys
from os import path
from tmc.__main__ import run_command
from tmc.errors import TMCExit
from tmc.version import __version__ as version
sys.path.append(os.getcwd())
from testsetup import (username, server_uri, course_id, exercise_id,
fail_file, fail_compile_file, success_file)
username = os.getenv("TMC_USERNAME", username)
password = os.getenv("TMC_PASSWORD", "")
server_uri = os.getenv("TMC_URI", server_uri)
course_id = os.getenv("TMC_CDI", course_id)
exercise_id = os.getenv("TMC_EID", exercise_id)
def test_version():
"""
Prints the version correctly
"""
stdout, _, _ = run_command("version")
assert "tmc.py version {}".format(version) in stdout
def test_reset():
"""
Database resetting works
"""
import tmc.ui.prompt
tmc.ui.prompt.input = lambda _: "y"
stdout, _, _ = run_command("reset")
assert "Database resetted." in stdout
tmc.ui.prompt.input = lambda _: "n"
stdout, _, _ = run_command("reset")
assert "Database resetted." not in stdout
def test_configure():
"""
Configuring works
"""
_, _, ex = run_command(
["configure", "-u", username, "-p", password, "-s", server_uri,
"-i", course_id, "--auto"]
)
assert ex is None
def test_next():
"""
Next works
"""
_, _, ex = run_command("next")
assert ex is None
def test_previous():
"""
Previous works
"""
os.environ["TMC_TESTING"] = "1"
_, _, ex = run_command("previous")
assert ex is None
_, _, ex = run_command("previous")
assert ex is not None
def test_select():
"""
Selecting works
"""
_, _, ex = run_command(["select", "-i", exercise_id])
assert ex is None
def test_download_single():
"""
Downloading works
"""
_, _, ex = run_command(["download", "-f", "-i", exercise_id])
assert ex is None
from tmc.models import Exercise
assert Exercise.get_selected().is_downloaded == True
def test_test_fail():
"""
Testing can fail
"""
from tmc.models import Exercise
fpath = path.join(Exercise.get_selected().path(), "src", "Nimi.java")
with open(fpath, "w") as f:
f.write(fail_file)
os.environ["TMC_TESTING"] = "1"
wasexit = False
stdout, stderr, exception = run_command("test")
if type(exception) == TMCExit:
wasexit = True
assert wasexit == True
assert "Results:" in stdout
assert "\033[31m" in stderr and "\033[0m" in stderr
def test_compile_fail():
"""
Compile can fail
"""
from tmc.models import Exercise
fpath = path.join(Exercise.get_selected().path(), "src", "Nimi.java")
with open(fpath, "w") as f:
f.write(fail_compile_file)
os.environ["TMC_TESTING"] = "1"
wasexit = False
stdout, stderr, exception = run_command("test")
if type(exception) == TMCExit:
wasexit = True
assert wasexit == True
assert "Results:" in stdout
assert "\033[31m" in stderr and "\033[0m" in stderr
def test_test_success():
"""
Testing can succeed
"""
from tmc.models import Exercise
fpath = path.join(Exercise.get_selected().path(), "src", "Nimi.java")
with open(fpath, "w") as f:
f.write(success_file)
os.environ["TMC_TESTING"] = "1"
wasexit = False
stdout, stderr, exception = run_command("test")
if type(exception) == TMCExit:
wasexit = True
assert wasexit == False
assert "Results:" in stdout
assert "\033[32m" in stdout and "\033[0m" in stdout
assert len(stderr) == 0
def test_submit_fail():
"""
Submitted exercise can fail
"""
from tmc.models import Exercise
fpath = path.join(Exercise.get_selected().path(), "src", "Nimi.java")
with open(fpath, "w") as f:
f.write(fail_file)
os.environ["TMC_TESTING"] = "1"
wasexit = False
stdout, stderr, exception = run_command("submit")
if type(exception) == TMCExit:
wasexit = True
assert wasexit == True
assert "Results:" in stdout
uri = os.getenv("TMC_URI", server_uri)
assert "Submission URL: " + uri + "submissions/" in stdout
assert "Pastebin: " + uri + "paste/" not in stdout
assert "Requested a review" not in stdout
assert "\033[31m" in stderr and "\033[0m" in stderr
def test_submit_success():
"""
Submitted exercise can succeed
"""
from tmc.models import Exercise
fpath = path.join(Exercise.get_selected().path(), "src", "Nimi.java")
with open(fpath, "w") as f:
f.write(success_file)
os.environ["TMC_TESTING"] = "1"
wasexit = False
stdout, stderr, exception = run_command(["submit", "-p", "-r"])
if type(exception) == TMCExit:
wasexit = True
assert wasexit == False
assert "Results:" in stdout
assert "Points [1]" in stdout
assert "Requested a review" in stdout
uri = os.getenv("TMC_URI", server_uri)
assert "Submission URL: " + uri + "submissions/" in stdout
assert "Pastebin: " + uri + "paste/" in stdout
assert len(stderr) == 0
```
|
{
"source": "jgkim999/csharp-example",
"score": 2
}
|
#### File: csharp-example/PythonTest1/Test.py
```python
import clr
clr.AddReference("System")
clr.AddReferenceToFile("IronPython.Modules.dll")
login_token = ''
def set_login_token(token):
global login_token
login_token = token
def make_login_id():
login_id = 'test'
login_id += str(proxy.Random(1, 100))
return login_id
def HelloWorld():
data = 'Hello World C# sdfadsfsdf'
return data
def HelloWorld2(data):
return data
def HelloWorld3():
proxy.ShowMessage('called sdfsdlfksdklflksdklflk proxy.ShowMessage')
def ListTest():
data = []
data.append('Hello')
data.append('World')
data.append('black falcon')
return data
class MyClass(object):
def __init__(self, value):
self.value = value
class Calculator:
def add(self, argA, argB):
return argA+argB
def sub(self, argA, argB):
return argA-argB
```
|
{
"source": "jgkwak95/AU-GAN",
"score": 3
}
|
#### File: jgkwak95/AU-GAN/utils.py
```python
import scipy.misc
import numpy as np
import copy
import os
class ImagePool(object):
def __init__(self, maxsize=50):
self.maxsize = maxsize
self.num_img = 0
self.images = []
def __call__(self, image):
if self.maxsize <= 0:
return image
if self.num_img < self.maxsize:
self.images.append(image)
self.num_img += 1
return image
if np.random.rand() > 0.5:
idx = int(np.random.rand()*self.maxsize)
tmp1 = copy.copy(self.images[idx])[0]
self.images[idx][0] = image[0]
idx = int(np.random.rand()*self.maxsize)
tmp2 = copy.copy(self.images[idx])[1]
self.images[idx][1] = image[1]
return [tmp1, tmp2]
else:
return image
def load_test_data(image_path, fine_size=256):
img = imread(image_path)
img = scipy.misc.imresize(img, [fine_size, fine_size*2])
img = img/127.5 - 1
return img
def check_folder(path):
if not os.path.exists(path):
os.mkdir(path)
def load_train_data(image_path, load_size=286, fine_size=256, is_testing=False):
img_A = imread(image_path[0])
img_B = imread(image_path[1])
if not is_testing:
img_A = scipy.misc.imresize(img_A, [load_size, load_size*2])
img_B = scipy.misc.imresize(img_B, [load_size, load_size*2])
h1 = int(np.ceil(np.random.uniform(1e-2, load_size-fine_size)))
w1 = int(np.ceil(np.random.uniform(1e-2, (load_size-fine_size)*2)))
img_A = img_A[h1:h1+fine_size, w1:w1+fine_size*2]
img_B = img_B[h1:h1+fine_size, w1:w1+fine_size*2]
if np.random.random() > 0.5:
img_A = np.fliplr(img_A)
img_B = np.fliplr(img_B)
else:
img_A = scipy.misc.imresize(img_A, [fine_size, fine_size*2])
img_B = scipy.misc.imresize(img_B, [fine_size, fine_size*2])
img_A = img_A/127.5 - 1.
img_B = img_B/127.5 - 1.
img_AB = np.concatenate((img_A, img_B), axis=2)
return img_AB
# -----------------------------
def get_image(image_path, image_size, is_crop=True, resize_w=64, is_grayscale = False):
return transform(imread(image_path, is_grayscale), image_size, is_crop, resize_w)
def save_images(images, size, image_path):
return imsave(inverse_transform(images), size, image_path)
def imread(path, is_grayscale = False):
if (is_grayscale):
return scipy.misc.imread(path, flatten = True).astype(np.float)
else:
return scipy.misc.imread(path, mode='RGB').astype(np.float)
def merge_images(images, size):
return inverse_transform(images)
def merge(images, size):
h, w = images.shape[1], images.shape[2]
img = np.zeros((h * size[0], w * size[1], 3))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j*h:j*h+h, i*w:i*w+w, :] = image
return img
def imsave(images, size, path):
return scipy.misc.imsave(path, merge(images, size))
def center_crop(x, crop_h, crop_w,
resize_h=64, resize_w=64):
if crop_w is None:
crop_w = crop_h
h, w = x.shape[:2]
j = int(round((h - crop_h)/2.))
i = int(round((w - crop_w)/2.))
return scipy.misc.imresize(
x[j:j+crop_h, i:i+crop_w], [resize_h, resize_w])
def transform(image, npx=64, is_crop=True, resize_w=64):
# npx : # of pixels width/height of image
if is_crop:
cropped_image = center_crop(image, npx, resize_w=resize_w)
else:
cropped_image = image
return np.array(cropped_image)/127.5 - 1.
def inverse_transform(images):
return (images+1.)/2.
def norm_img(img):
img = img / np.linalg.norm(img)
img = (img * 2.) - 1.
return img
def set_path(args, experiment_name):
args.checkpoint_dir = f'./check/{experiment_name}'
args.sample_dir = f'./check/{experiment_name}/sample'
if args.which_direction == 'AtoB':
args.test_dir = f'./check/{experiment_name}/testa2b'
else:
args.test_dir = f'./check/{experiment_name}/testb2a'
if not os.path.exists(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
if not os.path.exists(args.sample_dir):
os.makedirs(args.sample_dir)
if not os.path.exists(args.test_dir):
os.makedirs(args.test_dir)
```
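`ImagePool` keeps a rolling buffer of generated image pairs and, half of the time, hands the discriminator an older pair instead of the fresh one, a common trick for stabilising CycleGAN-style training. A small usage sketch with random arrays standing in for generator outputs; the shapes are illustrative only:
```python
import numpy as np

pool = ImagePool(maxsize=50)
for step in range(100):
    fake_A = np.random.rand(1, 256, 512, 3).astype(np.float32)
    fake_B = np.random.rand(1, 256, 512, 3).astype(np.float32)
    # either the fresh pair comes back, or a pair sampled from the pool's history
    hist_A, hist_B = pool([fake_A, fake_B])
```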
|
{
"source": "jglaser/affinity_pred",
"score": 2
}
|
#### File: affinity_pred/affinity_pred/infer_mpi.py
```python
from mpi4py import MPI
from mpi4py.futures import MPICommExecutor
import torch
import transformers
from transformers import AutoModelForSequenceClassification, BertModel, RobertaModel, BertTokenizerFast, RobertaTokenizer
from transformers import PreTrainedModel, BertConfig, RobertaConfig
from transformers import Trainer, TrainingArguments
from transformers.data.data_collator import default_data_collator
from transformers.tokenization_utils_base import BatchEncoding
from transformers import EvalPrediction
from transformers import AutoModelForMaskedLM
from transformers import AdamW
from transformers import HfArgumentParser
from dataclasses import dataclass, field
from transformers.integrations import deepspeed_config, is_deepspeed_zero3_enabled
import deepspeed
from torch.nn import functional as F
import toolz
import time
from functools import partial
import traceback
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from sklearn.metrics import mean_squared_error, mean_absolute_error
import pandas as pd
import re
import gc
import os
import json
import pandas as pd
import numpy as np
import requests
from tqdm.auto import tqdm
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DD
seq_model_name = "Rostlab/prot_bert_bfd" # for fine-tuning
# this logic is necessary because online-downloading and caching doesn't seem to work
if os.path.exists('seq_tokenizer'):
seq_tokenizer = BertTokenizerFast.from_pretrained('seq_tokenizer/', do_lower_case=False)
else:
seq_tokenizer = BertTokenizerFast.from_pretrained(seq_model_name, do_lower_case=False)
seq_tokenizer.save_pretrained('seq_tokenizer/')
model_directory = '/gpfs/alpine/world-shared/bip214/maskedevolution/models/bert_large_1B/model'
tokenizer_directory = '/gpfs/alpine/world-shared/bip214/maskedevolution/models/bert_large_1B/tokenizer'
tokenizer_config = json.load(open(tokenizer_directory+'/config.json','r'))
smiles_tokenizer = BertTokenizerFast.from_pretrained(tokenizer_directory, **tokenizer_config)
max_smiles_length = min(200,BertConfig.from_pretrained(model_directory).max_position_embeddings)
# Mpro has 306 residues
max_seq_length = min(4096,BertConfig.from_pretrained(seq_model_name).max_position_embeddings)
def expand_seqs(seqs):
input_fixed = ["".join(seq.split()) for seq in seqs]
input_fixed = [re.sub(r"[UZOB]", "X", seq) for seq in input_fixed]
return [list(seq) for seq in input_fixed]
# use distributed data parallel on a node-local basis for inference
#os.environ['RANK'] = os.environ['OMPI_COMM_WORLD_LOCAL_RANK']
#os.environ['WORLD_SIZE'] = os.environ['OMPI_COMM_WORLD_LOCAL_SIZE']
#os.environ['LOCAL_RANK'] = os.environ['OMPI_COMM_WORLD_LOCAL_RANK']
#os.environ['MASTER_ADDR'] = '127.0.0.1'
#os.environ['MASTER_PORT'] = '29500'
os.environ['RANK'] = '0'
os.environ['WORLD_SIZE'] = '1'
os.environ['LOCAL_RANK'] = '0'
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = str(29500+int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']))
#torch.cuda.set_device(int(os.environ['LOCAL_RANK']))
@dataclass
class InferenceArguments:
checkpoint: str = field(
default=None
)
batch_size: int = field(
default=1
)
input_path: str = field(
default=None
)
output_path: str = field(
default=None
)
seq: str = field(
default=None
)
smiles_column: str = field(
default='smiles'
)
seq_column: str = field(
default='seq'
)
#
# parser - used to handle deepspeed case as well
parser = HfArgumentParser([TrainingArguments,InferenceArguments])
training_args, inference_args = parser.parse_args_into_dataclasses()
def main(fn):
try:
def encode_canonical(item):
seq_encodings = seq_tokenizer(expand_seqs(item['seq'])[0],
is_split_into_words=True,
return_offsets_mapping=False,
truncation=True,
padding='max_length',
add_special_tokens=True,
max_length=max_seq_length)
smiles_encodings = smiles_tokenizer(item['smiles'][0],
padding='max_length',
max_length=max_smiles_length,
add_special_tokens=True,
truncation=True)
item['input_ids'] = torch.cat([torch.tensor(seq_encodings['input_ids']),
torch.tensor(smiles_encodings['input_ids'])])
item['token_type_ids'] = torch.cat([torch.tensor(seq_encodings['token_type_ids']),
torch.tensor(smiles_encodings['token_type_ids'])])
item['attention_mask'] = torch.cat([torch.tensor(seq_encodings['attention_mask']),
torch.tensor(smiles_encodings['attention_mask'])])
item.pop('smiles')
item.pop('seq')
return item
def transform(seq, smiles_canonical):
item = {'seq': [seq], 'smiles': [smiles_canonical]}
return encode_canonical(item)
def transform_df(df):
if inference_args.seq is not None:
return df[inference_args.smiles_column].apply(lambda x: transform(inference_args.seq, x)).values
else:
assert inference_args.seq is None
return df[[inference_args.seq_column,inference_args.smiles_column]].apply(lambda x: transform(*x),axis=1).values
# load the model and predict a batch
def predict(df, return_dict=False):
from affinity_pred.model import EnsembleSequenceRegressor
def model_init():
return EnsembleSequenceRegressor(seq_model_name, model_directory, max_seq_length=max_seq_length, sparse_attention=True)
trainer = Trainer(
            model_init=model_init,   # the instantiated 🤗 Transformers model to be trained
            args=training_args,      # training arguments, defined above
)
checkpoint = torch.load(inference_args.checkpoint,
map_location=torch.device('cpu'))
trainer.model.load_state_dict(checkpoint,strict=False)
x = transform_df(df)
out = trainer.predict(x)
print('{} samples/second'.format(out.metrics['test_samples_per_second']))
df['affinity_mean'] = pd.Series(data=out.predictions[:,0], index=df.index).astype('float32')
df['affinity_var'] = pd.Series(data=out.predictions[:,1], index=df.index).astype('float32')
return df
df = pd.read_parquet(fn)
df_pred = predict(df)
base = os.path.basename(fn)
df_pred.to_parquet(inference_args.output_path+'/'+base)
except Exception as e:
print(repr(e))
traceback.print_exc()
if __name__ == "__main__":
comm = MPI.COMM_WORLD
with MPICommExecutor(comm, root=0) as executor:
if executor is not None:
import glob
fns = glob.glob(inference_args.input_path)
fns = [f for f in fns if not os.path.exists(inference_args.output_path+'/'+os.path.basename(f))]
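            # only re-process inputs that don't already have a prediction file, and
            # let the MPI worker pool (managed by root rank 0) handle them in parallel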
executor.map(main, fns)
```
#### File: affinity_pred/train/earlystop.py
```python
import pandas as pd
import tensorflow as tf
from tensorflow.data import TFRecordDataset
from tensorflow.core.util import event_pb2
from tensorflow.python.lib.io import tf_record
from tensorflow.python.framework import tensor_util
import numpy as np
import os
import glob
import sys
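# hide all GPUs from TensorFlow; scanning event logs for eval losses only needs the CPU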
tf.config.experimental.set_visible_devices([], 'GPU')
def my_summary_iterator(path):
for r in TFRecordDataset(path):
yield event_pb2.Event.FromString(r.numpy())
def get_eval_losses(summary_dir):
losses = []
steps = []
for entry in os.scandir(summary_dir):
if entry.is_file():
filename = entry.name
path = os.path.join(summary_dir, filename)
for event in my_summary_iterator(path):
for value in event.summary.value:
if value.tag == 'eval/loss':
losses.append(value.simple_value)
steps.append(event.step)
return losses, steps
if __name__ == '__main__':
dirs = list(glob.glob(sys.argv[1]))
min_loss_step = []
for dir in dirs:
losses, steps = get_eval_losses(dir)
print(dir,losses)
step_min = steps[np.argmin(losses)]
min_loss_step.append(step_min)
for dir,step_min in zip(dirs,min_loss_step):
print('{} Min loss step: {}'.format(dir, step_min))
```
|
{
"source": "jglaser/blazingsql",
"score": 2
}
|
#### File: BlazingSQLTest/EndToEndTests/crossJoinsTest.py
```python
from DataBase import createSchema as cs
from pyspark.sql import SparkSession
from Configuration import Settings as Settings
from Runner import runTest
from Utils import Execution
from Utils import gpuMemory, skip_test, init_context
from pynvml import nvmlInit
from blazingsql import DataType
from Configuration import ExecutionMode
queryType = 'Cross join'
def main(dask_client, spark, dir_data_file, bc, nRals):
start_mem = gpuMemory.capture_gpu_memory_usage()
def executionTest():
tables = ["nation", "region", "customer", "lineitem", "orders"]
data_types = [DataType.DASK_CUDF, DataType.CUDF, DataType.CSV,
DataType.ORC, DataType.PARQUET] # TODO json
# Create Tables ------------------------------------------------------
for fileSchemaType in data_types:
if skip_test(dask_client, nRals, fileSchemaType, queryType):
continue
cs.create_tables(bc, dir_data_file, fileSchemaType, tables=tables)
# Run Query ------------------------------------------------------
            # Parameter to indicate if it's necessary to order
            # the result sets before comparing them
worder = 1
use_percentage = False
acceptable_difference = 0
print('==============================')
print(queryType)
print('==============================')
queryId = 'TEST_01'
query = "select * from nation cross join region"
runTest.run_query(bc, spark, query, queryId, queryType,
worder, '', acceptable_difference,
use_percentage, fileSchemaType)
queryId = 'TEST_02'
query = """
select o_orderkey, o_totalprice,
l_linenumber, l_shipmode
from orders cross join lineitem
where o_orderkey < 6
and l_receiptdate > date '1996-07-12'
and l_linenumber > 5
and o_totalprice < 74029.55
and o_clerk = 'Clerk#000000880'
and l_shipmode IN ('FOB', 'RAIL')
order by o_orderkey, o_totalprice, l_linenumber"""
runTest.run_query(bc, spark, query, queryId, queryType,
worder, '', acceptable_difference,
use_percentage, fileSchemaType)
queryId = 'TEST_03'
query = """select o_orderkey, n_nationkey
from nation cross join orders
where o_totalprice > 4000.0
and o_orderdate > date '1998-07-12'
and o_orderkey > 425000
group by o_orderkey, n_nationkey
order by o_orderkey, n_nationkey"""
runTest.run_query(bc, spark, query, queryId, queryType, worder,
'', acceptable_difference,
use_percentage, fileSchemaType)
queryId = 'TEST_04'
query = """
with cust_nation as
(
select c_custkey, c_name, n_nationkey, n_name
from customer inner join nation
on c_nationkey = n_nationkey
where n_nationkey > 21
and c_acctbal > 525.0
and c_custkey > 13450
order by c_custkey, n_nationkey
), ord_lineitem as
(
select o_orderkey, l_quantity
from orders left join lineitem
on o_orderkey = l_orderkey
where l_shipdate > date '1998-11-12'
and o_totalprice > 3500.0
and l_quantity > 48.0
and l_shipmode in ('AIR', 'FOB', 'SHIP')
order by o_orderkey
)
select c_custkey, n_name, l_quantity
from ord_lineitem cross join cust_nation
where n_name = 'RUSSIA'
order by c_custkey"""
runTest.run_query(bc, spark, query, queryId, queryType, worder,
'', acceptable_difference,
use_percentage, fileSchemaType)
if Settings.execution_mode == ExecutionMode.GENERATOR:
print("==============================")
break
executionTest()
end_mem = gpuMemory.capture_gpu_memory_usage()
gpuMemory.log_memory_usage(queryType, start_mem, end_mem)
if __name__ == '__main__':
Execution.getArgs()
nvmlInit()
# NOTE: Drill doesn't support CROSS JOIN
spark = "spark"
compareResults = True
if 'compare_results' in Settings.data['RunSettings']:
compareResults = Settings.data['RunSettings']['compare_results']
if ((Settings.execution_mode == ExecutionMode.FULL
and compareResults == "true")
or Settings.execution_mode == ExecutionMode.GENERATOR):
# Create Table Spark --------------------------------------------------
spark = SparkSession.builder.appName("timestampTest").getOrCreate()
cs.init_spark_schema(spark,
Settings.data['TestSettings']['dataDirectory'])
# Create Context For BlazingSQL
bc, dask_client = init_context()
nRals = Settings.data['RunSettings']['nRals']
main(dask_client, spark, Settings.data['TestSettings']['dataDirectory'],
bc, nRals)
if Settings.execution_mode != ExecutionMode.GENERATOR:
runTest.save_log()
gpuMemory.print_log_gpu_memory()
```
#### File: BlazingSQLTest/EndToEndTests/literalTest.py
```python
from blazingsql import DataType
from Configuration import ExecutionMode
from Configuration import Settings as Settings
from DataBase import createSchema as cs
from pynvml import nvmlInit
from pyspark.sql import SparkSession
from Runner import runTest
from Utils import Execution, gpuMemory, init_context, skip_test
queryType = "Literal"
def main(dask_client, drill, spark, dir_data_file, bc, nRals):
start_mem = gpuMemory.capture_gpu_memory_usage()
def executionTest():
tables = ["orders", "customer"]
data_types = [DataType.ORC] # TODO gdf csv parquet json
for fileSchemaType in data_types:
if skip_test(dask_client, nRals, fileSchemaType, queryType):
continue
cs.create_tables(bc, dir_data_file, fileSchemaType, tables=tables)
# Run Query ------------------------------------------------------
            # Parameter to indicate if it's necessary to order
            # the result sets before comparing them
worder = 1
use_percentage = False
acceptable_difference = 0.01
print("==============================")
print(queryType)
print("==============================")
queryId = "TEST_01"
query = """select 2, o_orderdate
from orders
order by o_orderdate asc
limit 5"""
query_spark = """select 2, o_orderdate
from orders
order by o_orderdate nulls last
limit 5"""
runTest.run_query(
bc,
spark,
query,
queryId,
queryType,
worder,
"o_orderdate",
acceptable_difference,
use_percentage,
fileSchemaType,
query_spark=query_spark,
)
queryId = "TEST_02"
query = """select 'Rommel',c_name from customer
order by c_name limit 5"""
query_spark = """select 'Rommel',c_name from customer
order by c_name nulls last limit 5"""
runTest.run_query(
bc,
spark,
query,
queryId,
queryType,
worder,
"c_name",
acceptable_difference,
use_percentage,
fileSchemaType,
query_spark=query_spark,
)
queryId = "TEST_03"
query = """select '1990-01-01', c_custkey from customer
order by c_custkey limit 5"""
query_spark = """select '1990-01-01', c_custkey from customer
order by c_custkey nulls last limit 5"""
runTest.run_query(
bc,
spark,
query,
queryId,
queryType,
worder,
"c_custkey",
acceptable_difference,
use_percentage,
fileSchemaType,
query_spark=query_spark,
)
queryId = "TEST_04"
query = """select timestamp '1990-01-01 00:00:00', c_custkey
from customer order by c_custkey limit 5"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"c_custkey",
acceptable_difference,
True,
fileSchemaType,
) # TODO: Change sum/count for avg KC
queryId = "TEST_05"
query = """select 80000 as constant, c_custkey
from customer order by c_custkey limit 5"""
query_spark = """select 80000 as constant, c_custkey
from customer order by c_custkey nulls last limit 5"""
runTest.run_query(
bc,
spark,
query,
queryId,
queryType,
worder,
"c_custkey",
acceptable_difference,
use_percentage,
fileSchemaType,
query_spark=query_spark,
)
queryId = "TEST_06" # TODO: Blazing not support: 2+2
query = """select 2+2, o_orderdate from orders
order by o_orderkey limit 5"""
# runTest.run_query(bc, spark, query, queryId, queryType,
# 0, '', acceptable_difference, use_percentage, fileSchemaType)
if Settings.execution_mode == ExecutionMode.GENERATOR:
print("==============================")
break
executionTest()
end_mem = gpuMemory.capture_gpu_memory_usage()
gpuMemory.log_memory_usage(queryType, start_mem, end_mem)
if __name__ == "__main__":
Execution.getArgs()
nvmlInit()
drill = "drill" # None
spark = "spark"
compareResults = True
if "compare_results" in Settings.data["RunSettings"]:
compareResults = Settings.data["RunSettings"]["compare_results"]
if ((Settings.execution_mode == ExecutionMode.FULL and
compareResults == "true") or
Settings.execution_mode == ExecutionMode.GENERATOR):
# Create Table Drill ------------------------------------------------
from pydrill.client import PyDrill
drill = PyDrill(host="localhost", port=8047)
cs.init_drill_schema(drill,
Settings.data["TestSettings"]["dataDirectory"])
# Create Table Spark -------------------------------------------------
spark = SparkSession.builder.appName("timestampTest").getOrCreate()
cs.init_spark_schema(spark,
Settings.data["TestSettings"]["dataDirectory"])
# Create Context For BlazingSQL
bc, dask_client = init_context()
nRals = Settings.data["RunSettings"]["nRals"]
main(
dask_client,
drill,
spark,
Settings.data["TestSettings"]["dataDirectory"],
bc,
nRals,
)
if Settings.execution_mode != ExecutionMode.GENERATOR:
runTest.save_log()
gpuMemory.print_log_gpu_memory()
```
|
{
"source": "jglaser/DeepSpeed",
"score": 2
}
|
#### File: aio/py_test/test_ds_aio.py
```python
import os
import torch
import argparse
import time
import sys
from multiprocessing import Pool
import multiprocessing as mp
from ds_aio_basic import aio_basic_multiprocessing
from ds_aio_handle import aio_handle_multiprocessing
from test_ds_aio_utils import refine_args
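# Example invocation (illustrative; the file path is a placeholder):
#   python test_ds_aio.py --read_file /path/to/input.bin --block_size 1M --queue_depth 32 --handle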
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--read_file', type=str, default=None, help='Read file.')
parser.add_argument('--write_file', type=str, default=None, help='Write file.')
parser.add_argument('--write_size',
type=str,
default=None,
help='Number of bytes to write.')
parser.add_argument('--block_size', type=str, default='1M', help='I/O block size.')
parser.add_argument('--queue_depth', type=int, default=32, help='I/O queue depth.')
parser.add_argument('--threads',
type=int,
default=1,
help='Thread parallelism count.')
parser.add_argument(
'--single_submit',
action='store_true',
help=
        'Submit I/O requests one at a time (default is to submit queue_depth requests at once).'
)
parser.add_argument('--overlap_events',
action='store_true',
help='Overlap I/O submission and completion requests.')
parser.add_argument('--validate',
action='store_true',
help='Perform validation in library.')
parser.add_argument('--handle', action='store_true', help='Use AIO handle.')
parser.add_argument('--loops',
type=int,
default=1,
help='Count of operation repetitions')
parser.add_argument('--io_parallel',
type=int,
default=None,
help='Per iop parallelism')
parser.add_argument('--gpu', action='store_true', help='Use GPU memory')
args = parser.parse_args()
print(f'args = {args}')
return args
def validate_args(args):
if args.read_file and not os.path.isfile(args.read_file):
print(f'args validation error: {args.read_file} not found')
return False
return True
def main():
print(f'Testing deepspeed_aio python frontend')
args = parse_arguments()
refine_args(args)
if not validate_args(args):
quit()
mp.set_start_method('spawn')
multiprocess_function = aio_handle_multiprocessing if args.handle else aio_basic_multiprocessing
if args.read_file:
multiprocess_function(args, True)
if args.write_file:
multiprocess_function(args, False)
if __name__ == "__main__":
main()
```
#### File: deepspeed/runtime/dataloader.py
```python
import torch
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
class RepeatingLoader:
def __init__(self, loader):
"""Wraps an iterator to allow for infinite iteration. This is especially useful
for DataLoader types that we wish to automatically restart upon completion.
Args:
loader (iterator): The data loader to repeat.
"""
self.loader = loader
self.data_iter = iter(self.loader)
def __iter__(self):
return self
def __next__(self):
try:
batch = next(self.data_iter)
except StopIteration:
self.data_iter = iter(self.loader)
batch = next(self.data_iter)
return batch
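# Minimal usage sketch (illustrative): wrap a finite DataLoader so a training loop
# can call next() indefinitely without handling StopIteration itself:
#   loader = RepeatingLoader(DataLoader(dataset, batch_size=8))
#   for _ in range(num_steps):
#       batch = next(loader)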
class DeepSpeedDataLoader(object):
def __init__(self,
dataset,
batch_size,
pin_memory,
local_rank,
tput_timer,
collate_fn=None,
num_local_io_workers=None,
data_sampler=None,
data_parallel_world_size=None,
data_parallel_rank=None):
self.tput_timer = tput_timer
self.batch_size = batch_size
if local_rank >= 0:
if data_sampler is None:
data_sampler = DistributedSampler(dataset=dataset,
num_replicas=data_parallel_world_size,
rank=data_parallel_rank)
device_count = 1
else:
if data_sampler is None:
data_sampler = RandomSampler(dataset)
device_count = torch.cuda.device_count()
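            # non-distributed path: scale the batch size (and, below, the I/O workers)
            # by the number of visible GPUs, DataParallel-style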
batch_size *= device_count
if num_local_io_workers is None:
num_local_io_workers = 2 * device_count
self.num_local_io_workers = num_local_io_workers
self.data_sampler = data_sampler
self.dataset = dataset
self.collate_fn = collate_fn
self.device_count = device_count
self.batch_size = batch_size
self.pin_memory = pin_memory
self.len = len(self.data_sampler)
self.data = None
def __iter__(self):
self._create_dataloader()
return self
def __len__(self):
return self.len
def __next__(self):
if self.tput_timer:
self.tput_timer.start()
return next(self.data)
def _create_dataloader(self):
if self.collate_fn is None:
self.dataloader = DataLoader(self.dataset,
batch_size=self.batch_size,
pin_memory=self.pin_memory,
sampler=self.data_sampler,
num_workers=self.num_local_io_workers)
else:
self.dataloader = DataLoader(self.dataset,
batch_size=self.batch_size,
pin_memory=self.pin_memory,
sampler=self.data_sampler,
collate_fn=self.collate_fn,
num_workers=self.num_local_io_workers)
self.data = (x for x in self.dataloader)
return self.dataloader
# DataLoader([(torch.randn(3, 3), torch.tensor(i % 2)) for i in range(10)], batch_size=2))
```
#### File: fp16/onebit/adam.py
```python
import types
import torch
import importlib
import numpy as np
import time
import torch.distributed as dist
from deepspeed.utils.logging import logger
class OnebitAdam(torch.optim.Optimizer):
"""Implements the 1-bit Adam algorithm. Currently GPU-only.
For usage example please see https://www.deepspeed.ai/tutorials/onebit-adam/
For technical details please read https://arxiv.org/abs/2102.02888
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
freeze_step (int, optional): Number of steps for warmup (uncompressed)
stage before we start using compressed communication. (default 100000)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in 1-bit Adam!
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
cuda_aware (boolean, required): Set True if the underlying MPI implementation
supports CUDA-Aware communication. (default: False)
comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl')
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self,
params,
deepspeed=None,
lr=1e-3,
freeze_step=100000,
bias_correction=True,
betas=(0.9,
0.999),
eps=1e-8,
eps_inside_sqrt=False,
weight_decay=0.,
max_grad_norm=0.,
amsgrad=False,
cuda_aware=False,
comm_backend_name='nccl'):
if amsgrad:
raise RuntimeError('1-bit Adam does not support the AMSGrad variant.')
defaults = dict(lr=lr,
bias_correction=bias_correction,
betas=betas,
eps=eps,
weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(OnebitAdam, self).__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
assert (dist.is_initialized())
self.comm_time = 0.0
self.step_time = 0.0
self.ave_step = 1
self.bk_time = 0.0
self.deepspeed = deepspeed
self.adam_freeze_key = False
self.initialize = False
self.freeze_step = freeze_step
self.cuda_aware = cuda_aware
self.using_pipeline = False
self.comm_backend_name = comm_backend_name
# Empty initializer. Set handle based on the comm backend as follows.
self.comm_backend_handle = None
if self.comm_backend_name == 'nccl':
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
assert TORCH_MAJOR >= 1 and TORCH_MINOR >= 8, "Please use torch 1.8 or greater to enable NCCL backend in 1-bit Adam. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend"
assert dist.is_initialized() == True, "Please initialize the torch distributed backend."
from deepspeed.runtime.comm.nccl import NcclBackend
self.using_pipeline = hasattr(self.deepspeed,
'pipeline_enable_backward_allreduce')
self.comm_backend_handle = NcclBackend(self.deepspeed.mpu)
elif self.comm_backend_name == 'mpi':
from deepspeed.runtime.comm.mpi import MpiBackend
self.comm_backend_handle = MpiBackend(cuda_aware)
self.size = self.comm_backend_handle.size
self.divider = int(self.size * 8 / np.gcd(self.size, 8))
def step(self, closure=None, grads=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
            output params (list of tensors, optional): A reduced precision copy
of the updated weights written out in addition to the regular
updated weights. Have to be of same type as gradients. (default: None)
scale (float, optional): factor to divide gradient tensor values
by before applying to weights. (default: 1)
"""
loss = None
if closure is not None:
loss = closure()
gather_time = 0
allgather_time = 0
all_time = 0
if self.adam_freeze_key is False:
v_diff_buffer = 0.0
if grads is None:
grads_group = [None] * len(self.param_groups)
# backward compatibility
        # assuming a list/generator of parameters means a single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0]) != list:
grads_group = [grads]
else:
grads_group = grads
for group, grads_this_group in zip(self.param_groups, grads_group):
if grads_this_group is None:
grads_this_group = [None] * len(group['params'])
bias_correction = 1 if group['bias_correction'] else 0
for p, grad in zip(group['params'], grads_this_group):
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('1-bit Adam does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
state['tensor_size'] = torch.numel(p.data)
state['corrected_tensor_size'] = state['tensor_size']
if state['tensor_size'] % (self.size * self.divider) != 0:
state['corrected_tensor_size'] += ((self.size * self.divider) -
(state['tensor_size'] %
(self.size * self.divider)))
state['server_chunk_size'] = state[
'corrected_tensor_size'] // self.size
if not self.initialize or (self.adam_freeze_key
and 'worker_error' not in state.keys()):
torch.cuda.empty_cache()
state['worker_error'] = torch.zeros(state['corrected_tensor_size'],
device=p.device)
state['server_error'] = torch.zeros(state['server_chunk_size'],
device=p.device)
torch.cuda.empty_cache()
self.adam_freeze_key = True
if not self.initialize and torch.distributed.get_rank() == 0:
print("Cupy Buffers Initialized Successfully.")
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
if self.adam_freeze_key is False:
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
grad = None
if self.initialize:
update = exp_avg / (exp_avg_sq.sqrt() + group['eps'])
else:
if 'non_freeze' in group.keys() and group['non_freeze'] is True:
dist.all_reduce(grad)
grad.mul_(1 / dist.get_world_size())
                        exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
grad = None
else:
if self.initialize is True:
exp_avg.mul_(beta1).add_(1 - beta1, grad)
grad = None
if self.size > 1:
exp_avg.set_(
self.comm_backend_handle.compressed_allreduce(
exp_avg,
state['worker_error'],
state['server_error'],
self.deepspeed.local_rank))
# Because 1-bit compression cannot represent exact zero, it is required to
# provide a momentum mask for those params that have constant exact zeros in their
# momentums, otherwise the compression error would keep accumulating.
# For example, for BERT pre-training seq 128, bert.embeddings.position_embeddings.weight
# always have exact zeros in its momentum for row 129 to 512, because it only
# learns up to seq length 128 while the model supports up to 512 seq length.
# (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.)
if 'exp_avg_mask' in group:
if exp_avg.device != group['exp_avg_mask'].device:
group['exp_avg_mask'] = group['exp_avg_mask'].to(
device=exp_avg.device)
exp_avg.mul_(group['exp_avg_mask'])
if self.initialize:
update = exp_avg / (exp_avg_sq.sqrt() + group['eps'])
if self.initialize:
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
with torch.no_grad():
p.add_(-group['lr'] * update)
if not self.initialize:
print('Pop out errors', flush=True)
state.pop('worker_error')
state.pop('server_error')
if not self.initialize:
self.adam_freeze_key = False
self.initialize = True
print(
f"Finished the initialization step at rank {torch.distributed.get_rank()}"
)
return loss
if self.adam_freeze_key is False:
if state['step'] >= self.freeze_step:
print('OnebitAdam - starting compressed communication')
self.adam_freeze_key = True
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
return loss
def load_state_dict(self, state_dict):
"""
Overrides load_state_dict() to add special handling when loading checkpoints
"""
# Because at different stage exp_avg_mask may change (e.g.,
# BERT pre-training seqlen 128 and 512 ), we don't use the exp_avg_mask
# in checkpoints but always use the one user provided in training script.
# (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.)
# Thus here we keep the exp_avg_mask unchanged when loading checkpoint
for i, group in enumerate(self.param_groups):
if 'exp_avg_mask' in group:
state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask']
elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict[
'param_groups'][i]:
state_dict['param_groups'][i].pop('exp_avg_mask')
super().load_state_dict(state_dict)
if self.state[self.param_groups[0]['params'][0]]['step'] < self.freeze_step:
if torch.distributed.get_rank() == 0:
print("Checkpoint loaded and OnebitAdam warmup stage starts/continues.")
if self.adam_freeze_key is True:
self.adam_freeze_key = False
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = True
else:
self.deepspeed.enable_backward_allreduce = True
else:
if torch.distributed.get_rank() == 0:
print(
"Checkpoint loaded and OnebitAdam compression stage starts/continues."
)
if self.adam_freeze_key is False:
self.adam_freeze_key = True
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
# We reset the compression errors when loading checkpoints for 3 reasons:
# 1) The worker and server error at each GPU are distinct, so in current implementation
# only rank 0's errors are saved in the checkpoint. Thus we have to reset the errors.
# If we want to save them correctly we need O(num_gpu*model_size) memory in order to
# gather all the error, which is a very large memory requirement. It's possible to save
# them in a distributed way, but it will make the checkpoint saving/loading much more complicated.
# 2) Even if we are able to save the compression errors correctly, you need to have the
# exact same number of GPUs in order to load them correctly.
# 3) We verified on BERT pre-training that occasionally resetting the compression error
# at checkpoint loading does not affect the convergence.
# However, please avoid frequent checkpoint loading which could break the error
# compensation mechanism thus affect the convergence.
for group in self.param_groups:
for p in group['params']:
if 'worker_error' in self.state[p]:
self.state[p].pop('worker_error')
if 'server_error' in self.state[p]:
self.state[p].pop('server_error')
```
#### File: fp16/onebit/lamb.py
```python
import types
import torch
import numpy as np
import torch.distributed as dist
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
class OnebitLamb(torch.optim.Optimizer):
"""Implements the 1-bit Lamb algorithm. Currently GPU-only.
For usage example please see https://www.deepspeed.ai/tutorials/onebit-lamb/
For technical details please see our paper https://arxiv.org/abs/2104.06069.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
freeze_step (int, optional): Number of steps for warmup (uncompressed)
stage before we start using compressed communication. (default 100000)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
max_coeff(float, optional): maximum value of the lamb coefficient (default: 10.0)
min_coeff(float, optional): minimum value of the lamb coefficient (default: 0.01)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in 1-bit Lamb!
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
cuda_aware (boolean, required): Set True if the underlying MPI implementation
supports CUDA-Aware communication. (default: False)
comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl')
coeff_beta (float, optional): coefficient used for computing
            running averages of the lamb coefficient (default: 0.9). Note that you may want to
increase or decrease this beta depending on the freeze_step you choose, as
1/(1 - coeff_beta) should be smaller than or equal to freeze_step
factor_max (float, optional): maximum value of scaling factor to the frozen lamb
coefficient during compression stage (default: 4.0)
factor_min (float, optional): minimum value of scaling factor to the frozen lamb
coefficient during compression stage (default: 0.5)
factor_threshold (float, optional): threshold of how much the scaling factor can
fluctuate between steps (default: 0.1)
.. _Large Batch Optimization for Deep Learning\: Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self,
params,
deepspeed=None,
lr=1e-3,
freeze_step=100000,
bias_correction=True,
betas=(0.9,
0.999),
eps=1e-8,
eps_inside_sqrt=False,
weight_decay=0.,
max_grad_norm=0.,
max_coeff=10.0,
min_coeff=0.01,
amsgrad=False,
cuda_aware=False,
comm_backend_name='nccl',
coeff_beta=0.9,
factor_max=4.0,
factor_min=0.5,
factor_threshold=0.1):
if amsgrad:
raise RuntimeError('1-bit Lamb does not support the AMSGrad variant.')
defaults = dict(lr=lr,
bias_correction=bias_correction,
betas=betas,
eps=eps,
weight_decay=weight_decay,
max_grad_norm=max_grad_norm,
max_coeff=max_coeff,
min_coeff=min_coeff)
super(OnebitLamb, self).__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
assert (dist.is_initialized())
self.deepspeed = deepspeed
self.lamb_freeze_key = False
self.initialize = False
self.freeze_step = freeze_step
self.cuda_aware = cuda_aware
self.coeff_beta = coeff_beta
self.factor_max = factor_max
self.factor_min = factor_min
self.factor_threshold = factor_threshold
self.using_pipeline = False
self.comm_backend_name = comm_backend_name
# Empty initializer. Set handle based on the comm backend as follows.
self.comm_backend_handle = None
if self.comm_backend_name == 'nccl':
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
            assert TORCH_MAJOR >= 1 and TORCH_MINOR >= 8, "Please use torch 1.8 or greater to enable NCCL backend in 1-bit Lamb. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend"
assert dist.is_initialized() == True, "Please initialize the torch distributed backend."
from deepspeed.runtime.comm.nccl import NcclBackend
self.using_pipeline = hasattr(self.deepspeed,
'pipeline_enable_backward_allreduce')
self.comm_backend_handle = NcclBackend(self.deepspeed.mpu)
elif self.comm_backend_name == 'mpi':
from deepspeed.runtime.comm.mpi import MpiBackend
self.comm_backend_handle = MpiBackend(cuda_aware)
self.size = self.comm_backend_handle.size
self.divider = int(self.size * 8 / np.gcd(self.size, 8))
self.exp_avg_flat = []
self.dummy_exp_avg = {}
self.corrected_tensor_sizes = []
self.server_chunk_sizes = []
self.worker_errors = []
self.server_errors = []
self.lamb_coeffs = []
def step(self, closure=None, grads=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
"""
loss = None
if closure is not None:
loss = closure()
if grads is None:
grads_group = [None] * len(self.param_groups)
# backward compatibility
        # assuming a list/generator of parameters means a single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0]) != list:
grads_group = [grads]
else:
grads_group = grads
#remove the previous stats
del self.lamb_coeffs[:]
if self.lamb_freeze_key:
exp_avg_last_step = []
for group in self.param_groups:
exp_avg_last_step.append(
[self.state[p]['exp_avg'].detach().clone() for p in group['params']])
if 'scaling_coeff' not in self.state[self.param_groups[0]['params'][0]]:
# Compute the scaling_coeff for each momentum at the end of warmup stage.
# This is used to reduce compression error during compression stage.
momentum_scales = []
for group in self.param_groups:
momentum_scales.append([
(torch.norm(self.state[p]['exp_avg']) /
np.sqrt(torch.numel(self.state[p]['exp_avg']))).item()
for p in group['params']
])
united_scale = sum([sum(x) for x in momentum_scales]) / sum(
[len(x) for x in momentum_scales])
for i, group in enumerate(self.param_groups):
for j, p in enumerate(group['params']):
self.state[p][
'scaling_coeff'] = united_scale / momentum_scales[i][j]
for group, grads_this_group in zip(self.param_groups, grads_group):
if grads_this_group is None:
grads_this_group = [None] * len(group['params'])
bias_correction = 1 if group['bias_correction'] else 0
for p, grad in zip(group['params'], grads_this_group):
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('1-bit Lamb does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0 or (len(state) == 1
and 'scaling_coeff' in state.keys()):
state['step'] = 0
state['lamb_coeff_freeze'] = 0.0
state['last_factor'] = 1.0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
state['exp_avg_sq_fresh'] = torch.zeros_like(p.data)
if not self.initialize:
self.lamb_freeze_key = True
exp_avg, exp_avg_sq, exp_avg_sq_fresh = state['exp_avg'], state['exp_avg_sq'], state['exp_avg_sq_fresh']
beta1, beta2 = group['betas']
max_coeff = group['max_coeff']
min_coeff = group['min_coeff']
state['step'] += 1
if self.lamb_freeze_key is False:
# warmup stage, baseline Lamb optimization
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if state['step'] == self.freeze_step:
exp_avg_sq_fresh.data = exp_avg_sq.detach().clone()
grad = None
if self.initialize:
weight_norm = p.data.pow(2).sum().sqrt()
update = exp_avg / (exp_avg_sq.sqrt() + group['eps'])
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
update_norm = update.pow(2).sum().sqrt()
lamb_coeff = 1.0
if weight_norm != 0 and update_norm != 0:
lamb_coeff = (weight_norm / update_norm).item()
if lamb_coeff > max_coeff:
lamb_coeff = max_coeff
if lamb_coeff < min_coeff:
lamb_coeff = min_coeff
if lamb_coeff != 1.0:
state['lamb_coeff_freeze'] = self.coeff_beta * state[
'lamb_coeff_freeze'] + (1 - self.coeff_beta) * lamb_coeff
self.lamb_coeffs.append(lamb_coeff)
with torch.no_grad():
p.add_(-group['lr'] * lamb_coeff * update)
else:
# compression stage, update each momentum locally, then
# communicate based on the compressed_allreduce below
if self.initialize:
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg.mul_(self.state[p]['scaling_coeff'])
grad = None
# init fused momentum
if len(self.exp_avg_flat) == 0:
momentum_groups = []
tensor_size = 0
for group in self.param_groups:
for p in group['params']:
momentum_groups.append(self.state[p]['exp_avg'])
tensor_size += torch.numel(p.data)
corrected_tensor_size = tensor_size
if tensor_size % (self.size * self.divider) != 0:
difference = ((self.size * self.divider) - (tensor_size %
(self.size * self.divider)))
corrected_tensor_size += difference
self.dummy_exp_avg[0] = torch.zeros(
difference,
device=momentum_groups[0].data.device)
momentum_groups.append(self.dummy_exp_avg[0])
self.corrected_tensor_sizes.append(corrected_tensor_size)
self.server_chunk_sizes.append(corrected_tensor_size // self.size)
self.exp_avg_flat.append(
_flatten_dense_tensors([p.detach().clone() for p in momentum_groups]))
updated_params = _unflatten_dense_tensors(self.exp_avg_flat[0],
momentum_groups)
for p, q in zip(momentum_groups, updated_params):
p.data = q.data
if self.initialize and len(self.worker_errors) == 0:
torch.cuda.empty_cache()
for i in range(len(self.exp_avg_flat)):
self.worker_errors.append(
torch.zeros(self.corrected_tensor_sizes[i],
device=self.exp_avg_flat[i].device))
self.server_errors.append(
torch.zeros(self.server_chunk_sizes[i],
device=self.exp_avg_flat[i].device))
torch.cuda.empty_cache()
if self.lamb_freeze_key:
if self.size > 1:
for i in range(len(self.exp_avg_flat)):
if not self.initialize:
torch.cuda.empty_cache()
self.worker_errors.append(
torch.zeros(self.corrected_tensor_sizes[i],
device=self.exp_avg_flat[i].device))
self.server_errors.append(
torch.zeros(self.server_chunk_sizes[i],
device=self.exp_avg_flat[i].device))
torch.cuda.empty_cache()
if torch.distributed.get_rank() == 0:
print("Cupy Buffers Initialized Successfully.")
self.comm_backend_handle.compressed_allreduce(
self.exp_avg_flat[i],
self.worker_errors[0],
self.server_errors[0],
self.deepspeed.local_rank)
if torch.distributed.get_rank() == 0:
print('Pop out errors', flush=True)
del self.worker_errors[:]
del self.server_errors[:]
else:
self.comm_backend_handle.compressed_allreduce(
self.exp_avg_flat[i],
self.worker_errors[i],
self.server_errors[i],
self.deepspeed.local_rank)
if self.lamb_freeze_key and self.initialize:
for i, group in enumerate(self.param_groups):
bias_correction = 1 if group['bias_correction'] else 0
for j, p in enumerate(group['params']):
state = self.state[p]
exp_avg, exp_avg_sq, exp_avg_sq_fresh = state['exp_avg'], state['exp_avg_sq'], state['exp_avg_sq_fresh']
beta1, beta2 = group['betas']
exp_avg.div_(self.state[p]['scaling_coeff'])
# Because 1-bit compression cannot represent exact zero, it is required to
# provide a momentum mask for those params that have constant exact zeros in their
# momentums, otherwise the compression error would keep accumulating.
# For example, for BERT pre-training seq 128, bert.embeddings.position_embeddings.weight
# always have exact zeros in its momentum for row 129 to 512, because it only
# learns up to seq length 128 while the model supports up to 512 seq length.
# (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py about how
# to add this exp_avg_mask for BERT pre-training.)
if 'exp_avg_mask' in group:
if exp_avg.device != group['exp_avg_mask'].device:
group['exp_avg_mask'] = group['exp_avg_mask'].to(
device=exp_avg.device)
exp_avg.mul_(group['exp_avg_mask'])
grad_reconstruct = ((exp_avg - exp_avg_last_step[i][j] * beta1) /
(1 - beta1))
exp_avg_sq_fresh.mul_(beta2).addcmul_(1 - beta2,
grad_reconstruct,
grad_reconstruct)
denom = exp_avg_sq.sqrt() + group['eps']
update_prelim = exp_avg / denom
if group['weight_decay'] > 0.0:
update = update_prelim + group['weight_decay'] * p.data
else:
update = update_prelim
lamb_coeff = 1.0
update_norm = update.pow(2).sum().sqrt()
denom_real = exp_avg_sq_fresh.sqrt() + group['eps']
factor = (denom / denom_real).max().item()
if group['weight_decay'] > 0.0:
update_ratio = min(1.0,
(update_prelim.pow(2).sum().sqrt() /
update_norm).item())
factor = factor * update_ratio + (1.0 - update_ratio)
if factor > self.factor_max:
factor = self.factor_max
if factor < self.factor_min:
factor = self.factor_min
if factor > state['last_factor'] * (1.0 + self.factor_threshold):
factor = state['last_factor'] * (1.0 + self.factor_threshold)
if factor < state['last_factor'] * (1.0 - self.factor_threshold):
factor = state['last_factor'] * (1.0 - self.factor_threshold)
state['last_factor'] = factor
lamb_coeff = state['lamb_coeff_freeze'] * factor
self.lamb_coeffs.append(lamb_coeff)
with torch.no_grad():
p.add_(-group['lr'] * lamb_coeff * update)
del exp_avg_last_step[:]
exp_avg_last_step = None
if not self.initialize:
self.lamb_freeze_key = False
self.initialize = True
print(
f"Finished the initialization step at rank {torch.distributed.get_rank()}"
)
return loss
if self.lamb_freeze_key is False:
if state['step'] >= self.freeze_step:
print('OnebitLamb - starting compressed communication')
self.lamb_freeze_key = True
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
return loss
def load_state_dict(self, state_dict):
"""
Overrides load_state_dict() to add special handling when loading checkpoints
"""
# Because at different stage exp_avg_mask may change (e.g.,
# BERT pre-training seqlen 128 and 512 ), we don't use the exp_avg_mask
# in checkpoints but always use the one user provided in training script.
# (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.)
# Thus here we keep the exp_avg_mask unchanged when loading checkpoint
for i, group in enumerate(self.param_groups):
if 'exp_avg_mask' in group:
state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask']
elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict[
'param_groups'][i]:
state_dict['param_groups'][i].pop('exp_avg_mask')
super().load_state_dict(state_dict)
# need to reset the fused momentum since loading states will break the linking
del self.exp_avg_flat[:]
self.dummy_exp_avg.clear()
del self.corrected_tensor_sizes[:]
del self.server_chunk_sizes[:]
if self.state[self.param_groups[0]['params'][0]]['step'] < self.freeze_step:
if torch.distributed.get_rank() == 0:
print("Checkpoint loaded and OnebitLamb warmup stage starts/continues.")
if self.lamb_freeze_key is True:
self.lamb_freeze_key = False
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = True
else:
self.deepspeed.enable_backward_allreduce = True
for group in self.param_groups:
for p in group['params']:
self.state[p]['lamb_coeff_freeze'] = 0.0
self.state[p]['last_factor'] = 1.0
if 'scaling_coeff' in self.state[p]:
self.state[p].pop('scaling_coeff')
else:
if torch.distributed.get_rank() == 0:
print(
"Checkpoint loaded and OnebitLamb compression stage starts/continues."
)
if self.lamb_freeze_key is False:
self.lamb_freeze_key = True
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
# We reset the compression errors when loading checkpoints for 3 reasons:
# 1) The worker and server error at each GPU are distinct, so in current implementation
# only rank 0's errors are saved in the checkpoint. Thus we have to reset the errors.
# If we want to save them correctly we need O(num_gpu*model_size) memory in order to
# gather all the error, which is a very large memory requirement. It's possible to save
# them in a distributed way, but it will make the checkpoint saving/loading much more complicated.
# 2) Even if we are able to save the compression errors correctly, you need to have the
# exact same number of GPUs in order to load them correctly.
# 3) We verified on BERT pre-training that occasionally resetting the compression error
# at checkpoint loading does not affect the convergence.
# However, please avoid frequent checkpoint loading which could break the error
# compensation mechanism thus affect the convergence.
del self.worker_errors[:]
del self.server_errors[:]
def get_lamb_coeffs(self):
return self.lamb_coeffs
```
#### File: runtime/swap_tensor/optimizer_utils.py
```python
import os
import torch
from deepspeed.utils.logging import logger
from deepspeed.runtime.zero.offload_constants import *
from deepspeed.runtime.swap_tensor.constants import *
from deepspeed.runtime.swap_tensor.utils import swap_in_tensors, swap_out_tensors, \
MIN_AIO_BYTES, AIO_ALIGNED_BYTES, get_sized_buffers, get_sized_buffer
from deepspeed.runtime.swap_tensor.utils import SwapBufferManager, SwapBufferPool
class FlattenedTensorSwapInfo(object):
def __init__(self, path, length, offset):
self.path = path
self.offset = offset
self.length = length
class OptimizerStateSwapInfo(object):
def __init__(self, parameter, numel, base_folder):
self.tensors = []
self.param_id = id(parameter)
self.swap_folder = base_folder
self.swap_paths = []
self.swapped_gradients = {}
self.unswapped_gradients = {}
self.tensor_numel = numel
self.tensor_dtype = parameter.dtype
self.tensor_device = parameter.device
self.has_state_tensors = False
self._add_tensors([parameter])
def numel(self):
return self.tensor_numel
def has_gradients(self):
return self.swapped_gradients or self.unswapped_gradients
def _add_tensors(self, tensor_list):
for t in tensor_list:
self.tensors.append(t)
self.swap_paths.append(os.path.join(self.swap_folder, f'{id(t)}.tensor.swp'))
def add_state_tensors(self, tensor_list):
self.has_state_tensors = True
self._add_tensors(tensor_list)
def device(self):
return self.tensor_device
def dtype(self):
return self.tensor_dtype
def release_memory(self):
for tensor in self.tensors:
tensor.data = torch.Tensor()
def get_or_create_gradient_paths(self, offsets, lengths):
gradient_paths = []
for offset, length in zip(offsets, lengths):
if not offset in self.swapped_gradients.keys():
path = os.path.join(
self.swap_folder,
f'{self.param_id}_gradient_{offset}_{length}.tensor.swp')
self.swapped_gradients[offset] = FlattenedTensorSwapInfo(
path,
length,
offset)
gradient_paths.append(self.swapped_gradients[offset].path)
return gradient_paths
def set_swap_buffers(self, buffers):
compute_lengths = [self.numel()] * len(self.tensors)
compute_buffers = get_sized_buffers(buffers, compute_lengths)
for t, buffer in zip(self.tensors, compute_buffers):
t.data = buffer.data
def get_swap_gradient_buffers(self, swap_buffer):
assert self.numel() <= swap_buffer.numel()
return [
swap_buffer.narrow(0,
grad.offset,
grad.length) for grad in self.swapped_gradients.values()
]
def get_swap_gradient_paths(self):
return [grad.path for grad in self.swapped_gradients.values()]
def get_unpinned_state_tensors(self):
return [t for t in self.tensors if not t.is_pinned()]
def read_unswapped_gradients(self, dest_buffer):
num_elem_count = 0
for offset, grad_partition in self.unswapped_gradients.items():
dst_tensor = dest_buffer.narrow(0, offset, grad_partition.numel())
dst_tensor.data.copy_(grad_partition.data)
num_elem_count += grad_partition.numel()
return num_elem_count
def release_unswapped_gradients(self):
self.unswapped_gradients = {}
SWAPPER_DEBUG_MODE = False
SWAP_OUT_GRADIENT_TIMER = 'swap_out_gradient'
class OptimizerSwapper(object):
def __init__(self,
swap_config,
aio_config,
base_folder,
optimizer,
largest_numel,
device,
dtype,
timers):
self.swap_config = swap_config
self.aio_config = aio_config
# NVMe swap management
self.swap_params_info = {}
self.swap_element_size = torch.tensor([], dtype=dtype).element_size()
self.swap_folder = os.path.join(base_folder,
'optimizer',
f'rank{torch.distributed.get_rank()}')
os.makedirs(self.swap_folder, exist_ok=True)
self.optimizer = optimizer
# Read/Write alignment for each thread during Intra-request parallelism
self.min_aio_bytes = max(MIN_AIO_BYTES, aio_config[AIO_BLOCK_SIZE])
self.aligned_bytes = AIO_ALIGNED_BYTES * aio_config[AIO_THREAD_COUNT]
self.numel_alignment = self.aligned_bytes // self.swap_element_size
# Swap buffer management
self.largest_numel = self._io_aligned_numel(largest_numel)
self.dtype = dtype
self.swap_buffer_manager = SwapBufferManager(
num_elems=self.largest_numel,
count=swap_config[OFFLOAD_OPTIMIZER_BUFFER_COUNT],
dtype=dtype)
# Timers
self.timers = timers
self.timer_names = set()
# Print exclusion list
self.print_exclude_list = [
'optimizer',
'swap_buffer_manager',
'swap_params_info',
'timers',
'timer_names',
]
def swappable_tensor(self, param=None, numel=None):
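        # a tensor is only worth swapping to NVMe when its byte size reaches the
        # minimum AIO request size (min_aio_bytes); smaller tensors stay in memory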
assert param is not None or numel is not None, "Either param or numel must be provided"
if param is not None:
return self.min_aio_bytes <= (param.numel() * self.swap_element_size)
return self.min_aio_bytes <= (numel * self.swap_element_size)
def init_timers(self):
self.timer_names = set()
def log_timers(self):
if self.timer_names:
self._log_timers(list(self.timer_names), force=True)
def pre_backward(self):
self.init_timers()
def post_backward(self):
pass
def _flush_gradient_swapper(self, gradient_swapper):
if gradient_swapper.has_buffers():
self._start_timer(SWAP_OUT_GRADIENT_TIMER)
pinned_buffers = gradient_swapper.release_buffers()
self.swap_buffer_manager.free(pinned_buffers)
self._stop_timer(SWAP_OUT_GRADIENT_TIMER)
self.timer_names.add(SWAP_OUT_GRADIENT_TIMER)
self.timer_names.update(gradient_swapper.get_timer_names())
def _swap_out_gradients(self,
parameter,
gradient_offsets,
gradient_tensors,
gradient_swapper):
if not id(parameter) in self.swap_params_info.keys():
return
swap_info = self.swap_params_info[id(parameter)]
swappable_tensors = []
swappable_offsets = []
swappable_lengths = []
aligned_gradients, aligned_offsets = self._adjust_for_misaligned_lengths(
tensors=gradient_tensors,
offsets=gradient_offsets
)
self._start_timer(SWAP_OUT_GRADIENT_TIMER)
for tensor, offset in zip(aligned_gradients, aligned_offsets):
if not self.swappable_tensor(param=tensor):
swap_info.unswapped_gradients[offset] = tensor
continue
swappable_tensors.append(tensor)
swappable_offsets.append(offset)
swappable_lengths.append(tensor.numel())
if len(swappable_tensors) > 0:
if not gradient_swapper.has_buffers():
pinned_buffers = self.swap_buffer_manager.allocate_all(
num_elems=self.largest_numel,
dtype=self.dtype)
gradient_swapper.add_buffers(pinned_buffers)
swappable_paths = swap_info.get_or_create_gradient_paths(
swappable_offsets,
swappable_lengths)
gradient_swapper.swap_out_tensors(tensor_list=swappable_tensors,
path_list=swappable_paths)
self._stop_timer(SWAP_OUT_GRADIENT_TIMER)
self.timer_names.add(SWAP_OUT_GRADIENT_TIMER)
def _initialize_from_swapped_fp16_params(self,
aio_handle,
fp16_partitions_info,
fp16_num_elems,
fp16_pinned_buffers,
fp32_parameters):
assert len(fp32_parameters) == len(fp16_partitions_info)
assert len(fp32_parameters) == len(fp16_num_elems)
assert all([buffer.is_pinned() for buffer in fp16_pinned_buffers])
fp32_swap_paths = self._get_swap_paths(parameters=fp32_parameters,
num_elems=fp16_num_elems)
fp32_pinned_buffers = self.swap_buffer_manager.allocate_all(
num_elems=self.largest_numel,
dtype=self.dtype)
fp16_buffer_numel = [buf.numel() for buf in fp16_pinned_buffers]
assert all([numel >= self.largest_numel for numel in fp16_buffer_numel]), \
f"numel of fp16 buffers {fp16_buffer_numel} is too small for initializing fp32 params {self.largest_numel}"
fp32_swap_buffers = SwapBufferPool(fp32_pinned_buffers)
fp16_swap_buffers = SwapBufferPool(fp16_pinned_buffers)
curr_index = 0
while curr_index < len(fp32_parameters):
fp16_pinned_tensors = self._swap_in_fp16_params(
aio_handle=aio_handle,
fp16_num_elems=fp16_num_elems[curr_index:],
fp16_partitions_info=fp16_partitions_info[curr_index:],
fp16_swap_buffers=fp16_swap_buffers)
if torch.distributed.get_rank() == 0 and SWAPPER_DEBUG_MODE:
for i, tensor in enumerate(fp16_pinned_tensors):
true_index = curr_index + i
logger.info(
f'swap_in_fp16_param: fp32_id = {id(fp32_parameters[true_index])} index = {true_index} orig_num_elem = {fp16_num_elems[true_index]}, swap_num_elem = {fp16_pinned_tensors[i].numel()}'
)
swap_out_count = self._swap_out_fp16_params(
aio_handle=aio_handle,
fp32_swap_paths=fp32_swap_paths[curr_index:],
fp32_swap_buffers=fp32_swap_buffers,
fp16_pinned_tensors=fp16_pinned_tensors)
assert swap_out_count == len(fp16_pinned_tensors), \
f"{swap_out_count} does not match {len(fp16_pinned_tensors)}"
fp16_swap_buffers.reset()
fp32_swap_buffers.reset()
curr_index += swap_out_count
self.swap_buffer_manager.free(fp32_pinned_buffers)
def _swap_in_fp16_params(self,
aio_handle,
fp16_num_elems,
fp16_partitions_info,
fp16_swap_buffers):
assert len(fp16_num_elems) > 0
swapped_fp16_tensors = []
swap_tensors = []
swap_paths = []
unswapped_srcs = []
unswapped_dsts = []
for i, numel in enumerate(fp16_num_elems):
pinned_tensor, _ = fp16_swap_buffers.allocate_tensor(numel, None, numel)
if pinned_tensor is None:
break
swapped_fp16_tensors.append(pinned_tensor)
offset = 0
for tensor, partition_numel, partition_path in fp16_partitions_info[i]:
dst_tensor = pinned_tensor.narrow(0, offset, partition_numel)
if partition_path is None:
unswapped_srcs.append(tensor)
unswapped_dsts.append(dst_tensor)
else:
swap_paths.append(partition_path)
swap_tensors.append(dst_tensor)
offset += partition_numel
assert len(swapped_fp16_tensors) + len(unswapped_srcs) > 0
ret = swap_in_tensors(aio_handle, swap_tensors, swap_paths)
for src, dst in zip(unswapped_srcs, unswapped_dsts):
dst.data.copy_(src.data)
assert len(swap_tensors) == aio_handle.wait()
return swapped_fp16_tensors
def _swap_out_fp16_params(self,
aio_handle,
fp32_swap_paths,
fp32_swap_buffers,
fp16_pinned_tensors):
assert len(fp16_pinned_tensors) <= len(fp32_swap_paths)
swap_out_count = 0
for i, fp16_tensor in enumerate(fp16_pinned_tensors):
if not fp32_swap_buffers.has_space(fp16_tensor.numel()):
fp32_swap_buffers.swap_out(aio_handle)
fp32_swap_buffers.reset()
pinned_tensor, _ = fp32_swap_buffers.insert_tensor(
fp16_tensor,
fp32_swap_paths[i],
self._io_aligned_numel(fp16_tensor.numel())
)
assert pinned_tensor is not None
swap_out_count += 1
if len(fp32_swap_buffers.get_swap_tensors()) > 0:
fp32_swap_buffers.swap_out(aio_handle)
return swap_out_count
def _initialize_parameters(self, parameters, src_tensors, aio_handle):
assert len(parameters) == len(src_tensors)
swap_paths = self._get_swap_paths(parameters=parameters,
num_elems=[src.numel() for src in src_tensors])
SWAP_INIT_TIMER = "swap_init_write"
self._start_timer(SWAP_INIT_TIMER)
pinned_buffers = self.swap_buffer_manager.allocate_all(
num_elems=self.largest_numel,
dtype=self.dtype)
assert pinned_buffers is not None
self._swap_out_unpinned_tensors(aio_handle=aio_handle,
unpinned_tensors=src_tensors,
dest_paths=swap_paths,
pinned_buffers=pinned_buffers)
if torch.distributed.get_rank() == 0 and SWAPPER_DEBUG_MODE:
for i, tensor in enumerate(src_tensors):
logger.info(
f'copy_in_fp16_param: fp32_id = {id(parameters[i])} index = {i}, swap_num_elem = {src_tensors[i].numel()}'
)
self.swap_buffer_manager.free(pinned_buffers)
self._stop_timer(SWAP_INIT_TIMER)
self._log_timers([SWAP_INIT_TIMER])
def _get_swap_paths(self, parameters, num_elems):
swap_info_list = [
self._create_param_swap_info(parameter=p,
numel=numel) \
for p, numel in zip(parameters, num_elems)
]
assert len(swap_info_list) == len(num_elems)
swap_paths = [info.swap_paths[0] for info in swap_info_list]
return swap_paths
def _swap_out_unpinned_tensors(self,
aio_handle,
unpinned_tensors,
dest_paths,
pinned_buffers):
swap_buffer_count = len(pinned_buffers)
unpinned_tensor_count = len(unpinned_tensors)
for i in range(0, unpinned_tensor_count, swap_buffer_count):
swap_tensor_count = min((unpinned_tensor_count - i), swap_buffer_count)
src_tensors = unpinned_tensors[i:(i + swap_tensor_count)]
compute_lengths = [t.numel() for t in src_tensors]
compute_buffers = get_sized_buffers(pinned_buffers, compute_lengths)
for dst, src in zip(compute_buffers, src_tensors):
dst.data.copy_(src.data)
swap_lengths = [self._io_aligned_numel(t.numel()) for t in src_tensors]
swap_buffers = get_sized_buffers(pinned_buffers, swap_lengths)
swap_paths = dest_paths[i:(i + swap_tensor_count)]
swap_out_tensors(aio_handle, swap_buffers, swap_paths)
assert aio_handle.wait() == swap_tensor_count
def _adjust_for_misaligned_lengths(self, tensors, offsets):
new_tensors = []
new_offsets = []
for orig_tensor, orig_offset in zip(tensors, offsets):
if not self.swappable_tensor(param=orig_tensor):
new_tensors.append(orig_tensor)
new_offsets.append(orig_offset)
continue
remainder = orig_tensor.numel() % self.numel_alignment
if remainder == 0:
new_tensors.append(orig_tensor)
new_offsets.append(orig_offset)
continue
# Split into two by making remainder a tensor
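            # e.g. (illustrative) numel=1000 with numel_alignment=256:
            # aligned_length = 768 and the trailing 232 elements become the remainder tensor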
aligned_length = (orig_tensor.numel() //
self.numel_alignment) * self.numel_alignment
new_tensors.append(orig_tensor.narrow(0, 0, aligned_length))
new_offsets.append(orig_offset)
# remainder tensor
new_tensors.append(orig_tensor.narrow(0, aligned_length, remainder))
new_offsets.append(orig_offset + aligned_length)
return new_tensors, new_offsets
def _retrieve_unswapped_grad_partitions(self, swap_info, dest_buffer):
UNSWAPPED_READ_GRADIENTS = 'unswapped_read_gradients'
self._start_timer(UNSWAPPED_READ_GRADIENTS)
tensor_count = len(swap_info.unswapped_gradients)
num_elem_count = swap_info.read_unswapped_gradients(dest_buffer)
self._stop_timer(UNSWAPPED_READ_GRADIENTS)
self._log_timers([UNSWAPPED_READ_GRADIENTS])
# It should be safe to discard unswapped gradient partitions
swap_info.release_unswapped_gradients()
if SWAPPER_DEBUG_MODE:
logger.info(
f'optimizer_retrieve_unswapped_gradients: param={swap_info.param_id} tensor_count={tensor_count} elem_count={num_elem_count}'
)
def _get_state_tensors(self, parameter):
if parameter not in self.optimizer.state:
return []
tensor_list = []
for value in self.optimizer.state[parameter].values():
if torch.is_tensor(value):
tensor_list.append(value)
return tensor_list
def _update_param_state_info(self, swap_info, parameter):
if not swap_info.has_state_tensors:
state_tensors = self._get_state_tensors(parameter)
if state_tensors:
swap_info.add_state_tensors(state_tensors)
def _create_param_swap_info(self, parameter, numel):
param_id = id(parameter)
assert param_id not in self.swap_params_info
self.swap_params_info[param_id] = OptimizerStateSwapInfo(
parameter=parameter,
numel=numel,
base_folder=self.swap_folder)
swap_info = self.swap_params_info[param_id]
self._update_param_state_info(swap_info, parameter)
return swap_info
def _get_param_swap_info(self, parameter):
param_id = id(parameter)
swap_info = self.swap_params_info.get(param_id, None)
if swap_info is not None:
self._update_param_state_info(swap_info, parameter)
return swap_info
def _start_timer(self, name):
if self.timers:
self.timers(name).start()
def _stop_timer(self, name):
if self.timers:
self.timers(name).stop()
def _log_timers(self, name_list, force=False):
if self.timers and (SWAPPER_DEBUG_MODE or force):
self.timers.log(name_list)
def _io_aligned_numel(self, numel):
remainder = numel % self.numel_alignment
return numel if remainder == 0 else (numel + self.numel_alignment - remainder)
```
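The alignment helpers above (`_io_aligned_numel` and `_adjust_for_misaligned_lengths`) round element counts up to the next `numel_alignment` boundary so that every AIO transfer is block-aligned, splitting a misaligned remainder off into its own tensor when needed. A minimal standalone sketch of the rounding step, with an alignment of 256 elements assumed purely for illustration:

```python
# Minimal sketch of the element-count rounding performed by _io_aligned_numel.
# The alignment value of 256 is an assumption for this example only.
def io_aligned_numel(numel, numel_alignment=256):
    remainder = numel % numel_alignment
    return numel if remainder == 0 else numel + numel_alignment - remainder

assert io_aligned_numel(1024) == 1024  # already aligned: unchanged
assert io_aligned_numel(1000) == 1024  # rounded up to the next boundary
```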
#### File: runtime/swap_tensor/pipelined_optimizer_swapper.py
```python
import os
import torch
from deepspeed.utils.logging import logger
from deepspeed.ops.aio import AsyncIOBuilder
from deepspeed.runtime.zero.offload_constants import *
from deepspeed.runtime.swap_tensor.constants import *
from deepspeed.runtime.swap_tensor.utils import swap_in_tensors, swap_out_tensors, print_object, \
MIN_AIO_BYTES, AIO_ALIGNED_BYTES
from deepspeed.runtime.swap_tensor.async_swapper import AsyncTensorSwapper
from deepspeed.runtime.swap_tensor.optimizer_utils import SwapBufferManager, get_sized_buffer
from deepspeed.runtime.swap_tensor.optimizer_utils import OptimizerSwapper
class OptimizerSwapOp(object):
def __init__(self,
aio_handle,
read_op,
param_info,
allocated_buffers,
state_buffers,
num_ops):
self.aio_handle = aio_handle
self.read_op = read_op
self.param_info = param_info
self.allocated_buffers = allocated_buffers
self.state_buffers = state_buffers
self.wait_required = True
self.num_ops = num_ops
def is_parameter(self, parameter):
return id(parameter) == self.param_info.param_id
def wait(self):
assert self.wait_required
assert self.aio_handle.wait() == self.num_ops
self.wait_required = False
SYNC_SWAP_IN = 'sync_swap_in'
ASYNC_SWAP_IN = 'async_swap_in'
SYNC_SWAP_OUT = 'sync_swap_out'
ASYNC_SWAP_OUT = 'async_swap_out'
SWAP_IN_STATE_TIMER = 'swap_in_state'
SWAP_OUT_STATE_TIMER = 'swap_out_state'
SWAP_OUT_GRADIENT_TIMER = 'swap_out_gradient'
ASYNC_SWAP_IN_STATE_TIMER = "async_swap_in_state"
ASYNC_SWAP_OUT_STATE_TIMER = 'async_swap_out_state'
class PipelinedOptimizerSwapper(OptimizerSwapper):
def __init__(self,
swap_config,
aio_config,
base_folder,
optimizer,
largest_numel,
device,
dtype,
timers):
super(PipelinedOptimizerSwapper,
self).__init__(swap_config,
aio_config,
base_folder,
optimizer,
largest_numel,
device,
dtype,
timers)
aio_op = AsyncIOBuilder().load()
self.write_aio_handle = aio_op.aio_handle(aio_config[AIO_BLOCK_SIZE],
aio_config[AIO_QUEUE_DEPTH],
aio_config[AIO_SINGLE_SUBMIT],
aio_config[AIO_OVERLAP_EVENTS],
aio_config[AIO_THREAD_COUNT])
self.read_aio_handle = aio_op.aio_handle(aio_config[AIO_BLOCK_SIZE],
aio_config[AIO_QUEUE_DEPTH],
aio_config[AIO_SINGLE_SUBMIT],
aio_config[AIO_OVERLAP_EVENTS],
aio_config[AIO_THREAD_COUNT])
# Overlap gradient swap out
self.gradient_swapper = AsyncTensorSwapper(aio_handle=self.write_aio_handle,
numel_alignment=self.numel_alignment,
timers=self.timers)
self.async_swap_in = swap_config[OFFLOAD_OPTIMIZER_PIPELINE_READ]
self.async_swap_out = swap_config[OFFLOAD_OPTIMIZER_PIPELINE_WRITE]
self.swap_ops = {
SYNC_SWAP_IN: None,
ASYNC_SWAP_IN: None,
SYNC_SWAP_OUT: None,
ASYNC_SWAP_OUT: None
}
self.print_exclude_list += [
'gradient_swapper',
'read_aio_handle',
'write_aio_handle',
'swap_ops',
'print_exclude_list'
]
if torch.distributed.get_rank() == 0:
print_object(obj=self,
name='PipelinedOptimizerSwapper',
exclude_list=self.print_exclude_list)
def initialize_parameters(self, parameters, src_tensors):
self._initialize_parameters(parameters=parameters,
src_tensors=src_tensors,
aio_handle=self.write_aio_handle)
def initialize_from_swapped_fp16_params(self,
fp16_partitions_info,
fp16_num_elems,
fp16_pinned_buffers,
fp32_parameters):
self._initialize_from_swapped_fp16_params(
aio_handle=self.write_aio_handle,
fp16_partitions_info=fp16_partitions_info,
fp16_num_elems=fp16_num_elems,
fp16_pinned_buffers=fp16_pinned_buffers,
fp32_parameters=fp32_parameters)
def flush_gradients(self):
self._flush_gradient_swapper(self.gradient_swapper)
def swap_in_optimizer_state(self, parameter, async_parameter):
assert parameter is not None
assert self.swap_ops[SYNC_SWAP_IN] is None
self._flush_gradient_swapper(self.gradient_swapper)
self._start_timer(SWAP_IN_STATE_TIMER)
if self.swap_ops[ASYNC_SWAP_IN]:
assert self.swap_ops[ASYNC_SWAP_IN].is_parameter(parameter)
self.swap_ops[SYNC_SWAP_IN] = self.swap_ops[ASYNC_SWAP_IN]
self.swap_ops[ASYNC_SWAP_IN] = None
else:
self.swap_ops[SYNC_SWAP_IN] = self._swap_in_optimizer_state(
aio_handle=self.read_aio_handle,
parameter=parameter)
if self.swap_ops[SYNC_SWAP_IN]:
self.swap_ops[SYNC_SWAP_IN].wait()
if self.async_swap_in and async_parameter is not None:
assert self.swap_ops[ASYNC_SWAP_IN] is None
self.swap_ops[ASYNC_SWAP_IN] = self._swap_in_optimizer_state(
aio_handle=self.read_aio_handle,
parameter=async_parameter)
self._stop_timer(SWAP_IN_STATE_TIMER)
self.timer_names.add(SWAP_IN_STATE_TIMER)
def swap_out_optimizer_state(self, parameter, async_swap):
self._start_timer(SWAP_OUT_STATE_TIMER)
if self.swap_ops[ASYNC_SWAP_OUT]:
self._start_timer(ASYNC_SWAP_OUT_STATE_TIMER)
self._complete_swap_out(ASYNC_SWAP_OUT)
self._stop_timer(ASYNC_SWAP_OUT_STATE_TIMER)
self.timer_names.add(ASYNC_SWAP_OUT_STATE_TIMER)
assert self.swap_ops[SYNC_SWAP_IN] is not None
assert not self.swap_ops[SYNC_SWAP_IN].wait_required
swap_op = self._swap_out_optimizer_state(aio_handle=self.write_aio_handle,
parameter=parameter,
swap_in_op=self.swap_ops[SYNC_SWAP_IN])
self.swap_ops[SYNC_SWAP_IN] = None
if self.async_swap_out and async_swap:
self.swap_ops[ASYNC_SWAP_OUT] = swap_op
else:
self.swap_ops[SYNC_SWAP_OUT] = swap_op
self._complete_swap_out(SYNC_SWAP_OUT)
self._stop_timer(SWAP_OUT_STATE_TIMER)
self.timer_names.add(SWAP_OUT_STATE_TIMER)
def swap_out_gradients(self, parameter, gradient_offsets, gradient_tensors):
self._swap_out_gradients(parameter=parameter,
gradient_offsets=gradient_offsets,
gradient_tensors=gradient_tensors,
gradient_swapper=self.gradient_swapper)
def _complete_swap_out(self, swap_out_type):
self.swap_ops[swap_out_type].wait()
self.swap_buffer_manager.free(self.swap_ops[swap_out_type].allocated_buffers)
self.swap_ops[swap_out_type] = None
def _swap_out_optimizer_state(self, aio_handle, parameter, swap_in_op):
assert swap_in_op.is_parameter(parameter)
allocated_buffers = swap_in_op.allocated_buffers.copy()
swap_buffers = swap_in_op.state_buffers.copy()
param_info = swap_in_op.param_info
self._update_param_state_info(param_info, parameter)
unpinned_tensors = param_info.get_unpinned_state_tensors()
if len(unpinned_tensors) > 0:
new_alloc_buffers = self.swap_buffer_manager.allocate(
num_elems=self._io_aligned_numel(param_info.numel()),
count=len(unpinned_tensors),
dtype=param_info.dtype())
assert new_alloc_buffers is not None
allocated_buffers += new_alloc_buffers
swap_buffers += new_alloc_buffers
for pinned_dst, unpinned_src in zip(new_alloc_buffers, unpinned_tensors):
dst = get_sized_buffer(pinned_dst, unpinned_src.numel())
dst.data.copy_(unpinned_src.data)
swap_paths = param_info.swap_paths.copy()
assert len(swap_paths) == len(swap_buffers)
swap_out_tensors(aio_handle, swap_buffers, swap_paths)
swap_out_op = OptimizerSwapOp(aio_handle=aio_handle,
param_info=param_info,
read_op=False,
allocated_buffers=allocated_buffers,
state_buffers=swap_buffers,
num_ops=len(swap_buffers))
return swap_out_op
def _swap_in_optimizer_state(self, aio_handle, parameter):
param_info = self._get_param_swap_info(parameter)
if param_info is None:
return None
required_buffer_count = len(
param_info.tensors) + (1 if param_info.has_gradients() else 0)
aligned_numel = self._io_aligned_numel(param_info.numel())
allocated_buffers = self.swap_buffer_manager.allocate(
num_elems=aligned_numel,
count=required_buffer_count,
dtype=parameter.dtype)
assert allocated_buffers is not None, \
f"PipelinedOptimizerSwapper ran out of swap buffers, try increasing {OFFLOAD_OPTIMIZER_BUFFER_COUNT}"
state_buffers = allocated_buffers[:len(param_info.tensors)]
param_info.set_swap_buffers(state_buffers)
swap_buffers = state_buffers.copy()
swap_paths = param_info.swap_paths.copy()
if param_info.has_gradients():
parameter.grad = allocated_buffers[-1].narrow(0, 0, param_info.numel())
if param_info.swapped_gradients:
swap_buffers += param_info.get_swap_gradient_buffers(parameter.grad)
swap_paths += param_info.get_swap_gradient_paths()
swap_in_tensors(aio_handle, swap_buffers, swap_paths)
if param_info.unswapped_gradients:
self._retrieve_unswapped_grad_partitions(swap_info=param_info,
dest_buffer=parameter.grad)
swap_in_op = OptimizerSwapOp(aio_handle=aio_handle,
param_info=param_info,
read_op=True,
allocated_buffers=allocated_buffers,
state_buffers=state_buffers,
num_ops=len(swap_buffers))
return swap_in_op
```
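`PipelinedOptimizerSwapper.swap_in_optimizer_state` waits synchronously on the optimizer state of the parameter being updated while issuing an asynchronous prefetch for the next parameter, so disk reads overlap with compute. A toy sketch of that double-buffered read loop, where `start_read`, `wait_read`, and `update` are hypothetical stand-ins for the aio-handle and optimizer-step calls:

```python
# Toy sketch of the read pipeline: the state of parameter i is waited on
# synchronously while the state of parameter i+1 is prefetched asynchronously.
def step_all(parameters, start_read, wait_read, update):
    pending = None
    for i, param in enumerate(parameters):
        op = pending if pending is not None else start_read(param)
        wait_read(op)  # synchronous swap-in of the current parameter's state
        nxt = parameters[i + 1] if i + 1 < len(parameters) else None
        pending = start_read(nxt) if nxt is not None else None  # async prefetch
        update(param)  # optimizer step overlaps the prefetch issued above
```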
|
{
"source": "jglaser/hoomd-blue",
"score": 2
}
|
#### File: hoomd-blue/hoomd/communicator.py
```python
from hoomd import _hoomd
import hoomd
import contextlib
class decomposition(object):
""" Set the domain decomposition.
Args:
x (list): First nx-1 fractional domain widths (if *nx* is None)
y (list): First ny-1 fractional domain widths (if *ny* is None)
z (list): First nz-1 fractional domain widths (if *nz* is None)
nx (int): Number of processors to uniformly space in x dimension (if *x* is None)
ny (int): Number of processors to uniformly space in y dimension (if *y* is None)
nz (int): Number of processors to uniformly space in z dimension (if *z* is None)
linear (bool): (MPI only) Force a slab (1D) decomposition along the z-direction
onelevel (bool): (MPI only) Disable node-local (two-level) domain decomposition
A single domain decomposition is defined for the simulation.
A standard domain decomposition divides the simulation box into equal volumes along the Cartesian axes while minimizing
the surface area between domains. This works well for systems where particles are uniformly distributed and
there is equal computational load for each domain, and is the default behavior in HOOMD-blue. If no decomposition is
specified for an MPI run, a uniform decomposition is automatically constructed on initialization.
In simulations with density gradients, such as a vapor-liquid interface, there can be a considerable imbalance of
particles between different ranks. The simulation time then becomes limited by the slowest processor. It may then be
advantageous in certain systems to create domains of unequal volume, for example, by increasing the volume of less
dense regions of the simulation box in order to balance the number of particles.
The decomposition command allows the user to control the geometry and positions of the decomposition.
The fractional width of the first :math:`n_i - 1` domains is specified along each dimension, where
:math:`n_i` is the number of ranks desired along dimension :math:`i`. If no cut planes are specified, then a uniform
spacing is assumed. The number of domains with uniform spacing can also be specified. If the desired decomposition
is not commensurate with the number of ranks available (for example, a 3x3x3 decomposition when only 8 ranks are
available), then a default uniform spacing is chosen. For the best control, the user should specify the number of
ranks in each dimension even if uniform spacing is desired.
decomposition can only be called *before* the system is initialized, at which point the particles are decomposed.
An error is raised if the system is already initialized.
The decomposition can be adjusted dynamically if the best static decomposition is not known, or the system
composition is changing dynamically. For this associated command, see update.balance().
Priority is always given to specified arguments over the command line arguments. If one of these is not set but
a command line option is, then the command line option is used. Otherwise, a default decomposition is chosen.
Examples::
comm.decomposition(x=0.4, ny=2, nz=2)
comm.decomposition(nx=2, y=0.8, z=[0.2,0.3])
Warning:
The decomposition command will override specified command line options.
Warning:
This command must be invoked *before* the system is initialized because particles are decomposed at this time.
Note:
The domain size cannot be chosen arbitrarily small. There are restrictions placed on the decomposition by the
ghost layer width set by the pair potentials. An error will be raised at run time if the ghost layer width
exceeds half the shortest domain size.
Warning:
Both fractional widths and the number of processors cannot be set simultaneously, and an error will be
raised if both are set.
"""
def __init__(self, x=None, y=None, z=None, nx=None, ny=None, nz=None, linear=False, onelevel=False):
# check that the context has been initialized though
if hoomd.context.current is None:
raise RuntimeError("Cannot initialize decomposition without context.initialize() first")
# check that system is not initialized
if hoomd.context.current.system is not None:
hoomd.context.current.device.cpp_msg.error("comm.decomposition: cannot modify decomposition after system is initialized. Call before init.*\n")
raise RuntimeError("Cannot create decomposition after system is initialized. Call before init.*")
# make sure MPI is enabled if any arguments are not None
if (x or y or z or nx or ny or nz) and (not hoomd.version.mpi_enabled):
raise RuntimeError("the x, y, z, nx, ny, nz options are only available in MPI builds")
self._onelevel = onelevel # cache this for later when we can make the cpp object
# check that there are ranks available for decomposition
if hoomd.context.current.device.comm.cpp_mpi_conf == 1:
hoomd.context.current.device.cpp_msg.warning("Only 1 rank in system, ignoring decomposition to use optimized code pathways.\n")
return
else:
self.x = []
self.y = []
self.z = []
self.nx = 0
self.ny = 0
self.nz = 0
self.uniform_x = True
self.uniform_y = True
self.uniform_z = True
self.set_params(x,y,z,nx,ny,nz)
"""
# do a one time update of the cuts to the global values if a global is set
if not self.x and self.nx == 0 and hoomd.context.options.nx is not None:
self.nx = hoomd.context.options.nx
self.uniform_x = True
if not self.y and self.ny == 0 and hoomd.context.options.ny is not None:
self.ny = hoomd.context.options.ny
self.uniform_y = True
if not self.z and self.nz == 0:
if linear:
self.nz = hoomd.context.current.device.cpp_mpi_conf.getNRanks()
self.uniform_z = True
elif hoomd.context.options.nz is not None:
self.nz = hoomd.context.options.nz
self.uniform_z = True
"""
# set the global decomposition to this class
if hoomd.context.current.decomposition is not None:
hoomd.context.current.device.cpp_msg.warning("comm.decomposition: overriding currently defined domain decomposition\n")
hoomd.context.current.decomposition = self
def set_params(self,x=None,y=None,z=None,nx=None,ny=None,nz=None):
"""Set parameters for the decomposition before initialization.
Args:
x (list): First nx-1 fractional domain widths (if *nx* is None)
y (list): First ny-1 fractional domain widths (if *ny* is None)
z (list): First nz-1 fractional domain widths (if *nz* is None)
nx (int): Number of processors to uniformly space in x dimension (if *x* is None)
ny (int): Number of processors to uniformly space in y dimension (if *y* is None)
nz (int): Number of processors to uniformly space in z dimension (if *z* is None)
Examples::
decomposition.set_params(x=[0.2])
decomposition.set_params(nx=1, y=[0.3,0.4], nz=2)
"""
if (x is not None and nx is not None) or (y is not None and ny is not None) or (z is not None and nz is not None):
hoomd.context.current.device.cpp_msg.error("comm.decomposition: cannot set fractions and number of processors simultaneously\n")
raise RuntimeError("Cannot set fractions and number of processors simultaneously")
# if x is set, use it. otherwise, if nx is set, compute x and set it
if x is not None:
# recast single floats as lists that can be iterated, this is the only single input we should expect
if isinstance(x, float):
self.x = [x]
else:
self.x = x
self.uniform_x = False
elif nx is not None:
self.nx = nx
self.uniform_x = True
# do the same in y
if y is not None:
if isinstance(y, float):
self.y = [y]
else:
self.y = y
self.uniform_y = False
elif ny is not None:
self.ny = ny
self.uniform_y = True
# do the same in z (but also use the linear command line option if it is present, which supersedes nz)
if z is not None:
if isinstance(z, float):
self.z = [z]
else:
self.z = z
self.uniform_z = False
elif nz is not None:
self.nz = nz
self.uniform_z = True
## \internal
# \brief Delayed construction of the C++ object for this balanced decomposition
# \param box Global simulation box for decomposition
def _make_cpp_decomposition(self, box):
# if the box is uniform in all directions, just use these values
if self.uniform_x and self.uniform_y and self.uniform_z:
self.cpp_dd = _hoomd.DomainDecomposition(hoomd.context.current.device.cpp_exec_conf, box.getL(), self.nx, self.ny, self.nz, not self._onelevel)
return self.cpp_dd
# otherwise, make the fractional decomposition
try:
fxs = _hoomd.std_vector_scalar()
fys = _hoomd.std_vector_scalar()
fzs = _hoomd.std_vector_scalar()
# if uniform, correct the fractions to be uniform as well
if self.uniform_x and self.nx > 0:
self.x = [1.0/self.nx]*(self.nx-1)
if self.uniform_y and self.ny > 0:
self.y = [1.0/self.ny]*(self.ny-1)
if self.uniform_z and self.nz > 0:
self.z = [1.0/self.nz]*(self.nz-1)
sum_x = sum_y = sum_z = 0.0
tol = 1.0e-5
for i in self.x:
if i <= -tol or i >= 1.0 - tol:
hoomd.context.current.device.cpp_msg.error("comm.decomposition: fraction must be between 0.0 and 1.0\n")
raise RuntimeError("Fractional decomposition must be between 0.0 and 1.0")
fxs.append(i)
sum_x += i
if sum_x >= 1.0 - tol or sum_x <= -tol:
hoomd.context.current.device.cpp_msg.error("comm.decomposition: fraction must be between 0.0 and 1.0\n")
raise RuntimeError("Sum of decomposition in x must lie between 0.0 and 1.0")
for i in self.y:
if i <= -tol or i >= 1.0 - tol:
hoomd.context.current.device.cpp_msg.error("comm.decomposition: fraction must be between 0.0 and 1.0\n")
raise RuntimeError("Fractional decomposition must be between 0.0 and 1.0")
fys.append(i)
sum_y += i
if sum_y >= 1.0 - tol or sum_y <= -tol:
hoomd.context.current.device.cpp_msg.error("comm.decomposition: fraction must be between 0.0 and 1.0\n")
raise RuntimeError("Sum of decomposition in y must lie between 0.0 and 1.0")
for i in self.z:
if i <= -tol or i >= 1.0 - tol:
hoomd.context.current.device.cpp_msg.error("comm.decomposition: fraction must be between 0.0 and 1.0\n")
raise RuntimeError("Fractional decomposition must be between 0.0 and 1.0")
fzs.append(i)
sum_z += i
if sum_z >= 1.0 - tol or sum_z <= -tol:
hoomd.context.current.device.cpp_msg.error("comm.decomposition: fraction must be between 0.0 and 1.0\n")
raise RuntimeError("Sum of decomposition in z must lie between 0.0 and 1.0")
self.cpp_dd = _hoomd.DomainDecomposition(hoomd.context.current.device.cpp_exec_conf, box.getL(), fxs, fys, fzs)
return self.cpp_dd
except TypeError as te:
hoomd.context.current.device.cpp_msg.error("Fractional cuts must be iterable (list, tuple, etc.)\n")
raise te
class Communicator(object):
"""
MPI communicator
Args:
mpi_comm: Accepts an mpi4py communicator. Use this argument to perform many independent hoomd simulations
where you communicate between those simulations using your own mpi4py code.
nrank (int): (MPI) Number of ranks to include in a partition
"""
def __init__(self, mpi_comm=None, nrank=None):
# check nrank
if nrank is not None:
if not hoomd.version.mpi_enabled:
raise RuntimeError("The nrank option is only available in MPI builds.\n")
mpi_available = hoomd.version.mpi_enabled;
self.cpp_mpi_conf = None
# create the specified configuration
if mpi_comm is None:
self.cpp_mpi_conf = _hoomd.MPIConfiguration();
else:
if not mpi_available:
raise RuntimeError("mpi_comm is not supported in serial builds");
handled = False;
# pass in pointer to MPI_Comm object provided by mpi4py
try:
import mpi4py
if isinstance(mpi_comm, mpi4py.MPI.Comm):
addr = mpi4py.MPI._addressof(mpi_comm);
self.cpp_mpi_conf = _hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(addr);
handled = True
except ImportError:
# silently ignore when mpi4py is missing
pass
# undocumented case: handle plain integers as pointers to MPI_Comm objects
if not handled and isinstance(mpi_comm, int):
self.cpp_mpi_conf = _hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(mpi_comm);
handled = True
if not handled:
raise RuntimeError("Invalid mpi_comm object: {}".format(mpi_comm));
if nrank is not None:
# check validity
if (self.cpp_mpi_conf.getNRanksGlobal() % nrank):
raise RuntimeError('Total number of ranks is not a multiple of --nrank');
# split the communicator into partitions
self.cpp_mpi_conf.splitPartitions(nrank)
@property
def num_ranks(self):
""" Get the number of ranks in this partition.
Returns:
The number of MPI ranks in this partition.
Note:
Returns 1 in non-mpi builds.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getNRanks();
else:
return 1;
@property
def rank(self):
""" Get the current rank.
Returns:
Index of the current rank in this partition.
Note:
Always returns 0 in non-mpi builds.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getRank()
else:
return 0;
@property
def partition(self):
""" Get the current partition index.
Returns:
Index of the current partition.
Note:
Always returns 0 in non-mpi builds.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getPartition()
else:
return 0;
def barrier_all(self):
""" Perform a MPI barrier synchronization across the whole MPI run.
Note:
Does nothing in non-MPI builds.
"""
if hoomd.version.mpi_enabled:
_hoomd.mpi_barrier_world();
def barrier(self):
""" Perform a MPI barrier synchronization across all ranks in the partition.
Note:
Does nothing in non-MPI builds.
"""
if hoomd.version.mpi_enabled:
self.cpp_mpi_conf.barrier()
@contextlib.contextmanager
def localize_abort(self):
""" Localize MPI_Abort to this partition.
HOOMD calls MPI_Abort to tear down all running MPI processes whenever
there is an uncaught exception. By default, this will abort the entire
MPI execution. When using partitions (``nrank is not None``), an uncaught
exception on one partition will therefore abort all of them.
Use the return value of :py:meth:`localize_abort()` as a context manager
to tell HOOMD that all operations within the context will use only
that MPI communicator so that an uncaught exception in one partition
will only abort that partition and leave the others running.
"""
global _current_communicator
prev = _current_communicator
_current_communicator = self
yield None
_current_communicator = prev
# store the "current" communicator to be used for MPI_Abort calls. This defaults
# to the world communicator, but users can opt in to a more specific
# communicator using the Device.localize_abort context manager
_current_communicator = Communicator()
```
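The fractional widths accepted by `decomposition` are per-dimension cuts: the first n_i - 1 fractions fix the interior cut planes along that axis and the final domain takes whatever width remains. A small illustration of how those fractions map to cut positions in a box of length L (the numbers are chosen only for this example):

```python
# Fractions [0.2, 0.3] in a box of length 10 place cut planes near 2.0 and
# 5.0, leaving the last domain with the remaining width of about 5.0.
def cut_positions(fractions, L):
    cuts, acc = [], 0.0
    for f in fractions:
        acc += f
        cuts.append(acc * L)
    return cuts

print(cut_positions([0.2, 0.3], 10.0))  # -> cut planes near 2.0 and 5.0
```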
#### File: hoomd-blue/hoomd/conftest.py
```python
import pytest
import hoomd
import atexit
import numpy
import itertools
from hoomd.snapshot import Snapshot
from hoomd import Simulation
devices = [hoomd.device.CPU]
if (hoomd.device.GPU.is_available()
and len(hoomd.device.GPU.get_available_devices()) > 0):
devices.append(hoomd.device.GPU)
@pytest.fixture(scope='session', params=devices)
def device(request):
"""Parameterized Device fixture.
Tests that use `device` will run once on the CPU and once on the GPU. The
device object is session scoped to avoid device creation overhead when
running tests.
"""
d = request.param()
# enable GPU error checking
if isinstance(d, hoomd.device.GPU):
d.gpu_error_checking = True
return d
@pytest.fixture(scope='session')
def simulation_factory(device):
"""Make a Simulation object from a snapshot.
TODO: duck type this to allow it to create state from GSD files as well
"""
def make_simulation(snapshot=None):
sim = Simulation(device)
# reduce sorter grid to avoid Hilbert curve overhead in unit tests
for tuner in sim.operations.tuners:
if isinstance(tuner, hoomd.tune.ParticleSorter):
tuner.grid = 8
if (snapshot is not None):
sim.create_state_from_snapshot(snapshot)
return sim
return make_simulation
@pytest.fixture(scope='session')
def two_particle_snapshot_factory(device):
"""Make a snapshot with two particles.
Args:
particle_types: List of particle type names
dimensions: Number of dimensions (2 or 3)
d: Distance apart to place particles
L: Box length
The two particles are placed at (-d/2, 0, 0) and (d/2, 0, 0). When
dimensions==3, the box is L by L by L. When dimensions==2, the box is L by L
by 1.
"""
def make_snapshot(particle_types=['A'], dimensions=3, d=1, L=20):
s = Snapshot(device.communicator)
N = 2
if s.exists:
box = [L, L, L, 0, 0, 0]
if dimensions == 2:
box[2] = 0
s.configuration.box = box
s.particles.N = N
# shift particle positions slightly in z so MPI tests pass
s.particles.position[:] = [[-d / 2, 0, .1], [d / 2, 0, .1]]
s.particles.types = particle_types
if dimensions == 2:
box[2] = 0
s.particles.position[:] = [[-d / 2, 0.1, 0], [d / 2, 0.1, 0]]
return s
return make_snapshot
@pytest.fixture(scope='session')
def lattice_snapshot_factory(device):
"""Make a snapshot with particles on a cubic/square lattice.
Args:
particle_types: List of particle type names
dimensions: Number of dimensions (2 or 3)
a: Lattice constant
n: Number of particles along each box edge
r: Fraction of `a` to randomly perturb particles
Place particles on a simple cubic (dimensions==3) or square (dimensions==2)
lattice. The box is cubic (or square) with a side length of `n * a`.
Set `r` to randomly perturb particles a small amount off their lattice
positions. This is useful in MD simulation testing so that forces do not
cancel out by symmetry.
"""
def make_snapshot(particle_types=['A'], dimensions=3, a=1, n=7, r=0):
s = Snapshot(device.communicator)
if s.exists:
box = [n * a, n * a, n * a, 0, 0, 0]
if dimensions == 2:
box[2] = 0
s.configuration.box = box
s.particles.N = n**dimensions
s.particles.types = particle_types
# create the lattice
range_ = numpy.arange(-n / 2, n / 2)
if dimensions == 2:
pos = list(itertools.product(range_, range_, [0]))
else:
pos = list(itertools.product(range_, repeat=3))
pos = numpy.array(pos) * a
pos[:, 0] += a / 2
pos[:, 1] += a / 2
if dimensions == 3:
pos[:, 2] += a / 2
# perturb the positions
if r > 0:
shift = numpy.random.uniform(-r, r, size=(s.particles.N, 3))
if dimensions == 2:
shift[:, 2] = 0
pos += shift
s.particles.position[:] = pos
return s
return make_snapshot
@pytest.fixture(autouse=True)
def skip_mpi(request):
if request.node.get_closest_marker('serial'):
if 'device' in request.fixturenames:
if request.getfixturevalue('device').communicator.num_ranks > 1:
pytest.skip('Test does not support MPI execution')
else:
raise ValueError('skip_mpi requires the *device* fixture')
@pytest.fixture(autouse=True)
def only_gpu(request):
if request.node.get_closest_marker('gpu'):
if 'device' in request.fixturenames:
if not isinstance(request.getfixturevalue('device'),
hoomd.device.GPU):
pytest.skip('Test is run only on GPU(s).')
else:
raise ValueError('only_gpu requires the *device* fixture')
@pytest.fixture(autouse=True)
def only_cpu(request):
if request.node.get_closest_marker('cpu'):
if 'device' in request.fixturenames:
if not isinstance(request.getfixturevalue('device'),
hoomd.device.CPU):
pytest.skip('Test is run only on CPU(s).')
else:
raise ValueError('only_cpu requires the *device* fixture')
@pytest.fixture(scope='function', autouse=True)
def numpy_random_seed():
"""Seed the numpy random number generator.
Automatically reset the numpy random seed at the start of each function
for reproducible tests.
"""
numpy.random.seed(42)
def pytest_addoption(parser):
"""Add HOOMD specific options to the pytest command line.
* validate - run validation tests
"""
parser.addoption(
"--validate",
action="store_true",
default=False,
help="Enable long running validation tests.",
)
@pytest.fixture(autouse=True)
def skip_validate(request):
"""Skip validation tests by default.
Pass the command line option --validate to enable these tests.
"""
if request.node.get_closest_marker('validate'):
if not request.config.getoption("validate"):
pytest.skip('Validation tests not requested.')
def pytest_configure(config):
config.addinivalue_line(
"markers",
"serial: Tests that will not execute with more than 1 MPI process")
config.addinivalue_line("markers",
"gpu: Tests that should only run on the gpu.")
config.addinivalue_line(
"markers",
"cupy_optional: tests that should pass with and without CuPy.")
config.addinivalue_line(
"markers",
"validate: Tests that perform long-running validations.")
config.addinivalue_line("markers", "cpu: Tests that only run on the CPU.")
config.addinivalue_line("markers", "gpu: Tests that only run on the GPU.")
def abort(exitstatus):
# get a default mpi communicator
communicator = hoomd.communicator.Communicator()
# abort the deadlocked ranks
hoomd._hoomd.abort_mpi(communicator.cpp_mpi_conf, exitstatus)
def pytest_sessionfinish(session, exitstatus):
""" Finalize pytest session
MPI tests may fail on one rank but not others. To prevent deadlocks in these
situations, this code calls ``MPI_Abort`` when pytest is exiting with a
non-zero exit code. **pytest** should be run with the ``-x`` option so that
it exits on the first error.
"""
if exitstatus != 0 and hoomd.version.mpi_enabled:
atexit.register(abort, exitstatus)
def logging_check(cls, expected_namespace, expected_loggables):
"""Function for testing object logging specification.
Args:
cls (object): The loggable class to test for the correct logging
specification.
expected_namespace (tuple[str]): A tuple of strings that indicate the
expected namespace minus the class name.
expected_loggables (dict[str, dict[str, Any]]): A dict with string keys
representing the expected loggable quantities. If the value for a
key is ``None``, then only check for the existence of the loggable
quantity. Otherwise, the inner `dict` should consist of some
combination of the keys ``default`` and ``category`` indicating the
expected value of each for the loggable.
"""
# Check namespace
assert all(log_quantity.namespace == expected_namespace + (cls.__name__,)
for log_quantity in cls._export_dict.values())
# Check specific loggables
def check_loggable(cls, name, properties):
assert name in cls._export_dict
if properties is None:
return None
log_quantity = cls._export_dict[name]
for name, prop in properties.items():
assert getattr(log_quantity, name) == prop
for name, properties in expected_loggables.items():
check_loggable(cls, name, properties)
```
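A hedged example of a test written against these session fixtures; the integrator and Langevin method follow the `hoomd.md` usage that appears later in this document, but the test itself is purely illustrative and not part of the shipped suite:

```python
import hoomd

# Illustrative test: build a small lattice, attach a Langevin integrator, and
# take a couple of steps on whichever device the session fixture provides.
def test_lattice_runs(simulation_factory, lattice_snapshot_factory):
    sim = simulation_factory(lattice_snapshot_factory(n=3))
    integrator = hoomd.md.Integrator(0.005)
    integrator.methods.append(
        hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1.0))
    sim.operations.integrator = integrator
    sim.run(2)
```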
#### File: hpmc/pytest/test_write_debug_data_hpmc.py
```python
import hoomd
def test_write_debug_data(simulation_factory, lattice_snapshot_factory,
tmp_path):
"""Test write_debug_data for MD simulations."""
sim = simulation_factory(lattice_snapshot_factory())
mc = hoomd.hpmc.integrate.ConvexPolyhedron()
mc.shape['A'] = dict(vertices=[
(-0.5, 0, 0),
(0.5, 0, 0),
(0, -0.5, 0),
(0, 0.5, 0),
(0, 0, -0.5),
(0, 0, 0.5),
])
sim.operations.integrator = mc
sim.write_debug_data(tmp_path / 'test_unscheduled.json')
sim.run(10)
sim.write_debug_data(tmp_path / 'test_scheduled.json')
```
#### File: hpmc/validation/lj_spheres.py
```python
from hoomd import *
from hoomd import hpmc
import numpy as np
import math
import unittest
import BlockAverage
# Reference potential energy (U/N/eps) from MC simulations
# https://mmlapps.nist.gov/srs/LJ_PURE/mc.htm
# mean_Uref = -5.5121E+00;
# sigma_Uref = 4.55E-04;
# Interaction cut-off
rcut = 3.0;
# LJ length scale
sigma = 1.0;
# Tstar = 8.50E-01;
# rho_star = 7.76E-01;
# Diameter of particles
diameter = sigma;
# linear lattice dimension
n = 8;
class nvt_lj_sphere_energy(unittest.TestCase):
def run_statepoint(self, Tstar, rho_star, mean_Uref, sigma_Uref, use_clusters, union):
"""
Tstar: Temperature (kT/eps)
rho_star: Reduced density: rhostar = (N / V) * sigma**3
mean_Uref: reference energy
sigma_Uref: standard deviation of the mean of reference energy
"""
context.initialize()
eps = 1.0 / Tstar;
# Particle volume
V_p = math.pi/6.*diameter**3.;
# lattice constant (sc)
d_eff = (V_p*6/math.pi)**(1./3.);
a = (d_eff**3.0/rho_star)**(1./3.);
system = init.create_lattice(unitcell=lattice.sc(a=a), n=n);
N = len(system.particles);
mc = hpmc.integrate.sphere(d=0.3,seed=321);
mc.shape_param.set('A',diameter=0)
lennard_jones = """
float rsq = dot(r_ij, r_ij);
float rcut = {};
if (rsq <= rcut*rcut)
{{
float sigma = {};
float eps = {};
float sigmasq = sigma*sigma;
float rsqinv = sigmasq / rsq;
float r6inv = rsqinv*rsqinv*rsqinv;
return 4.0f*eps*r6inv*(r6inv-1.0f);
}}
else
{{
return 0.0f;
}}
""".format(rcut,sigma,eps);
from hoomd import jit
if not union:
jit.patch.user(mc, r_cut=rcut, code=lennard_jones);
else:
u = jit.patch.user_union(mc,r_cut=rcut, code=lennard_jones)
u.set_params('A', positions=[(0,0,0)],typeids=[0])
log = analyze.log(filename=None, quantities=['hpmc_overlap_count','hpmc_patch_energy'],period=100,overwrite=True);
energy_val = [];
def accumulate_energy(timestep):
energy = log.query('hpmc_patch_energy') / float(N) / eps;
# apply long range correction (used in reference data)
energy += 8/9.0 * math.pi * rho_star * ((1/rcut)**9-3*(1/rcut)**3)
energy_val.append(energy);
if (timestep % 100 == 0): context.current.device.cpp_msg.notice(1,'energy = {:.5f}\n'.format(energy));
mc_tune = hpmc.util.tune(mc, tunables=['d','a'],max_val=[4,0.5],gamma=0.5,target=0.4);
for i in range(5):
run(100,quiet=True);
d = mc.get_d();
translate_acceptance = mc.get_translate_acceptance();
print('d: {:3.2f} accept: {:3.2f}'.format(d,translate_acceptance));
mc_tune.update();
# Equilibrate
run(500);
if use_clusters:
clusters = hpmc.update.clusters(mc, seed=99685)
mc.set_params(d=0, a=0); # test cluster moves alone
# Sample
if use_clusters:
run(5000,callback=accumulate_energy, callback_period=10)
else:
run(1000,callback=accumulate_energy, callback_period=10)
block = BlockAverage.BlockAverage(energy_val)
mean_U = np.mean(energy_val)
i, sigma_U = block.get_error_estimate()
context.current.device.cpp_msg.notice(1,'rho_star = {:.3f}\nU = {:.5f} +- {:.5f}\n'.format(rho_star,mean_U,sigma_U))
context.current.device.cpp_msg.notice(1,'Uref = {:.5f} +- {:.5f}\n'.format(mean_Uref,sigma_Uref))
# max error 0.5%
self.assertLessEqual(sigma_U/mean_U,0.005)
# 0.99 confidence interval
ci = 2.576
# compare if 0 is within the confidence interval around the difference of the means
sigma_diff = (sigma_U**2 + sigma_Uref**2)**(1/2.);
self.assertLessEqual(math.fabs(mean_U - mean_Uref), ci*sigma_diff)
def test_low_density_normal(self):
self.run_statepoint(Tstar=8.50E-01, rho_star=5.00E-03, mean_Uref=-5.1901E-02, sigma_Uref=7.53E-05,
use_clusters=False, union=False);
self.run_statepoint(Tstar=8.50E-01, rho_star=7.00E-03, mean_Uref=-7.2834E-02, sigma_Uref=1.34E-04,
use_clusters=False, union=False);
self.run_statepoint(Tstar=8.50E-01, rho_star=9.00E-03, mean_Uref=-9.3973E-02, sigma_Uref=1.29E-04,
use_clusters=False, union=False);
def test_low_density_union(self):
# test that the trivial union shape (a single point particle) also works
self.run_statepoint(Tstar=8.50E-01, rho_star=5.00E-03, mean_Uref=-5.1901E-02, sigma_Uref=7.53E-05,
use_clusters=False, union=True);
self.run_statepoint(Tstar=8.50E-01, rho_star=7.00E-03, mean_Uref=-7.2834E-02, sigma_Uref=1.34E-04,
use_clusters=False, union=True);
self.run_statepoint(Tstar=8.50E-01, rho_star=9.00E-03, mean_Uref=-9.3973E-02, sigma_Uref=1.29E-04,
use_clusters=False, union=True);
def test_low_density_clusters(self):
self.run_statepoint(Tstar=8.50E-01, rho_star=9.00E-03, mean_Uref=-9.3973E-02, sigma_Uref=1.29E-04,
use_clusters=True, union=False);
def test_moderate_density_normal(self):
self.run_statepoint(Tstar=9.00E-01, rho_star=7.76E-01, mean_Uref=-5.4689E+00, sigma_Uref=4.20E-04,
use_clusters=False, union=False);
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
```
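The tail correction added in `accumulate_energy` is the standard Lennard-Jones long-range energy correction per particle with sigma = eps = 1, u_tail = (8/9) * pi * rho_star * ((1/rcut)**9 - 3*(1/rcut)**3). A quick evaluation at one of the tested state points (values taken from the script above):

```python
import math

# LJ energy tail correction per particle (sigma = eps = 1), as used above.
def lj_tail(rho_star, rcut):
    return 8.0 / 9.0 * math.pi * rho_star * ((1.0 / rcut)**9 - 3.0 * (1.0 / rcut)**3)

print(lj_tail(5.00e-03, 3.0))  # ~ -1.55e-03, a small negative shift of U/N/eps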
#### File: hpmc/validation/spheres_implicit_repulsive.py
```python
from __future__ import division
from hoomd import *
from hoomd import hpmc
import numpy as np
import math
import unittest
context.initialize()
#seed_list=[123, 456]
seed_list = [123]
#phi_c_list=[0.01, 0.05, 0.10, 0.2, 0.3]
phi_c_list=[0.1]
#eta_p_r_list=[0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0]
eta_p_r_list=[0.4]
import itertools
params = []
params = list(itertools.product(seed_list, phi_c_list, eta_p_r_list))
context.current.device.cpp_msg.notice(1,"{} parameters\n".format(len(params)))
# choose a random state point
p = int(option.get_user()[0])
(seed, phi_c, eta_p_r) = params[p % len(params)]
context.current.device.cpp_msg.notice(1,"parameter {} seed {} phi_c {:.3f} eta_p_r {:.3f}\n".format(p,seed, phi_c, eta_p_r))
# test the equation of state for the free volume fraction of hard spheres, when simultaneously
# applying depletion with positive and negative coefficients so that both contributions cancel
# reference data key = (phi_c, eta_p_r), value = (alpha, error)
# 128 spheres
alpha_ref=dict()
alpha_ref[(0.1,0.4)] = (0.375450,0.000130)
# number of spheres along one dimension
n = 5
N = n**3
d_sphere = 1.0
V_sphere = math.pi/6.0*math.pow(d_sphere,3.0)
# depletant-colloid size ratio
q=1.0
L_target= math.pow(N*V_sphere/phi_c,1.0/3.0)
class depletion_test(unittest.TestCase):
def setUp(self):
# initialize random configuration
a = L_target/n
self.system = init.create_lattice(unitcell=lattice.sc(a=a), n=n);
self.system.particles.types.add('B')
self.system.particles.types.add('C')
def test_measure_etap(self):
self.mc = hpmc.integrate.sphere(seed=seed)
self.mc.set_params(d=0.1,a=0.1)
self.mc.shape_param.set('A', diameter=d_sphere)
self.mc.shape_param.set('B', diameter=d_sphere*q)
self.mc.shape_param.set('C', diameter=d_sphere*q)
self.mc_tune = hpmc.util.tune(self.mc, tunables=['d'],max_val=[d_sphere],gamma=1,target=0.2)
for i in range(10):
run(100, quiet=True)
self.mc_tune.update()
# warm up
run(2000);
# set ntrial > 0
self.mc.set_params(ntrial=2)
# set depletant fugacity
nR = eta_p_r/(math.pi/6.0*math.pow(d_sphere*q,3.0))
self.mc.set_fugacity('B',nR)
# set negative fugacity to same amount to cancel
self.mc.set_fugacity('C',-nR)
free_volume = hpmc.compute.free_volume(mc=self.mc, seed=seed, nsample=10000, test_type='B')
log=analyze.log(filename=None, quantities=['hpmc_overlap_count','volume','hpmc_free_volume'], overwrite=True,period=1000)
alpha_measure = []
def log_callback(timestep):
v = log.query('hpmc_free_volume')/log.query('volume')
alpha_measure.append(v)
self.assertEqual(log.query('hpmc_overlap_count'),0)
# if context.current.device.comm.rank == 0:
# print('alpha =', v);
run(4e5,callback=log_callback,callback_period=100)
import BlockAverage
block = BlockAverage.BlockAverage(alpha_measure)
alpha_avg = np.mean(np.array(alpha_measure))
i, alpha_err = block.get_error_estimate()
if context.current.device.comm.rank == 0:
print(i)
(n, num, err, err_err) = block.get_hierarchical_errors()
print('Hierarchical error analysis:')
for (i, num_samples, e, ee) in zip(n, num, err, err_err):
print('{0} {1} {2} {3}'.format(i,num_samples,e,ee))
if context.current.device.comm.rank == 0:
print('avg: {:.6f} +- {:.6f}'.format(alpha_avg, alpha_err))
print('tgt: {:.6f} +- {:.6f}'.format(alpha_ref[(phi_c,eta_p_r)][0], alpha_ref[(phi_c,eta_p_r)][1]))
# max error 0.5%
self.assertLessEqual(alpha_err/alpha_avg,0.005)
# 95% two-sided confidence interval (1.96 is the 97.5% quantile of the normal distribution)
ci = 1.96
# check against reference value within reference error + measurement error
self.assertLessEqual(math.fabs(alpha_avg-alpha_ref[(phi_c,eta_p_r)][0]),ci*(alpha_ref[(phi_c,eta_p_r)][1]+alpha_err))
def tearDown(self):
del self.system
context.initialize();
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
```
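The fugacities set above come from the reduced depletant packing fraction: the reservoir number density is n_R = eta_p_r / v_d with depletant volume v_d = (pi/6) * (d_sphere * q)**3, which is exactly the expression used for `nR` in the test. Evaluating it at the state point used in this script:

```python
import math

# Reservoir depletant density from the reduced packing fraction, matching the
# nR expression used in test_measure_etap above (state point from this script).
eta_p_r, d_sphere, q = 0.4, 1.0, 1.0
n_R = eta_p_r / (math.pi / 6.0 * (d_sphere * q)**3)
print(n_R)  # ~ 0.764 depletants per unit volume
```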
#### File: hoomd/md/angle.py
```python
from hoomd import _hoomd
from hoomd.md import _md
from hoomd.md import force
from hoomd.md.force import Force
from hoomd.data.typeparam import TypeParameter
from hoomd.data.parameterdicts import TypeParameterDict
import hoomd
import math
class Angle(Force):
"""Constructs the angular bond potential.
Note:
:py:class:`Angle` is the base class for all angular potentials.
Users should not instantiate this class directly.
"""
def _attach(self):
# check that some angles are defined
if self._simulation.state._cpp_sys_def.getAngleData().getNGlobal() == 0:
self._simulation.device._cpp_msg.warning("No angles are defined.\n")
# create the c++ mirror class
if isinstance(self._simulation.device, hoomd.device.CPU):
cpp_cls = getattr(_md, self._cpp_class_name)
else:
cpp_cls = getattr(_md, self._cpp_class_name + "GPU")
self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def)
super()._attach()
class Harmonic(Angle):
R""" Harmonic angle potential.
The command angle.harmonic specifies a harmonic potential energy between
every triplet of particles with an angle specified between them.
.. math::
V(\theta) = \frac{1}{2} k \left( \theta - \theta_0 \right)^2
where :math:`\theta` is the angle between the triplet of particles.
Attributes:
params (TypeParameter[``angle type``, dict]):
The parameters of the harmonic angle potential for each angle type.
The dictionary has the following keys:
* ``k`` (`float`, **required**) - potential constant
(in units of energy/radians^2)
* ``t0`` (`float`, **required**) - rest angle
(in units radians)
Examples::
harmonic = angle.Harmonic()
harmonic.params['polymer'] = dict(k=3.0, t0=0.7851)
harmonic.params['backbone'] = dict(k=100.0, t0=1.0)
"""
_cpp_class_name = 'HarmonicAngleForceCompute'
def __init__(self):
params = TypeParameter('params', 'angle_types',
TypeParameterDict(t0=float, k=float, len_keys=1))
self._add_typeparam(params)
class Cosinesq(Angle):
R""" Cosine squared angle potential.
The command angle.cosinesq specifies a cosine squared potential energy
between every triplet of particles with an angle specified between them.
.. math::
V(\theta) = \frac{1}{2} k \left( \cos\theta - \cos\theta_0 \right)^2
where :math:`\theta` is the angle between the triplet of particles.
This angle style is also known as g96, since it was used in the
gromos96 force field. These are also the types of angles used with the
coarse-grained MARTINI force field.
Attributes:
params (TypeParameter[``angle type``, dict]):
The parameters of the cosine squared angle potential for each angle type.
The dictionary has the following keys:
* ``k`` (`float`, **required**) - potential constant
(in units of energy/radians^2)
* ``t0`` (`float`, **required**) - rest angle :math:`\theta_0`
(in units radians)
Parameters :math:`k` and :math:`\theta_0` must be set for each type of
angle in the simulation. Note that the value of :math:`k` for this angle
potential is not comparable to the value of :math:`k` for harmonic angles,
as they have different units.
Examples::
cosinesq = angle.Cosinesq()
cosinesq.params['polymer'] = dict(k=3.0, t0=0.7851)
cosinesq.params['backbone'] = dict(k=100.0, t0=1.0)
"""
_cpp_class_name = 'CosineSqAngleForceCompute'
def __init__(self):
params = TypeParameter('params', 'angle_types',
TypeParameterDict(t0=float, k=float, len_keys=1))
self._add_typeparam(params)
def _table_eval(theta, V, T, width):
dth = (math.pi) / float(width-1);
i = int(round((theta)/dth))
return (V[i], T[i])
class table(force._force):
R""" Tabulated angle potential.
Args:
width (int): Number of points to use to interpolate V and T (see documentation above)
name (str): Name of the force instance
:py:class:`table` specifies that a tabulated angle potential should be added to every bonded triple of particles
in the simulation.
The torque :math:`T` (in units of force * distance) and the potential :math:`V(\theta)` (in energy units) are:
.. math::
T(\theta) = & T_{\mathrm{user}}(\theta) \\
V(\theta) = & V_{\mathrm{user}}(\theta)
where :math:`\theta` is the angle from A-B to B-C in the triple.
:math:`T_{\mathrm{user}}(\theta)` and :math:`V_{\mathrm{user}}(\theta)` are evaluated on *width* grid points
between :math:`0` and :math:`\pi`. Values are interpolated linearly between grid points.
For correctness, you must specify: :math:`T = -\frac{\partial V}{\partial \theta}`
Parameters:
- :math:`T_{\mathrm{user}}(\theta)` and :math:`V_{\mathrm{user}}(\theta)` - evaluated by ``func`` (see example)
- coefficients passed to ``func`` - ``angle_coeff`` (see example)
The table *width* is set once when :py:class:`table` is specified. There are two ways to specify the other
parameters.
.. rubric:: Set table from a given function
When you have a functional form for V and T, you can enter that
directly into python. :py:class:`table` will evaluate the given function over *width* points between :math:`0` and :math:`\pi`
and use the resulting values in the table::
def harmonic(theta, kappa, theta_0):
V = 0.5 * kappa * (theta-theta_0)**2;
T = -kappa*(theta-theta_0);
return (V, T)
btable = angle.table(width=1000)
btable.angle_coeff.set('angle1', func=harmonic, coeff=dict(kappa=330, theta_0=0))
btable.angle_coeff.set('angle2', func=harmonic,coeff=dict(kappa=30, theta_0=0.1))
.. rubric:: Set a table from a file
When you have no function for *V* or *T*, or you otherwise have the data listed in a file, :py:class:`table` can use the given
values directly. You must first specify the number of rows in your tables when initializing :py:class:`table`. Then use
:py:meth:`set_from_file()` to read the file::
btable = angle.table(width=1000)
btable.set_from_file('polymer', 'angle.dat')
"""
def __init__(self, width, name=None):
# initialize the base class
force._force.__init__(self, name);
# create the c++ mirror class
if not hoomd.context.current.device.cpp_exec_conf.isCUDAEnabled():
self.cpp_force = _md.TableAngleForceCompute(hoomd.context.current.system_definition, int(width), self.name);
else:
self.cpp_force = _md.TableAngleForceComputeGPU(hoomd.context.current.system_definition, int(width), self.name);
hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
# setup the coefficient matrix
self.angle_coeff = coeff();
# stash the width for later use
self.width = width;
def update_angle_table(self, atype, func, coeff):
# allocate arrays to store V and F
Vtable = _hoomd.std_vector_scalar();
Ttable = _hoomd.std_vector_scalar();
# calculate dth
dth = math.pi / float(self.width-1);
# evaluate each point of the function
for i in range(0, self.width):
theta = dth * i;
(V,T) = func(theta, **coeff);
# fill out the tables
Vtable.append(V);
Ttable.append(T);
# pass the tables on to the underlying cpp compute
self.cpp_force.setTable(atype, Vtable, Ttable);
def update_coeffs(self):
# check that the angle coefficients are valid
if not self.angle_coeff.verify(["func", "coeff"]):
hoomd.context.current.device.cpp_msg.error("Not all angle coefficients are set for angle.table\n");
raise RuntimeError("Error updating angle coefficients");
# set all the params
ntypes = hoomd.context.current.system_definition.getAngleData().getNTypes();
type_list = [];
for i in range(0,ntypes):
type_list.append(hoomd.context.current.system_definition.getAngleData().getNameByType(i));
# loop through all of the unique type angles and evaluate the table
for i in range(0,ntypes):
func = self.angle_coeff.get(type_list[i], "func");
coeff = self.angle_coeff.get(type_list[i], "coeff");
self.update_angle_table(i, func, coeff);
def set_from_file(self, anglename, filename):
R""" Set a angle pair interaction from a file.
Args:
anglename (str): Name of angle
filename (str): Name of the file to read
The provided file specifies V and T at equally spaced theta values::
#t V T
0.0 2.0 -3.0
1.5707 3.0 -4.0
3.1414 2.0 -3.0
Warning:
The theta values are not used by the code. It is assumed that a table that has N rows will start at 0, end at :math:`\pi`
and that :math:`\delta \theta = \pi/(N-1)`. The table is read
directly into the grid points used to evaluate :math:`T_{\mathrm{user}}(\theta)` and :math:`V_{\mathrm{user}}(\theta)`.
"""
# open the file
f = open(filename);
theta_table = [];
V_table = [];
T_table = [];
# read in lines from the file
for line in f.readlines():
line = line.strip();
# skip blank and comment lines
if len(line) == 0 or line[0] == '#':
continue;
# split out the columns
cols = line.split();
values = [float(f) for f in cols];
# validate the input
if len(values) != 3:
hoomd.context.current.device.cpp_msg.error("angle.table: file must have exactly 3 columns\n");
raise RuntimeError("Error reading table file");
# append to the tables
theta_table.append(values[0]);
V_table.append(values[1]);
T_table.append(values[2]);
# validate input
if self.width != len(theta_table):
hoomd.context.current.device.cpp_msg.error("angle.table: file must have exactly " + str(self.width) + " rows\n");
raise RuntimeError("Error reading table file");
# check for even spacing
dth = math.pi / float(self.width-1);
for i in range(0,self.width):
theta = dth * i;
if math.fabs(theta - theta_table[i]) > 1e-3:
hoomd.context.current.device.cpp_msg.error("angle.table: theta must be monotonically increasing and evenly spaced\n");
raise RuntimeError("Error reading table file");
self.angle_coeff.set(anglename, func=_table_eval, coeff=dict(V=V_table, T=T_table, width=self.width))
## \internal
# \brief Get metadata
def get_metadata(self):
data = force._force.get_metadata(self)
# make sure coefficients are up-to-date
self.update_coeffs()
data['angle_coeff'] = self.angle_coeff
return data
```
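The table docstring requires T = -dV/dtheta for consistency between the tabulated potential and torque. A short sketch that checks this for the harmonic example given above, using a central finite difference (the tolerance is arbitrary):

```python
# Harmonic angle example from the table docstring above.
def harmonic(theta, kappa, theta_0):
    V = 0.5 * kappa * (theta - theta_0)**2
    T = -kappa * (theta - theta_0)
    return (V, T)

# Check T = -dV/dtheta numerically at one point with a central difference.
theta, h = 1.0, 1e-6
V_plus, _ = harmonic(theta + h, kappa=330, theta_0=0)
V_minus, _ = harmonic(theta - h, kappa=330, theta_0=0)
_, T = harmonic(theta, kappa=330, theta_0=0)
assert abs(T + (V_plus - V_minus) / (2 * h)) < 1e-3
```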
#### File: md/pytest/test_active.py
```python
import hoomd
import pytest
import numpy
import itertools
def test_attach(simulation_factory, two_particle_snapshot_factory):
sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=8))
integrator = hoomd.md.Integrator(.05)
integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=0))
integrator.forces.append(hoomd.md.force.Active(filter=hoomd.filter.All(), rotation_diff=0.01))
sim.operations.integrator = integrator
sim.operations._schedule()
sim.run(10)
```
#### File: md/pytest/test_methods.py
```python
import hoomd
import pytest
import numpy
import itertools
from copy import deepcopy
def test_brownian_attributes():
"""Test attributes of the Brownian integrator before attaching."""
all_ = hoomd.filter.All()
constant = hoomd.variant.Constant(2.0)
brownian = hoomd.md.methods.Brownian(filter = all_, kT=constant)
assert brownian.filter is all_
assert brownian.kT is constant
assert brownian.alpha is None
type_A = hoomd.filter.Type(['A'])
brownian.filter = type_A
assert brownian.filter is type_A
ramp = hoomd.variant.Ramp(1, 2, 1000000, 2000000)
brownian.kT = ramp
assert brownian.kT is ramp
brownian.alpha = 0.125
assert brownian.alpha == 0.125
def test_brownian_attributes_attached(simulation_factory,
two_particle_snapshot_factory):
"""Test attributes of the Brownian integrator after attaching."""
all_ = hoomd.filter.All()
constant = hoomd.variant.Constant(2.0)
brownian = hoomd.md.methods.Brownian(filter = all_, kT=constant)
sim = simulation_factory(two_particle_snapshot_factory())
sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[brownian])
sim.operations._schedule()
assert brownian.filter is all_
assert brownian.kT is constant
assert brownian.alpha is None
type_A = hoomd.filter.Type(['A'])
with pytest.raises(AttributeError):
# filter cannot be set after scheduling
brownian.filter = type_A
assert brownian.filter is all_
ramp = hoomd.variant.Ramp(1, 2, 1000000, 2000000)
brownian.kT = ramp
assert brownian.kT is ramp
brownian.alpha = 0.125
assert brownian.alpha == 0.125
@pytest.mark.serial
def test_berendsen_attributes(device):
"""Test attributes of the Berendsen integrator before attaching."""
all_ = hoomd.filter.All()
constant = hoomd.variant.Constant(2.0)
berendsen = hoomd.md.methods.Berendsen(filter=all_, kT=constant, tau=10.0)
assert berendsen.filter == all_
assert berendsen.kT == constant
assert berendsen.tau == 10.0
type_A = hoomd.filter.Type(['A'])
berendsen.filter = type_A
assert berendsen.filter == type_A
ramp = hoomd.variant.Ramp(1, 2, 1000000, 2000000)
berendsen.kT = ramp
assert berendsen.kT == ramp
berendsen.tau = 1.2
assert berendsen.tau == 1.2
@pytest.mark.serial
def test_berendsen_attributes_attached(simulation_factory,
two_particle_snapshot_factory):
"""Test attributes of the Berendsen integrator after attaching."""
all_ = hoomd.filter.All()
constant = hoomd.variant.Constant(2.0)
berendsen = hoomd.md.methods.Berendsen(filter=all_, kT=constant, tau=10.0)
sim = simulation_factory(two_particle_snapshot_factory())
sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[berendsen])
sim.operations._schedule()
assert berendsen.filter == all_
assert berendsen.kT == constant
assert berendsen.tau == 10.0
type_A = hoomd.filter.Type(['A'])
with pytest.raises(AttributeError):
# filter cannot be set after scheduling
berendsen.filter = type_A
assert berendsen.filter == all_
ramp = hoomd.variant.Ramp(1, 2, 1000000, 2000000)
berendsen.kT = ramp
assert berendsen.kT == ramp
berendsen.tau = 1.2
assert berendsen.tau == 1.2
def test_langevin_attributes():
"""Test attributes of the Langevin integrator before attaching."""
all_ = hoomd.filter.All()
constant = hoomd.variant.Constant(2.0)
langevin = hoomd.md.methods.Langevin(filter = all_, kT=constant)
assert langevin.filter is all_
assert langevin.kT is constant
assert langevin.alpha is None
assert (not langevin.tally_reservoir_energy)
type_A = hoomd.filter.Type(['A'])
langevin.filter = type_A
assert langevin.filter is type_A
ramp = hoomd.variant.Ramp(1, 2, 1000000, 2000000)
langevin.kT = ramp
assert langevin.kT is ramp
langevin.alpha = 0.125
assert langevin.alpha == 0.125
langevin.tally_reservoir_energy = True
assert langevin.tally_reservoir_energy
def test_langevin_attributes_attached(simulation_factory,
two_particle_snapshot_factory):
"""Test attributes of the Langevin integrator before attaching."""
all_ = hoomd.filter.All()
constant = hoomd.variant.Constant(2.0)
langevin = hoomd.md.methods.Langevin(filter = all_, kT=constant)
sim = simulation_factory(two_particle_snapshot_factory())
sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[langevin])
sim.operations._schedule()
assert langevin.filter is all_
assert langevin.kT is constant
assert langevin.alpha is None
assert (not langevin.tally_reservoir_energy)
type_A = hoomd.filter.Type(['A'])
with pytest.raises(AttributeError):
# filter cannot be set after scheduling
langevin.filter = type_A
assert langevin.filter is all_
ramp = hoomd.variant.Ramp(1, 2, 1000000, 2000000)
langevin.kT = ramp
assert langevin.kT is ramp
langevin.alpha = 0.125
assert langevin.alpha == 0.125
langevin.tally_reservoir_energy = True
assert langevin.tally_reservoir_energy
def test_npt_attributes():
"""Test attributes of the NPT integrator before attaching."""
all_ = hoomd.filter.All()
constant_t = hoomd.variant.Constant(2.0)
constant_s = [hoomd.variant.Constant(1.0),
hoomd.variant.Constant(2.0),
hoomd.variant.Constant(3.0),
hoomd.variant.Constant(0.125),
hoomd.variant.Constant(.25),
hoomd.variant.Constant(.5)]
npt = hoomd.md.methods.NPT(filter = all_, kT=constant_t, tau=2.0,
S = constant_s,
tauS = 2.0,
couple='xyz')
assert npt.filter is all_
assert npt.kT is constant_t
assert npt.tau == 2.0
assert len(npt.S) == 6
for i in range(6):
assert npt.S[i] is constant_s[i]
assert npt.tauS == 2.0
assert npt.box_dof == (True,True,True,False,False,False)
assert npt.couple == 'xyz'
assert not npt.rescale_all
assert npt.gamma == 0.0
type_A = hoomd.filter.Type(['A'])
npt.filter = type_A
assert npt.filter is type_A
ramp = hoomd.variant.Ramp(1, 2, 1000000, 2000000)
npt.kT = ramp
assert npt.kT is ramp
npt.tau = 10.0
assert npt.tau == 10.0
ramp_s = [hoomd.variant.Ramp(1.0, 4.0, 1000, 10000),
hoomd.variant.Ramp(2.0, 4.0, 1000, 10000),
hoomd.variant.Ramp(3.0, 4.0, 1000, 10000),
hoomd.variant.Ramp(0.125, 4.0, 1000, 10000),
hoomd.variant.Ramp(.25, 4.0, 1000, 10000),
hoomd.variant.Ramp(.5, 4.0, 1000, 10000)]
npt.S = ramp_s
assert len(npt.S) == 6
for i in range(6):
assert npt.S[i] is ramp_s[i]
npt.tauS = 10.0
assert npt.tauS == 10.0
npt.box_dof = (True,False,False,False,True,False)
assert npt.box_dof == (True,False,False,False,True,False)
npt.couple = 'none'
assert npt.couple == 'none'
npt.rescale_all = True
assert npt.rescale_all
npt.gamma = 2.0
assert npt.gamma == 2.0
assert npt.translational_thermostat_dof == (0.0, 0.0)
npt.translational_thermostat_dof = (0.125, 0.5)
assert npt.translational_thermostat_dof == (0.125, 0.5)
assert npt.rotational_thermostat_dof == (0.0, 0.0)
npt.rotational_thermostat_dof = (0.5, 0.25)
assert npt.rotational_thermostat_dof == (0.5, 0.25)
assert npt.barostat_dof == (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
npt.barostat_dof = (1.0, 2.0, 4.0, 6.0, 8.0, 10.0)
assert npt.barostat_dof == (1.0, 2.0, 4.0, 6.0, 8.0, 10.0)
def test_nph_attributes():
"""Test attributes of the NPH integrator before attaching."""
all_ = hoomd.filter.All()
constant_s = [
hoomd.variant.Constant(1.0),
hoomd.variant.Constant(2.0),
hoomd.variant.Constant(3.0),
hoomd.variant.Constant(0.125),
hoomd.variant.Constant(.25),
hoomd.variant.Constant(.5)
]
nph = hoomd.md.methods.NPH(filter=all_,
S=constant_s,
tauS=2.0,
couple='xyz')
assert nph.filter == all_
assert len(nph.S) == 6
for i in range(6):
assert nph.S[i] is constant_s[i]
assert nph.tauS == 2.0
assert nph.box_dof == (True, True, True, False, False, False)
assert nph.couple == 'xyz'
assert not nph.rescale_all
assert nph.gamma == 0.0
type_A = hoomd.filter.Type(['A'])
nph.filter = type_A
assert nph.filter == type_A
ramp_s = [
hoomd.variant.Ramp(1.0, 4.0, 1000, 10000),
hoomd.variant.Ramp(2.0, 4.0, 1000, 10000),
hoomd.variant.Ramp(3.0, 4.0, 1000, 10000),
hoomd.variant.Ramp(0.125, 4.0, 1000, 10000),
hoomd.variant.Ramp(.25, 4.0, 1000, 10000),
hoomd.variant.Ramp(.5, 4.0, 1000, 10000)
]
nph.S = ramp_s
assert len(nph.S) == 6
for i in range(6):
assert nph.S[i] is ramp_s[i]
nph.tauS = 10.0
assert nph.tauS == 10.0
nph.box_dof = (True, False, False, False, True, False)
assert nph.box_dof == (True, False, False, False, True, False)
nph.couple = 'none'
assert nph.couple == 'none'
nph.rescale_all = True
assert nph.rescale_all
nph.gamma = 2.0
assert nph.gamma == 2.0
assert nph.barostat_dof == (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
nph.barostat_dof = (1.0, 2.0, 4.0, 6.0, 8.0, 10.0)
assert nph.barostat_dof == (1.0, 2.0, 4.0, 6.0, 8.0, 10.0)
def test_npt_attributes_attached_3d(simulation_factory,
two_particle_snapshot_factory):
"""Test attributes of the NPT integrator before attaching."""
all_ = hoomd.filter.All()
constant_t = hoomd.variant.Constant(2.0)
constant_s = [hoomd.variant.Constant(1.0),
hoomd.variant.Constant(2.0),
hoomd.variant.Constant(3.0),
hoomd.variant.Constant(0.125),
hoomd.variant.Constant(.25),
hoomd.variant.Constant(.5)]
npt = hoomd.md.methods.NPT(filter = all_, kT=constant_t, tau=2.0,
S = constant_s,
tauS = 2.0,
couple='xyz')
sim = simulation_factory(two_particle_snapshot_factory())
sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[npt])
sim.operations._schedule()
assert npt.filter is all_
assert npt.kT is constant_t
assert npt.tau == 2.0
assert len(npt.S) == 6
for i in range(6):
assert npt.S[i] is constant_s[i]
assert npt.tauS == 2.0
assert npt.couple == 'xyz'
type_A = hoomd.filter.Type(['A'])
with pytest.raises(AttributeError):
# filter cannot be set after scheduling
npt.filter = type_A
assert npt.filter is all_
ramp = hoomd.variant.Ramp(1, 2, 1000000, 2000000)
npt.kT = ramp
assert npt.kT is ramp
npt.tau = 10.0
assert npt.tau == 10.0
ramp_s = [hoomd.variant.Ramp(1.0, 4.0, 1000, 10000),
hoomd.variant.Ramp(2.0, 4.0, 1000, 10000),
hoomd.variant.Ramp(3.0, 4.0, 1000, 10000),
hoomd.variant.Ramp(0.125, 4.0, 1000, 10000),
hoomd.variant.Ramp(.25, 4.0, 1000, 10000),
hoomd.variant.Ramp(.5, 4.0, 1000, 10000)]
npt.S = ramp_s
assert len(npt.S) == 6
for i in range(6):
assert npt.S[i] is ramp_s[i]
npt.tauS = 10.0
assert npt.tauS == 10.0
npt.box_dof = (True,False,False,False,True,False)
assert tuple(npt.box_dof) == (True,False,False,False,True,False)
npt.couple = 'none'
assert npt.couple == 'none'
npt.rescale_all = True
assert npt.rescale_all
npt.gamma = 2.0
assert npt.gamma == 2.0
assert npt.translational_thermostat_dof == (0.0, 0.0)
npt.translational_thermostat_dof = (0.125, 0.5)
assert npt.translational_thermostat_dof == (0.125, 0.5)
assert npt.rotational_thermostat_dof == (0.0, 0.0)
npt.rotational_thermostat_dof = (0.5, 0.25)
assert npt.rotational_thermostat_dof == (0.5, 0.25)
assert npt.barostat_dof == (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
npt.barostat_dof = (1.0, 2.0, 4.0, 6.0, 8.0, 10.0)
assert npt.barostat_dof == (1.0, 2.0, 4.0, 6.0, 8.0, 10.0)
def test_nph_attributes_attached_3d(simulation_factory,
two_particle_snapshot_factory):
"""Test attributes of the NPH integrator after attaching in 3D."""
all_ = hoomd.filter.All()
constant_s = [
hoomd.variant.Constant(1.0),
hoomd.variant.Constant(2.0),
hoomd.variant.Constant(3.0),
hoomd.variant.Constant(0.125),
hoomd.variant.Constant(.25),
hoomd.variant.Constant(.5)
]
nph = hoomd.md.methods.NPH(filter=all_,
S=constant_s,
tauS=2.0,
couple='xyz')
sim = simulation_factory(two_particle_snapshot_factory())
sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[nph])
sim.run(0)
assert nph.filter == all_
assert len(nph.S) == 6
for i in range(6):
assert nph.S[i] is constant_s[i]
assert nph.tauS == 2.0
assert nph.couple == 'xyz'
type_A = hoomd.filter.Type(['A'])
with pytest.raises(AttributeError):
# filter cannot be set after scheduling
nph.filter = type_A
assert nph.filter == all_
nph.tauS = 10.0
assert nph.tauS == 10.0
nph.box_dof = (True, False, False, False, True, False)
assert tuple(nph.box_dof) == (True, False, False, False, True, False)
nph.couple = 'none'
assert nph.couple == 'none'
nph.rescale_all = True
assert nph.rescale_all
nph.gamma = 2.0
assert nph.gamma == 2.0
assert nph.barostat_dof == (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
nph.barostat_dof = (1.0, 2.0, 4.0, 6.0, 8.0, 10.0)
assert nph.barostat_dof == (1.0, 2.0, 4.0, 6.0, 8.0, 10.0)
ramp_s = [
hoomd.variant.Ramp(1.0, 4.0, 1000, 10000),
hoomd.variant.Ramp(2.0, 4.0, 1000, 10000),
hoomd.variant.Ramp(3.0, 4.0, 1000, 10000),
hoomd.variant.Ramp(0.125, 4.0, 1000, 10000),
hoomd.variant.Ramp(.25, 4.0, 1000, 10000),
hoomd.variant.Ramp(.5, 4.0, 1000, 10000)
]
nph.S = ramp_s
assert len(nph.S) == 6
for _ in range(5):
sim.run(1)
for i in range(6):
assert nph.S[i] is ramp_s[i]
def test_npt_thermalize_thermostat_and_barostat_dof(
simulation_factory, two_particle_snapshot_factory):
"""Tests that NPT.thermalize_thermostat_and_barostat_dof can be called."""
all_ = hoomd.filter.All()
constant_t = hoomd.variant.Constant(2.0)
constant_s = [1, 2, 3, 0.125, 0.25, 0.5]
npt = hoomd.md.methods.NPT(filter=all_,
kT=constant_t,
tau=2.0,
S=constant_s,
tauS=2.0,
box_dof=[True, True, True, True, True, True],
couple='xyz')
sim = simulation_factory(two_particle_snapshot_factory())
sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[npt])
sim.operations._schedule()
npt.thermalize_thermostat_and_barostat_dof()
xi, eta = npt.translational_thermostat_dof
assert xi != 0.0
assert eta == 0.0
xi_rot, eta_rot = npt.rotational_thermostat_dof
assert xi_rot == 0.0
assert eta_rot == 0.0
for v in npt.barostat_dof:
assert v != 0.0
def test_npt_thermalize_thermostat_and_barostat_aniso_dof(
simulation_factory, two_particle_snapshot_factory):
"""Tests that NPT.thermalize_thermostat_and_barostat_dof can be called."""
all_ = hoomd.filter.All()
constant_t = hoomd.variant.Constant(2.0)
constant_s = [1, 2, 3, 0.125, 0.25, 0.5]
npt = hoomd.md.methods.NPT(filter=all_,
kT=constant_t,
tau=2.0,
S=constant_s,
tauS=2.0,
box_dof=[True, True, True, True, True, True],
couple='xyz')
snap = two_particle_snapshot_factory()
if snap.exists:
snap.particles.moment_inertia[:] = [[1, 1, 1], [2, 0, 0]]
sim = simulation_factory(snap)
sim.operations.integrator = hoomd.md.Integrator(0.005,
methods=[npt],
aniso=True)
sim.run(0)
npt.thermalize_thermostat_and_barostat_dof()
xi, eta = npt.translational_thermostat_dof
assert xi != 0.0
assert eta == 0.0
xi_rot, eta_rot = npt.rotational_thermostat_dof
assert xi_rot != 0.0
assert eta_rot == 0.0
for v in npt.barostat_dof:
assert v != 0.0
def test_nph_thermalize_barostat_dof(simulation_factory,
two_particle_snapshot_factory):
"""Tests that NPT.thermalize_thermostat_and_barostat_dof can be called."""
all_ = hoomd.filter.All()
constant_s = [1, 2, 3, 0.125, 0.25, 0.5]
nph = hoomd.md.methods.NPH(filter=all_,
S=constant_s,
tauS=2.0,
box_dof=[True, True, True, True, True, True],
couple='xyz')
sim = simulation_factory(two_particle_snapshot_factory())
sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[nph])
sim.run(0)
nph.thermalize_barostat_dof()
for v in nph.barostat_dof:
assert v != 0.0
def test_npt_attributes_attached_2d(simulation_factory,
two_particle_snapshot_factory):
"""Test attributes of the NPT integrator specific to 2D simulations."""
all_ = hoomd.filter.All()
npt = hoomd.md.methods.NPT(filter = all_, kT=1.0, tau=2.0,
S = 2.0,
tauS = 2.0,
couple='xy')
assert npt.box_dof == (True,True,True,False,False,False)
assert npt.couple == 'xy'
sim = simulation_factory(two_particle_snapshot_factory(dimensions=2))
sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[npt])
sim.operations._schedule()
# after attaching in 2d, only some coupling modes and box dof are valid
assert tuple(npt.box_dof) == (True,True,False,False,False,False)
assert npt.couple == 'xy'
with pytest.raises(ValueError):
npt.couple = 'xyz'
with pytest.raises(ValueError):
npt.couple = 'xz'
with pytest.raises(ValueError):
npt.couple = 'yz'
npt.couple = 'none'
assert npt.couple == 'none'
npt.box_dof = (True, True, True, True, True, True)
assert tuple(npt.box_dof) == (True, True, False, True, False, False)
def test_nph_attributes_attached_2d(simulation_factory,
two_particle_snapshot_factory):
"""Test attributes of the NPH integrator specific to 2D simulations."""
all_ = hoomd.filter.All()
nph = hoomd.md.methods.NPH(filter=all_, S=2.0, tauS=2.0, couple='xy')
assert nph.box_dof == (True, True, True, False, False, False)
assert nph.couple == 'xy'
sim = simulation_factory(two_particle_snapshot_factory(dimensions=2))
sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[nph])
sim.run(0)
# after attaching in 2d, only some coupling modes and box dof are valid
assert tuple(nph.box_dof) == (True, True, False, False, False, False)
assert nph.couple == 'xy'
with pytest.raises(ValueError):
nph.couple = 'xyz'
with pytest.raises(ValueError):
nph.couple = 'xz'
with pytest.raises(ValueError):
nph.couple = 'yz'
nph.couple = 'none'
assert nph.couple == 'none'
nph.box_dof = (True, True, True, True, True, True)
assert tuple(nph.box_dof) == (True, True, False, True, False, False)
def test_nve_attributes():
"""Test attributes of the NVE integrator before attaching."""
all_ = hoomd.filter.All()
constant = hoomd.variant.Constant(2.0)
nve = hoomd.md.methods.NVE(filter = all_)
assert nve.filter is all_
type_A = hoomd.filter.Type(['A'])
nve.filter = type_A
assert nve.filter is type_A
def test_nve_attributes_attached(simulation_factory,
two_particle_snapshot_factory):
"""Test attributes of the NVE integrator before attaching."""
all_ = hoomd.filter.All()
nve = hoomd.md.methods.NVE(filter = all_)
sim = simulation_factory(two_particle_snapshot_factory())
sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[nve])
sim.operations._schedule()
assert nve.filter is all_
type_A = hoomd.filter.Type(['A'])
with pytest.raises(AttributeError):
# filter cannot be set after scheduling
nve.filter = type_A
assert nve.filter is all_
def test_nvt_attributes():
"""Test attributes of the NVT integrator before attaching."""
all_ = hoomd.filter.All()
constant = hoomd.variant.Constant(2.0)
nvt = hoomd.md.methods.NVT(filter = all_, kT=constant, tau=2.0)
assert nvt.filter is all_
assert nvt.kT is constant
assert nvt.tau == 2.0
type_A = hoomd.filter.Type(['A'])
nvt.filter = type_A
assert nvt.filter is type_A
ramp = hoomd.variant.Ramp(1, 2, 1000000, 2000000)
nvt.kT = ramp
assert nvt.kT is ramp
nvt.tau = 10.0
assert nvt.tau == 10.0
assert nvt.translational_thermostat_dof == (0.0, 0.0)
nvt.translational_thermostat_dof = (0.125, 0.5)
assert nvt.translational_thermostat_dof == (0.125, 0.5)
assert nvt.rotational_thermostat_dof == (0.0, 0.0)
nvt.rotational_thermostat_dof = (0.5, 0.25)
assert nvt.rotational_thermostat_dof == (0.5, 0.25)
def test_nvt_attributes_attached(simulation_factory,
two_particle_snapshot_factory):
"""Test attributes of the NVT integrator before attaching."""
all_ = hoomd.filter.All()
constant = hoomd.variant.Constant(2.0)
nvt = hoomd.md.methods.NVT(filter = all_, kT=constant, tau=2.0)
sim = simulation_factory(two_particle_snapshot_factory())
sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[nvt])
sim.operations._schedule()
assert nvt.filter is all_
assert nvt.kT is constant
assert nvt.tau == 2.0
type_A = hoomd.filter.Type(['A'])
with pytest.raises(AttributeError):
# filter cannot be set after scheduling
nvt.filter = type_A
assert nvt.filter is all_
ramp = hoomd.variant.Ramp(1, 2, 1000000, 2000000)
nvt.kT = ramp
assert nvt.kT is ramp
nvt.tau = 10.0
assert nvt.tau == 10.0
assert nvt.translational_thermostat_dof == (0.0, 0.0)
nvt.translational_thermostat_dof = (0.125, 0.5)
assert nvt.translational_thermostat_dof == (0.125, 0.5)
assert nvt.rotational_thermostat_dof == (0.0, 0.0)
nvt.rotational_thermostat_dof = (0.5, 0.25)
assert nvt.rotational_thermostat_dof == (0.5, 0.25)
def test_nvt_thermalize_thermostat_dof(simulation_factory,
two_particle_snapshot_factory):
"""Tests that NVT.thermalize_thermostat_dof can be called."""
all_ = hoomd.filter.All()
constant = hoomd.variant.Constant(2.0)
nvt = hoomd.md.methods.NVT(filter=all_, kT=constant, tau=2.0)
sim = simulation_factory(two_particle_snapshot_factory())
sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[nvt])
sim.operations._schedule()
nvt.thermalize_thermostat_dof()
xi, eta = nvt.translational_thermostat_dof
assert xi != 0.0
assert eta == 0.0
xi_rot, eta_rot = nvt.rotational_thermostat_dof
assert xi_rot == 0.0
assert eta_rot == 0.0
def test_nvt_thermalize_thermostat_aniso_dof(simulation_factory,
two_particle_snapshot_factory):
"""Tests that NVT.thermalize_thermostat_dof can be called."""
all_ = hoomd.filter.All()
constant = hoomd.variant.Constant(2.0)
nvt = hoomd.md.methods.NVT(filter=all_, kT=constant, tau=2.0)
snap = two_particle_snapshot_factory()
if snap.exists:
snap.particles.moment_inertia[:] = [[1, 1, 1], [2, 0, 0]]
sim = simulation_factory(snap)
sim.operations.integrator = hoomd.md.Integrator(0.005,
methods=[nvt],
aniso=True)
sim.run(0)
nvt.thermalize_thermostat_dof()
xi, eta = nvt.translational_thermostat_dof
assert xi != 0.0
assert eta == 0.0
xi_rot, eta_rot = nvt.rotational_thermostat_dof
assert xi_rot != 0.0
assert eta_rot == 0.0
```
#### File: hoomd-blue/hoomd/operation.py
```python
from hoomd.util import is_iterable, dict_map, dict_filter, str_to_tuple_keys
from hoomd.trigger import Trigger
from hoomd.variant import Variant, Constant
from hoomd.filter import ParticleFilter
from hoomd.logging import Loggable, log
from hoomd.data.typeconverter import RequiredArg
from hoomd.util import NamespaceDict
from hoomd._hoomd import GSDStateReader
from hoomd.data.parameterdicts import ParameterDict
from collections.abc import Mapping
from copy import deepcopy
def _convert_values_to_log_form(value):
"""Function for making state loggable quantity conform to spec.
Since the state dictionary is composed of properties for a given class
instance that does not have flags associated with it, we need to add the
    flags when querying for the state. This does make state logger type flag
    generation dynamic, meaning that we must be careful that we won't wrongly
detect different flags for the same attribute. In general this shouldn't
be a concern, though.
"""
if value is RequiredArg:
return RequiredArg
elif isinstance(value, Variant):
if isinstance(value, Constant):
return (value.value, 'scalar')
else:
return (value, 'object')
elif isinstance(value, Trigger) or isinstance(value, ParticleFilter):
return (value, 'object')
elif isinstance(value, Operation):
return (value, 'object')
elif isinstance(value, str):
return (value, 'string')
elif (is_iterable(value)
and len(value) != 0
and all([isinstance(v, str) for v in value])):
return (value, 'strings')
elif not is_iterable(value):
return (value, 'scalar')
else:
return (value, 'sequence')
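# Illustrative mapping only, not part of the original module: a Constant variant
# is reduced to its scalar value, e.g. Constant(2.0) -> (2.0, 'scalar'), a plain
# string maps to ('xyz', 'string'), and a non-empty list of strings maps to
# (value, 'strings').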
def _handle_gsd_arrays(arr):
if arr.size == 1:
return arr[0]
if arr.ndim == 1:
if arr.size < 3:
return tuple(arr.flatten())
else:
return arr
class _HOOMDGetSetAttrBase:
"""Provides the use of `ParameterDicts` and `TypeParameterDicts` as attrs.
Provides many hooks for varying behavior.
Attributes:
_reserved_default_attrs (dict[str, Callable([], T)]): Attributes that
have defaults and should be set using `object.__setattr__`. Has
`_param_dict` and `_typeparam_dict` keys by default.
_override_setattr (set[str]): Attributes that should not use the
provided `__setattr__`. `super().__setattr__` is called for them.
            Likely, this will no longer be necessary when triggers are added to
C++ Updaters and Analyzers.
_param_dict (ParameterDict): The `ParameterDict` for the class/instance.
_typeparam_dict (dict[str, TypeParameter]): A dict of all the
`TypeParameter`s for the class/instance.
"""
_reserved_default_attrs = dict(_param_dict=ParameterDict,
_typeparam_dict=dict)
_override_setattr = set()
def __getattr__(self, attr):
if attr in self._reserved_default_attrs.keys():
value = self._reserved_default_attrs[attr]()
object.__setattr__(self, attr, value)
return value
elif attr in self._param_dict.keys():
return self._getattr_param(attr)
elif attr in self._typeparam_dict.keys():
return self._getattr_typeparam(attr)
else:
raise AttributeError("Object {} has no attribute {}".format(
type(self), attr))
def _getattr_param(self, attr):
"""Hook for getting an attribute from `_param_dict`."""
return self._param_dict[attr]
def _getattr_typeparam(self, attr):
"""Hook for getting an attribute from `_typeparam_dict`."""
return self._typeparam_dict[attr]
def __setattr__(self, attr, value):
if attr in self._override_setattr:
super().__setattr__(attr, value)
elif attr in self._param_dict.keys():
self._setattr_param(attr, value)
elif attr in self._typeparam_dict.keys():
self._setattr_typeparam(attr, value)
else:
self._setattr_hook(attr, value)
def _setattr_hook(self, attr, value):
"""Used when attr is not found in `_param_dict` or `_typeparam_dict`."""
super().__setattr__(attr, value)
def _setattr_param(self, attr, value):
"""Hook for setting an attribute in `_param_dict`."""
old_value = self._param_dict[attr]
self._param_dict[attr] = value
new_value = self._param_dict[attr]
if self._attached:
try:
setattr(self._cpp_obj, attr, new_value)
except (AttributeError):
self._param_dict[attr] = old_value
raise AttributeError("{} cannot be set after cpp"
" initialization".format(attr))
def _setattr_typeparam(self, attr, value):
"""Hook for setting an attribute in `_typeparam_dict`."""
try:
for k, v in value.items():
self._typeparam_dict[attr][k] = v
except TypeError:
raise ValueError("To set {}, you must use a dictionary "
"with types as keys.".format(attr))
class _StatefulAttrBase(_HOOMDGetSetAttrBase, metaclass=Loggable):
"""Extends parent class to provide a mechanism for exporting object state.
Provides a means for getting object state, the ``state`` property and
``_get_state`` method (the method exists as a hook for later subclasses to
export their state). Also, provides a means for creating new objects from
another object's state, ``from_state``. The ``from_state`` method has a few
caveats. One of the largest is that any variable found in the
``self._param_dict`` and is placed in ``__params__`` is expected to be
accepted in the constructor. Another is that any parameters that are needed
    at construction but are not in the object's ``_param_dict`` or
    ``_typeparam_dict`` must be passed as keyword arguments to the method.
Currently ``from_state`` supports getting the state from a GSD file and a
Python ``dict``.
"""
def _typeparam_states(self):
"""Converts all typeparameters into a standard Python ``ict`` object."""
state = {name: tp.state for name, tp in self._typeparam_dict.items()}
return deepcopy(state)
def _get_state(self):
"""Hook to allow subclasses to overwrite state property."""
state = self._typeparam_states()
state['__params__'] = dict(self._param_dict)
return dict_filter(dict_map(state, _convert_values_to_log_form),
lambda x: x is not RequiredArg)
@log(category='state')
def state(self):
"""The state of the object.
The state counts as anything stored in the ``_param_dict`` and
``_typeparam_dict``.
"""
return self._get_state()
@classmethod
def from_state(cls, state, final_namespace=None, **kwargs):
"""Creates a new object from another object's state.
Args:
state (str or dict): A state dictionary for an object of this
type, a gsd file name, or a dictionary outputted by
`hoomd.logging.Logger`.
final_namespace (str): The name for the key of the parent dictionary
from where the state is stored. This is to allow for users to
specify the property state information in the case where
multiple of the same object have their state information stored
in the same location. As an example if two LJ pair potentials
are stored, the final namespaces would be ``LJ`` and ``LJ_1``.
frame (int): Only accepted when a gsd file name is passed for
``state``. The frame to access the state information. Is keyword
only.
"""
# resolve the namespace
namespace = list(cls._export_dict.values())[0].namespace
if final_namespace is not None:
namespace = namespace[:-1] + (final_namespace,)
namespace = namespace + ('state',)
# recover state dictionary
state_dict, unused_args = cls._get_state_dict(state,
namespace,
**kwargs)
return cls._from_state_with_state_dict(state_dict, **unused_args)
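    # Hedged usage sketch (object and file names here are hypothetical, not part
    # of this module): an operation whose state was logged to "dump.gsd" could be
    # rebuilt with something like
    #     lj = hoomd.md.pair.LJ.from_state("dump.gsd", frame=-1, nlist=nlist)
    # where constructor arguments missing from the stored state (here ``nlist``)
    # are supplied as keyword arguments.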
@classmethod
def _get_state_dict(cls, data, namespace, **kwargs):
"""Get the state dictionary from the accepted outputs of from_state.
Deals with GSD files, namespace dicts (the output of hoomd loggers), and
state dictionaries.
"""
# Filenames
if isinstance(data, str):
if data.endswith('gsd'):
state, kwargs = cls._state_from_gsd(data, namespace, **kwargs)
# Dictionaries and like objects
elif isinstance(data, NamespaceDict):
state = deepcopy(data[namespace])
elif isinstance(data, Mapping):
try:
# try to grab the namespace
state = deepcopy(NamespaceDict(data)[namespace])
except KeyError:
# if namespace can't be found assume that dictionary is the
# state dictionary (This assumes that values are of the form
# (value, flag)
try:
state = dict_map(data, lambda x: x[0])
except TypeError:
# if the map fails, we then assume that the dictionary is
# one without the flag information on the data. This could
# be the case if a logger backend stores the data and that
# returned data is fed in.
state = deepcopy(data)
# Data is of an unusable type
else:
raise ValueError("Object {} cannot be used to get state."
"".format(data))
return (state, kwargs)
@classmethod
def _state_from_gsd(cls, filename, namespace, **kwargs):
"""Get state dictionary from GSD file."""
if 'frame' not in kwargs.keys():
frame = -1
else:
frame = kwargs.pop('frame')
# Grab state keys from gsd
reader = GSDStateReader(filename, frame)
namespace_str = 'log/' + '/'.join(namespace)
state_chunks = reader.getAvailableChunks(namespace_str)
state_dict = NamespaceDict()
chunk_slice = slice(len(namespace_str) + 1, None)
# Build up state dict
for state_chunk in state_chunks:
state_dict_key = tuple(state_chunk[chunk_slice].split('/'))
state_dict[state_dict_key] = \
_handle_gsd_arrays(reader.readChunk(state_chunk))
return (state_dict._dict, kwargs)
@classmethod
def _from_state_with_state_dict(cls, state, **kwargs):
"""Using the state dictionary create a new object."""
# Initialize object using params from state and passed arguments
params = state.get('__params__', {})
params.update(kwargs)
obj = cls(**params)
state.pop('__params__', None)
# Add typeparameter information
for name, tp_dict in state.items():
if '__default__' in tp_dict.keys():
obj._typeparam_dict[name].default = tp_dict['__default__']
del tp_dict['__default__']
# Parse the stringified tuple back into tuple
if obj._typeparam_dict[name]._len_keys > 1:
tp_dict = str_to_tuple_keys(tp_dict)
setattr(obj, name, tp_dict)
return obj
class _DependencyRelation:
"""Defines a dependency relationship between Python objects.
For the class to work all dependencies must occur between objects of
subclasses of this class. This is not an abstract base class since many
    objects that use this class may not deal directly with dependencies.
Note:
We could be more specific in the inheritance of this class to only use
it when the class needs to deal with a dependency.
"""
def __init__(self):
self._dependents = []
self._dependencies = []
def _add_dependent(self, obj):
"""Adds a dependent to the object's dependent list."""
if obj not in self._dependencies:
self._dependents.append(obj)
obj._dependencies.append(self)
def _notify_disconnect(self, *args, **kwargs):
"""Notify that an object is being removed from all relationships.
Notifies dependent object that it is being removed, and removes itself
from its dependencies' list of dependents. Uses ``args`` and
``kwargs`` to allow flexibility in what information is given to
dependents from dependencies.
Note:
This implementation does require that all dependents take in the
same information, or at least that the passed ``args`` and
``kwargs`` can be used for all dependents'
``_handle_removed_dependency`` method.
"""
for dependent in self._dependents:
            dependent._handle_removed_dependency(self, *args, **kwargs)
self._dependents = []
for dependency in self._dependencies:
dependency._remove_dependent(self)
self._dependencies = []
def _handle_removed_dependency(self, obj, *args, **kwargs):
"""Handles having a dependency removed.
Must be implemented by objects that have dependencies. Uses ``args`` and
``kwargs`` to allow flexibility in what information is given to
dependents from dependencies.
"""
pass
def _remove_dependent(self, obj):
"""Removes a dependent from the list of dependencies."""
self._dependencies.remove(obj)
class _HOOMDBaseObject(_StatefulAttrBase, _DependencyRelation):
"""Handles attaching/detaching to a simulation.
``_StatefulAttrBase`` handles getting and setting attributes as well as
providing an API for getting object state and creating new objects from that
state information. We overwrite ``_getattr_param`` and ``_setattr_param``
hooks to handle internal C++ objects. For a similar reason, we overwrite the
``state`` property.
``_DependencyRelation`` handles dealing with dependency relationships
between objects.
The class's metaclass `hoomd.logging.Loggable` handles the logging
infrastructure for HOOMD-blue objects.
This class's main features are handling attaching and detaching from
    simulations and adding and removing from containing objects such as methods
for MD integrators and updaters for the operations list. Attaching is the
idea of creating a C++ object that is tied to a given simulation while
detaching is removing an object from its simulation.
"""
_reserved_default_attrs = {**_HOOMDGetSetAttrBase._reserved_default_attrs,
'_cpp_obj': lambda: None,
'_dependents': lambda: [],
'_dependencies': lambda: []}
_skip_for_equality = set(['_cpp_obj', '_dependent_list'])
def _getattr_param(self, attr):
if self._attached:
return getattr(self._cpp_obj, attr)
else:
return self._param_dict[attr]
def _setattr_param(self, attr, value):
self._param_dict[attr] = value
if self._attached:
new_value = self._param_dict[attr]
try:
setattr(self._cpp_obj, attr, new_value)
except (AttributeError):
raise AttributeError("{} cannot be set after cpp"
" initialization".format(attr))
def __eq__(self, other):
other_keys = set(other.__dict__.keys())
for key in self.__dict__.keys():
if key in self._skip_for_equality:
continue
else:
if key not in other_keys \
or self.__dict__[key] != other.__dict__[key]:
return False
return True
def _detach(self):
if self._attached:
self._unapply_typeparam_dict()
self._update_param_dict()
self._cpp_obj.notifyDetach()
self._cpp_obj = None
self._notify_disconnect(self._simulation)
return self
def _attach(self):
self._apply_param_dict()
self._apply_typeparam_dict(self._cpp_obj, self._simulation)
# pass the system communicator to the object
if self._simulation._system_communicator is not None:
self._cpp_obj.setCommunicator(self._simulation._system_communicator)
@property
def _attached(self):
return self._cpp_obj is not None
def _add(self, simulation):
self._simulation = simulation
def _remove(self):
del self._simulation
@property
def _added(self):
return hasattr(self, '_simulation')
def _apply_param_dict(self):
for attr, value in self._param_dict.items():
try:
setattr(self, attr, value)
except AttributeError:
pass
def _apply_typeparam_dict(self, cpp_obj, simulation):
for typeparam in self._typeparam_dict.values():
try:
typeparam._attach(cpp_obj, simulation)
except ValueError as verr:
raise ValueError("In TypeParameter {}:"
" ".format(typeparam.name) + verr.args[0])
def _update_param_dict(self):
for key in self._param_dict.keys():
self._param_dict[key] = getattr(self, key)
@log(category='state')
def state(self):
"""The state of the object.
Provides a mapping of attributes to their values for use in storing
objects state for later object reinitialization. An object's state can
be used to create an identical object using the `from_state` method
(some object require other parameters to be passed in `from_state`
besides the state mapping).
"""
self._update_param_dict()
return super()._get_state()
def _unapply_typeparam_dict(self):
for typeparam in self._typeparam_dict.values():
typeparam._detach()
def _add_typeparam(self, typeparam):
self._typeparam_dict[typeparam.name] = typeparam
def _extend_typeparam(self, typeparams):
for typeparam in typeparams:
self._add_typeparam(typeparam)
@property
def _children(self):
"""A set of child objects.
These objects do not appear directly in any of the operations lists but
are owned in lists or members of those operations.
"""
return []
class Operation(_HOOMDBaseObject):
"""Represents operations that are added to an `hoomd.Operations` object.
Operations in the HOOMD-blue data scheme are objects that *operate* on a
`hoomd.Simulation` object. They broadly consist of 5 subclasses: `Updater`,
`Writer`, `Compute`, `Tuner`, and `hoomd.integrate.BaseIntegrator`. All
HOOMD-blue operations inherit from one of these five base classes. To find
the purpose of each class see its documentation.
"""
pass
class _TriggeredOperation(Operation):
_cpp_list_name = None
_override_setattr = {'trigger'}
def __init__(self, trigger):
trigger_dict = ParameterDict(trigger=Trigger)
trigger_dict['trigger'] = trigger
self._param_dict.update(trigger_dict)
@property
def trigger(self):
return self._param_dict['trigger']
@trigger.setter
def trigger(self, new_trigger):
# Overwrite python trigger
old_trigger = self.trigger
self._param_dict['trigger'] = new_trigger
new_trigger = self.trigger
if self._attached:
sys = self._simulation._cpp_sys
triggered_ops = getattr(sys, self._cpp_list_name)
for index in range(len(triggered_ops)):
op, trigger = triggered_ops[index]
# If tuple is the operation and trigger according to memory
# location (python's is), replace with new trigger
if op is self._cpp_obj and trigger is old_trigger:
triggered_ops[index] = (op, new_trigger)
def _attach(self):
super()._attach()
def _update_param_dict(self):
if self._attached:
for key in self._param_dict:
if key == 'trigger':
continue
self._param_dict[key] = getattr(self._cpp_obj, key)
class Updater(_TriggeredOperation):
"""Base class for all HOOMD updaters.
An updater is an operation which modifies a simulation's state.
Note:
This class should not be instantiated by users. The class can be used
for `isinstance` or `issubclass` checks.
"""
_cpp_list_name = 'updaters'
class Writer(_TriggeredOperation):
"""Base class for all HOOMD analyzers.
An analyzer is an operation which writes out a simulation's state.
Note:
This class should not be instantiated by users. The class can be used
for `isinstance` or `issubclass` checks.
"""
_cpp_list_name = 'analyzers'
class Compute(Operation):
"""Base class for all HOOMD computes.
A compute is an operation which computes some property for another operation
    or for use by a user.
Note:
This class should not be instantiated by users. The class can be used
for `isinstance` or `issubclass` checks.
"""
pass
class Tuner(Operation):
"""Base class for all HOOMD tuners.
A tuner is an operation which tunes the parameters of another operation for
performance or other reasons. A tuner does not modify the current microstate
of the simulation. That is a tuner does not change quantities like
temperature, particle position, or the number of bonds in a simulation.
Note:
This class should not be instantiated by users. The class can be used
for `isinstance` or `issubclass` checks.
"""
pass
```
#### File: hoomd/pytest/test_triggeredops.py
```python
from hoomd.pytest.dummy import DummyCppObj, DummySimulation, DummyTrigger
from hoomd.pytest.dummy import DummyTriggeredOp
from hoomd.data.syncedlist import SyncedList
from hoomd.trigger import Periodic
def test_initialization():
triggered_op = DummyTriggeredOp(trigger=1)
assert type(triggered_op.trigger) == Periodic
assert triggered_op.trigger.period == 1
assert triggered_op.trigger.phase == 0
def test_custom_initialization():
triggered_op = DummyTriggeredOp(trigger=DummyTrigger())
assert type(triggered_op.trigger) == DummyTrigger
assert triggered_op.trigger(4)
def test_trigger_resetting():
triggered_op = DummyTriggeredOp(trigger=3)
triggered_op.trigger = DummyTrigger()
assert type(triggered_op.trigger) == DummyTrigger
assert triggered_op.trigger(4)
def test_attach():
triggered_op = DummyTriggeredOp(trigger=1)
sim = DummySimulation()
slist = SyncedList(lambda x: isinstance(x, DummyTriggeredOp),
lambda x: (x._cpp_obj, x.trigger))
slist.append(triggered_op)
triggered_op._cpp_obj = DummyCppObj()
slist._sync(sim, sim._cpp_sys.dummy_list)
assert len(sim._cpp_sys.dummy_list) == 1
assert len(sim._cpp_sys.dummy_list[0]) == 2
assert triggered_op._cpp_obj == sim._cpp_sys.dummy_list[0][0]
assert triggered_op.trigger == sim._cpp_sys.dummy_list[0][1]
def test_attach_trigger_resetting():
triggered_op = DummyTriggeredOp(trigger=1)
sim = DummySimulation()
slist = SyncedList(lambda x: isinstance(x, DummyTriggeredOp),
lambda x: (x._cpp_obj, x.trigger))
slist.append(triggered_op)
slist._sync(sim, sim._cpp_sys.dummy_list)
triggered_op.trigger = DummyTrigger()
assert len(sim._cpp_sys.dummy_list) == 1
assert len(sim._cpp_sys.dummy_list[0]) == 2
assert triggered_op._cpp_obj == sim._cpp_sys.dummy_list[0][0]
assert triggered_op.trigger == sim._cpp_sys.dummy_list[0][1]
assert type(triggered_op.trigger) == DummyTrigger
```
#### File: hoomd-blue/hoomd/snapshot.py
```python
import numpy as np
import hoomd
from hoomd import _hoomd
class _ConfigurationData:
def __init__(self, cpp_obj):
self._cpp_obj = cpp_obj
@property
def dimensions(self):
return self._cpp_obj._dimensions
@property
def box(self):
b = self._cpp_obj._global_box
L = b.getL()
return (L.x, L.y, L.z,
b.getTiltFactorXY(),
b.getTiltFactorXZ(),
b.getTiltFactorYZ())
@box.setter
def box(self, box):
try:
new_box = hoomd.Box.from_box(box)
except Exception:
raise ValueError(
f"{box} is not convertible to a hoomd.Box object using "
"hoomd.Box.from_box.")
self._cpp_obj._dimensions = new_box.dimensions
self._cpp_obj._global_box = new_box._cpp_obj
class Snapshot:
def __init__(self, communicator=None):
if communicator is None:
self._comm = hoomd.communicator.Communicator()
else:
self._comm = communicator
self._cpp_obj = _hoomd.SnapshotSystemData_double()
@property
def exists(self):
return self._comm.rank == 0
@property
def configuration(self):
return _ConfigurationData(self._cpp_obj)
@property
def particles(self):
if self.exists:
return self._cpp_obj.particles
else:
return None
@property
def bonds(self):
if self.exists:
return self._cpp_obj.bonds
else:
return None
@property
def angles(self):
if self.exists:
return self._cpp_obj.angles
else:
return None
@property
def dihedrals(self):
if self.exists:
return self._cpp_obj.dihedrals
else:
return None
@property
def impropers(self):
if self.exists:
return self._cpp_obj.impropers
else:
return None
@property
def pairs(self):
if self.exists:
return self._cpp_obj.pairs
else:
return None
@property
def constraints(self):
if self.exists:
return self._cpp_obj.constraints
else:
return None
@classmethod
def _from_cpp_snapshot(cls, snapshot, communicator):
sp = cls()
sp._comm = communicator
sp._cpp_obj = snapshot
return sp
def replicate(self, nx, ny, nz):
self._cpp_obj.replicate(nx, ny, nz)
def _broadcast_box(self):
self._cpp_obj._broadcast_box(self._comm.cpp_mpi_conf)
@classmethod
def from_gsd_snapshot(cls, gsd_snap, communicator):
"""
Constructs a `hoomd.Snapshot` from a `gsd.hoomd.Snapshot` object.
Args:
gsd_snap (`gsd.hoomd.Snapshot`):
The gsd snapshot to convert to a `hoomd.Snapshot`.
communicator (hoomd.communicator.Communicator):
The MPI communicator to use for the snapshot. This prevents the
snapshot from being stored on every rank.
"""
gsd_snap.validate()
snap = cls(communicator=communicator)
def set_properties(
snap_section, gsd_snap_section, properties, array_properties):
for prop in properties:
gsd_prop = getattr(gsd_snap_section, prop, None)
if gsd_prop is not None:
setattr(snap_section, prop, gsd_prop)
for prop in array_properties:
gsd_prop = getattr(gsd_snap_section, prop, None)
if gsd_prop is not None:
getattr(snap_section, prop)[:] = gsd_prop
if communicator.rank == 0:
set_properties(
snap.particles,
gsd_snap.particles,
('N', 'types'),
('angmom', 'body', 'charge', 'diameter', 'image', 'mass',
'moment_inertia', 'orientation', 'position', 'typeid',
'velocity')
)
for section in (
'angles', 'bonds', 'dihedrals', 'impropers', 'pairs'
):
set_properties(
getattr(snap, section),
getattr(gsd_snap, section),
('N', 'types'),
('group', 'typeid')
)
set_properties(
snap.constraints,
gsd_snap.constraints,
('N',),
('group', 'value')
)
# Set box attribute
if gsd_snap.configuration.box is not None:
snap.configuration.box = gsd_snap.configuration.box
if gsd_snap.configuration.dimensions == 2:
snap.configuration.box[2] = 0
return snap
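    # Hedged usage sketch (file and variable names are hypothetical, not from
    # this module): with an MPI communicator ``comm`` and a gsd trajectory, e.g.
    #     with gsd.hoomd.open('init.gsd') as traj:
    #         snap = Snapshot.from_gsd_snapshot(traj[0], comm)
    # the per-rank snapshot data is populated only on rank 0.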
```
|
{
"source": "jglaser/ucx-py",
"score": 2
}
|
#### File: ucx-py/tests/test_libs_utils.py
```python
import array
import functools
import io
import mmap
import operator
import pytest
from ucp._libs.utils import get_buffer_data, get_buffer_nbytes
builtin_buffers = [
b"abcd",
array.array("i", [0, 1, 2, 3]),
array.array("I", [0, 1, 2, 3]),
array.array("f", [0, 1, 2, 3]),
array.array("d", [0, 1, 2, 3]),
memoryview(array.array("B", [0, 1, 2, 3, 4, 5])).cast("B", (3, 2)),
memoryview(b"abcd"),
memoryview(bytearray(b"abcd")),
io.BytesIO(b"abcd").getbuffer(),
mmap.mmap(-1, 5),
]
@pytest.mark.parametrize("buffer", builtin_buffers)
def test_get_buffer_data_builtins(buffer):
check_writable = False
ptr = get_buffer_data(buffer, check_writable=check_writable)
assert ptr != 0
check_writable = True
readonly = memoryview(buffer).readonly
if readonly:
with pytest.raises(ValueError):
get_buffer_data(buffer, check_writable=check_writable)
else:
get_buffer_data(buffer, check_writable=check_writable)
@pytest.mark.parametrize("buffer", builtin_buffers)
def test_get_buffer_nbytes_builtins(buffer):
nbytes = memoryview(buffer).nbytes
result = get_buffer_nbytes(buffer, check_min_size=None, cuda_support=True)
assert result == nbytes
with pytest.raises(ValueError):
get_buffer_nbytes(
memoryview(buffer)[::2], check_min_size=None, cuda_support=True
)
# Test exceptional cases with `check_min_size`
get_buffer_nbytes(buffer, check_min_size=nbytes, cuda_support=True)
with pytest.raises(ValueError):
get_buffer_nbytes(buffer, check_min_size=(nbytes + 1), cuda_support=True)
array_params = [
((2, 3), "i4", (12, 4)),
((2, 3), "u4", (12, 4)),
((2, 3), "f4", (12, 4)),
((2, 3), "f8", (24, 8)),
((2, 3), "f8", (8, 16)),
]
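# Note for illustration (not part of the original test data): the first four
# parameter sets are C-contiguous, while the final (8, 16)-strided float64 case
# is column-major; that non-contiguous case is what exercises the ValueError
# branch in test_get_buffer_nbytes_array below.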
def create_array(xp, shape, dtype, strides):
if xp == "cupy":
iface_prop = "__cuda_array_interface__"
elif xp == "numpy":
iface_prop = "__array_interface__"
xp = pytest.importorskip(xp)
nelem = functools.reduce(operator.mul, shape, 1)
data = xp.arange(nelem, dtype=dtype)
arr = xp.ndarray(shape, dtype, data.data, strides=strides)
iface = getattr(arr, iface_prop)
return xp, arr, iface
@pytest.mark.parametrize("xp", ["cupy", "numpy"])
@pytest.mark.parametrize("shape, dtype, strides", array_params)
def test_get_buffer_data_array(xp, shape, dtype, strides):
xp, arr, iface = create_array(xp, shape, dtype, strides)
ptr = get_buffer_data(arr, check_writable=False)
assert ptr == iface["data"][0]
@pytest.mark.parametrize("xp", ["cupy", "numpy"])
@pytest.mark.parametrize("shape, dtype, strides", array_params)
def test_get_buffer_nbytes_array(xp, shape, dtype, strides):
xp, arr, iface = create_array(xp, shape, dtype, strides)
if arr.flags.c_contiguous:
nbytes = get_buffer_nbytes(arr, check_min_size=None, cuda_support=True)
assert nbytes == arr.nbytes
else:
with pytest.raises(ValueError):
get_buffer_nbytes(arr, check_min_size=None, cuda_support=True)
```
|
{
"source": "jglazner/daemon",
"score": 2
}
|
#### File: daemon/samples/sample.py
```python
import sys
import os
import time
import logging
import mimetypes
import traceback
from daemon import GenericDaemon
class SampleDaemon(GenericDaemon):
def __init__(self, pidfile, logger, **kwargs):
super(SampleDaemon, self).__init__(pidfile, logger, **kwargs)
def work(self):
self.log.info("I did some work!")
def run(self, loopForever=True):
try:
if loopForever:
while True:
self.work()
time.sleep(5)
else:
self.work()
except Exception, e:
self.log.error("A runtime exception occured. {0}".format(e))
raise e
if __name__ == "__main__":
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
fh = logging.FileHandler("sample_daemon.log")
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')
fh.setFormatter(formatter)
log.addHandler(fh)
daemon = SampleDaemon('/home/lam/workspace/mail/daemon/sample_daemon.pid', log)
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'run' == sys.argv[1]:
daemon.run(loopForever=False)
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
```
|
{
"source": "jgleissner/aws-parallelcluster-node",
"score": 2
}
|
#### File: common/schedulers/sge_commands.py
```python
import collections
import logging
import re
from xml.etree import ElementTree
from common import sge
from common.remote_command_executor import RemoteCommandExecutor
from common.schedulers.converters import ComparableObject, from_xml_to_obj
from common.sge import check_sge_command_output, run_sge_command
QConfCommand = collections.namedtuple("QConfCommand", ["command_flags", "successful_messages", "description"])
QCONF_COMMANDS = {
"ADD_ADMINISTRATIVE_HOST": QConfCommand(
command_flags="-ah",
successful_messages=[r".* added to administrative host list", r'adminhost ".*" already exists'],
description="add administrative hosts",
),
"ADD_SUBMIT_HOST": QConfCommand(
command_flags="-as",
successful_messages=[r".* added to submit host list", r'submithost ".*" already exists'],
description="add submit hosts",
),
"REMOVE_ADMINISTRATIVE_HOST": QConfCommand(
command_flags="-dh",
successful_messages=[
r".* removed .* from administrative host list",
r'denied: administrative host ".*" does not exist',
],
description="remove administrative hosts",
),
"REMOVE_SUBMIT_HOST": QConfCommand(
command_flags="-ds",
successful_messages=[r".* removed .* from submit host list", r'denied: submit host ".*" does not exist'],
description="remove submission hosts",
),
"REMOVE_EXECUTION_HOST": QConfCommand(
command_flags="-de",
successful_messages=[r".* removed .* from execution host list", r'denied: execution host ".*" does not exist'],
description="remove execution hosts",
),
}
# The state of the queue - one of u(nknown), a(larm), A(larm), C(alendar suspended), s(uspended),
# S(ubordinate), d(isabled), D(isabled), E(rror), c(configuration ambiguous), o(rphaned), P(reempted),
# or some combination thereof.
# Refer to qstat man page for additional details.
# o(rphaned) is not considered as busy since we assume a node in orphaned state is not present in ASG anymore
SGE_BUSY_STATES = ["u", "C", "s", "D", "E", "P"]
# This state is set by nodewatcher when the node is locked and is being terminated.
SGE_DISABLED_STATE = "d"
# If an o(rphaned) state is displayed for a queue instance, it indicates that the queue instance is no longer demanded
# by the current cluster queue configuration or the host group configuration. The queue instance is kept because jobs
# which have not yet finished are still associated with it, and it will vanish from qstat output when these jobs
# have finished.
SGE_ORPHANED_STATE = "o"
# The states q(ueued)/w(aiting) and h(old) only appear for pending jobs. Pending, unheld jobs are displayed as qw.
# The h(old) state indicates that a job currently is not eligible for execution due to a hold state assigned to it
# via qhold(1), qalter(1) or the qsub(1) -h option, or that the job is waiting for completion of the jobs for which job
# dependencies have been assigned to the job via the -hold_jid or -hold_jid_ad options of qsub(1) or qalter(1).
SGE_HOLD_STATE = "h"
# If the state is u, the corresponding sge_execd(8) cannot be contacted.
# An E(rror) state is displayed for a queue for various reasons such as failing to find executables or directories.
# If an o(rphaned) state is displayed for a queue instance, it indicates that the queue instance is no longer demanded
# by the current cluster queue configuration or the host group configuration. The queue instance is kept because jobs
# which have not yet finished are still associated with it, and it will vanish from qstat output when these jobs have
# finished.
SGE_ERROR_STATES = ["u", "E", "o"]
def exec_qconf_command(hosts, qhost_command):
if not hosts:
return []
hostnames = ",".join([host.hostname for host in hosts])
try:
logging.info("Executing operation '%s' for hosts %s", qhost_command.description, hostnames)
command = "qconf {flags} {hostnames}".format(flags=qhost_command.command_flags, hostnames=hostnames)
# setting raise_on_error to False and evaluating command output to decide if the execution was successful
output = check_sge_command_output(command, raise_on_error=False)
succeeded_hosts = []
# assuming output contains a message line for each node the command is executed for.
for host, message in zip(hosts, output.split("\n")):
if any(re.match(pattern, message) is not None for pattern in qhost_command.successful_messages):
succeeded_hosts.append(host)
return succeeded_hosts
except Exception as e:
logging.error(
"Unable to execute operation '%s' for hosts %s. Failed with exception %s",
qhost_command.description,
hostnames,
e,
)
return []
def add_hosts_to_group(hosts):
logging.info("Adding %s to @allhosts group", ",".join([host.hostname for host in hosts]))
command = "qconf -aattr hostgroup hostlist {hostname} @allhosts"
return _run_sge_command_for_multiple_hosts(hosts, command)
def add_host_slots(hosts):
logging.info("Adding %s to all.q queue", ",".join([host.hostname for host in hosts]))
command = 'qconf -aattr queue slots ["{hostname}={slots}"] all.q'
return _run_sge_command_for_multiple_hosts(hosts, command)
def remove_hosts_from_group(hosts):
logging.info("Removing %s from @allhosts group", ",".join([host.hostname for host in hosts]))
command = "qconf -dattr hostgroup hostlist {hostname} @allhosts"
return _run_sge_command_for_multiple_hosts(hosts, command)
def remove_hosts_from_queue(hosts):
logging.info("Removing %s from all.q queue", ",".join([host.hostname for host in hosts]))
command = "qconf -purge queue '*' all.q@{hostname}"
return _run_sge_command_for_multiple_hosts(hosts, command)
def install_sge_on_compute_nodes(hosts, cluster_user):
"""Start sge on compute nodes in parallel."""
command = (
"sudo sh -c 'ps aux | grep [s]ge_execd || "
"(cd {0} && {0}/inst_sge -noremote -x -auto /opt/parallelcluster/templates/sge/sge_inst.conf)'"
).format(sge.SGE_ROOT)
hostnames = [host.hostname for host in hosts]
result = RemoteCommandExecutor.run_remote_command_on_multiple_hosts(command, hostnames, cluster_user, timeout=20)
succeeded_hosts = []
for host in hosts:
if host.hostname in result and result[host.hostname]:
succeeded_hosts.append(host)
return succeeded_hosts
def lock_host(hostname):
logging.info("Locking host %s", hostname)
command = ["qmod", "-d", "all.q@{0}".format(hostname)]
run_sge_command(command)
def unlock_host(hostname):
logging.info("Unlocking host %s", hostname)
command = ["qmod", "-e", "all.q@{0}".format(hostname)]
run_sge_command(command)
def _run_sge_command_for_multiple_hosts(hosts, command_template):
"""Sequentially run an sge command on the master node for the given hostnames."""
succeeded_hosts = []
for host in hosts:
command = command_template.format(hostname=host.hostname, slots=host.slots)
try:
            run_sge_command(command)
succeeded_hosts.append(host)
except Exception as e:
logging.error("Failed when executing command %s with exception %s", command, e)
return succeeded_hosts
def _run_qstat(full_format=False, hostname_filter=None, job_state_filter=None):
command = "qstat -xml -g dt -u '*'"
if full_format:
command += " -f"
if hostname_filter:
command += " -l hostname={0}".format(hostname_filter)
if job_state_filter:
command += " -s {0}".format(job_state_filter)
return check_sge_command_output(command)
def get_compute_nodes_info(hostname_filter=None, job_state_filter=None):
output = _run_qstat(full_format=True, hostname_filter=hostname_filter, job_state_filter=job_state_filter)
if not output:
return {}
root = ElementTree.fromstring(output)
queue_info = root.findall("./queue_info/*")
hosts_list = [SgeHost.from_xml(ElementTree.tostring(host)) for host in queue_info]
return dict((host.name, host) for host in hosts_list)
def get_jobs_info(hostname_filter=None, job_state_filter=None):
output = _run_qstat(full_format=False, hostname_filter=hostname_filter, job_state_filter=job_state_filter)
if not output:
return []
root = ElementTree.fromstring(output)
job_info = root.findall(".//job_list")
return [SgeJob.from_xml(ElementTree.tostring(host)) for host in job_info]
def get_pending_jobs_info(max_slots_filter=None, skip_if_state=None):
"""
Retrieve the list of pending jobs.
:param max_slots_filter: discard jobs that require a number of slots bigger than the given value
:param skip_if_state: discard jobs that are in the given state
    :return: the list of filtered pending jobs.
"""
pending_jobs = get_jobs_info(job_state_filter="p")
if max_slots_filter or skip_if_state:
filtered_jobs = []
for job in pending_jobs:
if max_slots_filter and job.slots > max_slots_filter:
logging.info(
"Skipping job %s since required slots (%d) exceed max slots (%d)",
job.number,
job.slots,
max_slots_filter,
)
elif skip_if_state and skip_if_state in job.state:
logging.info("Skipping job %s since in state %s", job.number, job.state)
else:
filtered_jobs.append(job)
return filtered_jobs
else:
return pending_jobs
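# Hedged usage sketch (argument values are illustrative only): a caller that only
# wants schedulable jobs might use
#     pending = get_pending_jobs_info(max_slots_filter=4, skip_if_state=SGE_HOLD_STATE)
# which drops jobs needing more than 4 slots and jobs held via qhold.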
class SgeJob(ComparableObject):
# <job_list state="running">
# <JB_job_number>89</JB_job_number>
# <JAT_prio>0.60500</JAT_prio>
# <JB_name>STDIN</JB_name>
# <JB_owner>centos</JB_owner>
# <state>sr</state>
# <JAT_start_time>2019-05-15T13:16:51</JAT_start_time>
# <master>SLAVE</master>
# <slots>1</slots>
# </job_list>
MAPPINGS = {
"JB_job_number": {"field": "number"},
"slots": {"field": "slots", "transformation": int},
"state": {"field": "state"},
"master": {"field": "node_type"},
"tasks": {"field": "array_index", "transformation": lambda x: int(x) if x is not None else None},
"queue_name": {"field": "hostname", "transformation": lambda name: name.split("@", 1)[1] if name else None},
}
def __init__(self, number=None, slots=0, state="", node_type=None, array_index=None, hostname=None):
self.number = number
self.slots = slots
self.state = state
self.node_type = node_type
self.array_index = array_index
self.hostname = hostname
@staticmethod
def from_xml(xml):
return from_xml_to_obj(xml, SgeJob)
class SgeHost(ComparableObject):
# <Queue-List>
    # <name>all.q@ip-10-0-0-16.eu-west-1.compute.internal</name>
# <qtype>BIP</qtype>
# <slots_used>2</slots_used>
# <slots_resv>0</slots_resv>
# <slots_total>4</slots_total>
# <load_avg>0.01000</load_avg>
# <arch>lx-amd64</arch>
# <job_list state="running">
# <JB_job_number>89</JB_job_number>
# <JAT_prio>0.60500</JAT_prio>
# <JB_name>STDIN</JB_name>
# <JB_owner>centos</JB_owner>
# <state>r</state>
# <JAT_start_time>2019-05-15T13:16:51</JAT_start_time>
# <master>MASTER</master>
# <slots>1</slots>
# </job_list>
# <job_list state="running">
# <JB_job_number>95</JB_job_number>
# <JAT_prio>0.60500</JAT_prio>
# <JB_name>STDIN</JB_name>
# <JB_owner>centos</JB_owner>
# <state>s</state>
# <JAT_start_time>2019-05-15T13:16:51</JAT_start_time>
# <slots>1</slots>
# </job_list>
# </Queue-List>
MAPPINGS = {
"name": {"field": "name", "transformation": lambda name: name.split("@", 1)[1] if name else None},
"slots_used": {"field": "slots_used", "transformation": int},
"slots_total": {"field": "slots_total", "transformation": int},
"slots_resv": {"field": "slots_reserved", "transformation": int},
"state": {"field": "state"},
"job_list": {
"field": "jobs",
"transformation": lambda job: SgeJob.from_xml(ElementTree.tostring(job)),
"xml_elem_type": "xml",
},
}
def __init__(self, name=None, slots_total=0, slots_used=0, slots_reserved=0, state="", jobs=None):
self.name = name
self.slots_total = slots_total
self.slots_used = slots_used
self.slots_reserved = slots_reserved
self.state = state
self.jobs = jobs or []
@staticmethod
def from_xml(xml):
return from_xml_to_obj(xml, SgeHost)
```
|
{
"source": "JGL/EnergyMix",
"score": 3
}
|
#### File: EnergyMix/pico/unicornPackPlaying.py
```python
import picounicorn
"""
EnergyMix uses the Pico Unicorn Pack for the RPI Pico to display energy mix information
https://shop.pimoroni.com/products/pico-unicorn-pack
"""
picounicorn.init()
w = picounicorn.get_width()
h = picounicorn.get_height()
# https://www.pyblog.in/programming/print-formmating-in-python/#Stringformat_method
print("The width is of the unicorn is {0}, the height of the unicorn is {1}".format(w,h))
def setAllUnicornLEDsBlack():
for x in range(w):
for y in range(h):
picounicorn.set_pixel(x, y, 0, 0, 0)
print("Setting all pixels to black")
setAllUnicornLEDsBlack()
print("Press Button A to continue to the next demo")
while not picounicorn.is_pressed(picounicorn.BUTTON_A): # Wait for Button A to be pressed
pass
setAllUnicornLEDsBlack()
print("After setting black, setting pixels red starting from 0,1 then continuing x, x+1")
#sort this out!
picounicorn.set_pixel(0, 1, 255, 0, 0)
picounicorn.set_pixel(1, 2, 255, 0, 0)
picounicorn.set_pixel(2, 3, 255, 0, 0)
picounicorn.set_pixel(3, 4, 255, 0, 0)
picounicorn.set_pixel(4, 5, 255, 0, 0)
picounicorn.set_pixel(5, 6, 255, 0, 0)
print("Press Button B to continue to the next demo")
while not picounicorn.is_pressed(picounicorn.BUTTON_B): # Wait for Button B to be pressed
pass
setAllUnicornLEDsBlack()
print("After setting black, setting whole row to red")
for x in range(w):
picounicorn.set_pixel(x, 0, 255, 0, 0)
print("Press Button X to continue to the next demo")
while not picounicorn.is_pressed(picounicorn.BUTTON_X): # Wait for Button X to be pressed
pass
setAllUnicornLEDsBlack()
print("After setting black, setting whole column to red")
for y in range(h):
picounicorn.set_pixel(0, y, 255, 0, 0)
```
|
{
"source": "jglesner/fsqio",
"score": 2
}
|
#### File: python3-port-utils/pants/futurize.py
```python
import argparse
import itertools
import subprocess
import sys
import re
from glob import glob
from textwrap import dedent
from typing import List, NamedTuple
def main() -> None:
parser = create_parser()
args = parser.parse_args()
# preview changes needed for file
if not args.file_names:
target_root = determine_target_root(args.folder, args.contrib, args.test)
check_what_needs_changes(target_root, args.root_only)
return
# futurize files
for file_name in args.file_names:
paths = determine_paths(args, file_name)
if args.preview:
preview_changes(paths.file_path)
continue
futurize_diff = call_futurize(paths.file_path)
if not futurize_made_changes(futurize_diff):
continue
if new_imports_added(futurize_diff):
update_build_dependencies(paths.target_root, paths.pants_target_name, file_name)
call_pants_fmt(paths.pants_target_path)
prompt_review_of_diffs(futurize_diff)
if not args.no_tests and file_changed(paths.file_path):
call_pants_test(paths.pants_test_path)
# --------------------------------------------------
# Command line utils
# -------------------------------------------------
def get_stdout(command: List[str]) -> str:
return subprocess.run(
command,
stdout=subprocess.PIPE,
encoding='utf-8') \
.stdout.strip()
def get_stderr(command: List[str]) -> str:
return subprocess.run(
command,
stderr=subprocess.PIPE,
encoding='utf-8') \
.stderr.strip()
# --------------------------------------------------
# Setup
# -------------------------------------------------
def create_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description='Run futurize script over targets.')
parser.add_argument('folder', help='Target folder name, e.g. backend/jvm')
parser.add_argument(
'file_names',
nargs='*',
default=[],
help='Specific .py file(s). Ignore this arg to see changes necessary in folder.'
)
parser.add_argument('-t', '--test', action='store_true', help='Operate on test targets.')
parser.add_argument('-p', '--preview', action='store_true', help='Do not write changes.')
parser.add_argument('-n', '--no-tests', action='store_true', help='Skip unit tests.')
parser.add_argument('-r', '--root-only', action='store_true', help='Do not recursively search subfolders.')
parser.add_argument('-c', '--contrib', action='store_true', help='Operate on targets in contrib/.')
return parser
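# Illustrative invocations matching the parser above (invocation path is hypothetical):
#   python pants/futurize.py backend/jvm               # list files in the folder that still need changes
#   python pants/futurize.py backend/jvm foo.py -p     # preview the futurize diff without writing
#   python pants/futurize.py backend/jvm foo.py -t -n  # operate on the test target and skip unit tests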
class Paths(NamedTuple):
target_root: str
file_path: str
pants_target_name: str
pants_target_path: str
pants_test_path: str
SRC_BASE_ROOT = 'src/python/pants'
TEST_BASE_ROOT = 'tests/python/pants_test'
def determine_paths(args, file_name: str) -> Paths:
target_root = determine_target_root(args.folder, args.contrib, args.test)
test_root = determine_target_root(args.folder, args.contrib, is_test=True)
pants_target_name = determine_pants_target_name(target_root, file_name)
file_path = f'{target_root}/{file_name}'
pants_target_path = f'{target_root}:{pants_target_name}'
pants_test_path = f'{test_root}:{pants_target_name}'
return Paths(
target_root=target_root,
file_path=file_path,
pants_target_name=pants_target_name,
pants_target_path=pants_target_path,
pants_test_path=pants_test_path
)
def determine_target_root(folder: str, is_contrib: bool, is_test: bool) -> str:
if is_contrib:
target_folder_root = folder.split('/')[0]
base_root = (f'contrib/{target_folder_root}/{TEST_BASE_ROOT}/contrib'
if is_test
else f'contrib/{target_folder_root}/{SRC_BASE_ROOT}/contrib')
else:
base_root = TEST_BASE_ROOT if is_test else SRC_BASE_ROOT
return f'{base_root}/{folder}' if folder else base_root
def determine_pants_target_name(target_root: str, file_name: str) -> str:
file_map = get_stdout([
'./pants',
'filemap',
f'{target_root}:'
]).split('\n')
target_entry = next((line for line in file_map if file_name in line), None)
if target_entry is None:
raise SystemExit(dedent(f"""\n
ERROR: File name '{file_name}' invalid. Not found anywhere in {target_root}/BUILD."""))
pants_target_path = target_entry.split(' ')[1]
pants_target_name = pants_target_path.split(':')[1]
return pants_target_name
# --------------------------------------------------
# Futurize script
# -------------------------------------------------
FUTURIZE_BIN = 'build-support/pants_dev_deps.venv/bin/futurize'
def check_what_needs_changes(folder_root: str, root_only: bool) -> None:
file_paths = (glob(f'{folder_root}/*.py', recursive=False)
if root_only
else glob(f'{folder_root}/**/*.py', recursive=True))
futurize_output = get_stderr([
FUTURIZE_BIN,
'--stage2',
'--no-diffs'
] + file_paths) \
.split('\n')
errors_dropped = itertools.takewhile(
lambda line: not re.match('RefactoringTool:.*error:', line),
futurize_output)
ignore_unnecessary_lines = itertools.dropwhile(
lambda line: 'RefactoringTool: Files that need to be modified:' not in line,
errors_dropped)
remove_refactoring_text = [line.replace('RefactoringTool: ', '') for line in ignore_unnecessary_lines]
no_header = list(remove_refactoring_text)[1:]
if not no_header:
print('Folder is already Python 3 compatible 🐍 🎉')
return
split_by_warning: List[List[str]] = [list(group) for k, group
in itertools.groupby(no_header,
lambda line: 'Warnings/messages while refactoring:' in line)
if not k]
if len(split_by_warning) == 2: # warnings
print('Warnings while refactoring:\n' + '\n'.join(split_by_warning[1]) + '\n\n',
file=sys.stderr)
dropped_warnings = split_by_warning[0]
def drop_prefix(line: str) -> str:
return (line.split(f'{TEST_BASE_ROOT}/')[1]
if TEST_BASE_ROOT in line
else line.split(f'{SRC_BASE_ROOT}/')[1])
remove_path_prefix = [drop_prefix(line) for line in dropped_warnings]
if 'contrib' in folder_root:
remove_path_prefix = [line.split('contrib/')[1] for line in remove_path_prefix]
formatted_for_cli = ([f"{line.split('/')[-1]}" for line in remove_path_prefix]
if root_only
else [f"{'/'.join(line.split('/')[:-1])} {line.split('/')[-1]}" for line in remove_path_prefix])
delimiter = '\n' if not root_only else ' '
print(delimiter.join(sorted(formatted_for_cli)))
def preview_changes(file_path: str) -> None:
subprocess.run([
FUTURIZE_BIN,
'--stage2',
file_path
])
def call_futurize(file_path: str) -> str:
return get_stdout([
FUTURIZE_BIN,
'--stage2',
'--write',
'--nobackup',
file_path
])
# --------------------------------------------------
# Check for changes
# -------------------------------------------------
def file_changed(file_path: str) -> bool:
git_changes = get_stdout(['git', 'ls-files', '-m'])
return file_path in git_changes
def futurize_made_changes(futurize_output: str) -> bool:
return bool(futurize_output)
def new_imports_added(futurize_output: str) -> bool:
return 'import' in futurize_output
# --------------------------------------------------
# Update BUILD
# -------------------------------------------------
def _find_target_index_in_build(build_lines: List[str], pants_target_name: str, file_name: str) -> int:
index = next((i for i, line in enumerate(build_lines)
if f"name = '{pants_target_name}'" in line
or f"name='{pants_target_name}'" in line),
None)
if index is None: # mono-target
index = next((i for i, line in enumerate(build_lines) if file_name in line), None)
if index is None: # only one target block in file, and sources aren't specified
index = next(i for i, line in enumerate(build_lines) if 'python_' in line and '(' in line)
return index
def _future_dependency_already_added(lines: List[str], starting_index: int) -> bool:
for line in lines[starting_index:]:
if '3rdparty/python:future' in line:
return True
if ')\n' in line: # done with dependencies section
return False
def update_build_dependencies(folder_root: str, pants_target_name: str, file_name: str) -> None:
build_file = f'{folder_root}/BUILD'
with open(build_file, 'r') as f:
lines = list(f.readlines())
target_index = _find_target_index_in_build(lines, pants_target_name, file_name)
if _future_dependency_already_added(lines, target_index):
return
for i, line in enumerate(lines[target_index:]):
if 'dependencies = [' in line or 'dependencies=[' in line:
lines.insert(target_index + i + 1, " '3rdparty/python:future',\n")
break
if ')\n' in line: # dependencies section doesn't exist for target
lines.insert(target_index + i, ' dependencies = [\n')
lines.insert(target_index + i + 1, " '3rdparty/python:future',\n")
lines.insert(target_index + i + 2, ' ],\n')
break
with open(build_file, 'w') as f:
f.writelines(lines)
# --------------------------------------------------
# Pants goals
# -------------------------------------------------
def call_pants_fmt(pants_target_path: str) -> None:
subprocess.run([
'./pants',
'fmt',
pants_target_path
])
def call_pants_test(pants_test_target_path: str) -> None:
subprocess.run([
'./pants',
'test',
pants_test_target_path
])
# --------------------------------------------------
# Prompt review of diffs
# -------------------------------------------------
def prompt_review_of_diffs(futurize_output: str) -> None:
input(dedent(f"""\
----------------------------------------------------------------------
Review the file for changes and make modifications if necessary.
----------------------------------------------------------------------
{futurize_output}
----------------------------------------------------------------------
Input the enter key when ready to move on."""))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
```
#### File: python3-port-utils/pants/update_headers.py
```python
import argparse
from typing import List, Set, Sequence
from glob import glob
ENCODING_INDEX = 0
FUTURE_IMPORT_INDEX = 4
def main() -> None:
folders = create_parser().parse_args().folders
for fp in get_files(folders):
with open(fp, "r") as f:
lines = list(f.readlines())
if is_py2_header(lines[:FUTURE_IMPORT_INDEX + 1]):
rewrite(fp, lines)
def create_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description='Use the new header without __future__ imports and # encoding.')
parser.add_argument('folders', nargs='*')
return parser
def get_files(folders: Sequence[str]) -> Set[str]:
return {
f
for folder in folders
for f in glob(f"{folder}/**/*.py", recursive=True)
if not f.endswith("__init__.py")
}
def is_py2_header(header: Sequence[str]) -> bool:
return "# coding=utf-8" in header[ENCODING_INDEX] and "from __future__" in header[FUTURE_IMPORT_INDEX]
def rewrite(path: str, lines: List[str]) -> None:
with open(path, "w") as f:
f.writelines(
lines[ENCODING_INDEX + 1:FUTURE_IMPORT_INDEX] + lines[FUTURE_IMPORT_INDEX + 2:]
)
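# Illustrative effect (hypothetical header): with line 0 '# coding=utf-8', lines 1-3
# copyright/licence comments, line 4 'from __future__ import ...' and line 5 blank,
# rewrite() keeps lines 1-3 and everything from line 6 onwards, dropping the encoding
# line, the __future__ import, and the line that follows it.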
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
```
#### File: twofishes/scripts/geocoder.py
```python
import re, json, sys, urllib, urllib2
class Geocode:
# Initialize from a raw interpretation from the geocoder service.
def __init__(self, interpretation):
self.interp = interpretation
def bounds(self):
try:
return self.interp['feature']['geometry']['bounds']
except:
return None
def what(self):
return self.interp['what']
def lat(self):
return self.interp['feature']['geometry']['center']['lat']
def lng(self):
return self.interp['feature']['geometry']['center']['lng']
def displayName(self):
return self.interp['feature']['displayName']
def ids(self):
return self.interp['feature']['ids']
def geonameid(self):
for i in self.ids():
if i['source'] == 'geonameid':
return i['id']
return None
class Geocoder:
def __init__(self, host):
self.host = host
def geocode(self, query, otherParams = {}):
otherParams['query'] = query
url = 'http://%s/?%s' % (self.host, urllib.urlencode(otherParams))
request = urllib2.Request(url)
response = json.loads(urllib2.urlopen(request).read())
if len(response['interpretations']) > 0:
return Geocode(response['interpretations'][0])
else:
return None
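# Illustrative usage sketch (host name and query are hypothetical):
# geocoder = Geocoder('localhost:8081')
# result = geocoder.geocode('Helsinki')
# if result:
#     print result.lat(), result.lng(), result.displayName()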
```
#### File: buildgen/core/register.py
```python
from __future__ import absolute_import, division, print_function
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.task_registrar import TaskRegistrar as task
from fsqio.pants.buildgen.core.buildgen import Buildgen
from fsqio.pants.buildgen.core.buildgen_aggregate_targets import BuildgenAggregateTargets
from fsqio.pants.buildgen.core.buildgen_target_bag import BuildgenTargetBag
from fsqio.pants.buildgen.core.inject_target_bags import BuildgenInjectTargetBags
from fsqio.pants.buildgen.core.map_derived_targets import MapDerivedTargets
from fsqio.pants.buildgen.core.map_sources_to_addresses import MapSourcesToAddresses
from fsqio.pants.buildgen.core.subsystems.buildgen_subsystem import BuildgenSubsystem
def build_file_aliases():
return BuildFileAliases(
targets={
'buildgen_target_bag': BuildgenTargetBag,
},
)
def global_subsystems():
return (BuildgenSubsystem.Factory,)
def register_goals():
task(
name='map-derived-targets',
action=MapDerivedTargets,
).install()
task(
name='map-sources-to-addresses-mapper',
action=MapSourcesToAddresses,
).install()
task(
name='buildgen',
action=Buildgen,
).install()
# This is a noop from a task perspective - it only updates the target roots to properly attach
# transitive dependencies.
task(
name='add-target-bags',
action=BuildgenInjectTargetBags,
).install('test')
task(
name='aggregate-targets',
action=BuildgenAggregateTargets,
).install('buildgen')
```
#### File: pants/export/export_filtered.py
```python
from __future__ import (
absolute_import,
division,
generators,
nested_scopes,
print_function,
unicode_literals,
with_statement,
)
import json
import os
from pants.backend.project_info.tasks.export import ExportTask, get_buildroot
from pants.task.console_task import ConsoleTask
# Changing the behavior of this task may affect the IntelliJ Pants plugin.
# Please add @yic to reviews for this file.
class GenStubsAndExportTask(ExportTask):
"""Base class for generating a json-formattable blob of data about the target graph.
Subclasses can invoke the generate_targets_map method to get a dictionary of plain datastructures
(dicts, lists, strings) that can be easily read and exported to various formats.
Note this is subclassed from the original export, hot-swapping sources for generated stubs.
"""
@classmethod
def prepare(cls, options, round_manager):
# TODO: this method is overridden explicitly for stub generation. When we change the approach we can remove it.
super(GenStubsAndExportTask, cls).prepare(options, round_manager)
if options.libraries or options.libraries_sources or options.libraries_javadocs:
round_manager.require_data('stubs')
@staticmethod
def _source_roots_for_target(target):
def mod_path_to_ide_gen(path):
"""
:param path:
:return: new path with the ide-gen location
"""
path_components = path.split('/')
try:
anchor_idx = path_components.index('spindle')
except ValueError:
# not a spindle file, just return original path
return path
path_components[anchor_idx - 1] = 'ide-gen'
path_components[anchor_idx] = 'spindle-stubs'
path_components[anchor_idx + 1] = 'current'
return '/'.join(path_components)
def root_package_prefix(source_file):
source = os.path.dirname(source_file)
if target.is_synthetic:
source_root = mod_path_to_ide_gen(os.path.join(get_buildroot(), target.target_base, source))
else:
source_root = os.path.join(get_buildroot(), target.target_base, source)
return source_root, source.replace(os.sep, '.')
return set(map(root_package_prefix, target.sources_relative_to_source_root()))
class GenStubsAndExport(GenStubsAndExportTask, ConsoleTask):
"""Export project information in JSON format.
Intended for exporting project information for IDE, such as the IntelliJ Pants plugin.
TODO: we can back this out entirely when we introduce stubs to pants more formally.
"""
def __init__(self, *args, **kwargs):
super(GenStubsAndExport, self).__init__(*args, **kwargs)
def console_output(self, targets, classpath_products=None):
graph_info = self.generate_targets_map(targets, classpath_products=classpath_products)
if self.get_options().formatted:
return json.dumps(graph_info, indent=4, separators=(',', ': ')).splitlines()
else:
return [json.dumps(graph_info)]
```
#### File: python/tasks/futurize_task.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from builtins import filter
from pants.backend.python.interpreter_cache import PythonInterpreterCache
from pants.backend.python.subsystems.python_setup import PythonSetup
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_target import PythonTarget
from pants.backend.python.targets.python_tests import PythonTests
from pants.backend.python.tasks.resolve_requirements_task_base import ResolveRequirementsTaskBase
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnit, WorkUnitLabel
from pants.python.python_repos import PythonRepos
from pants.util.memo import memoized_property
from pants.util.process_handler import subprocess
from typing import List, Optional, Set
class FuturizeTask(ResolveRequirementsTaskBase):
"""Invoke the futurize tool for Python."""
_PYTHON_SOURCE_EXTENSION = '.py'
def __init__(self, *args, **kwargs):
super(FuturizeTask, self).__init__(*args, **kwargs)
@classmethod
def register_options(cls, register):
super(FuturizeTask, cls).register_options(register)
register('--check', type=bool, default=False,
help='Determines if Pants stops due to error.')
register('--stage', type=int, default=1,
help='Stage of transformation (1 or 2) see futurize docs for more info')
register('--only', type=str, default=None)
@memoized_property
def _interpreter_cache(self):
return PythonInterpreterCache(
PythonSetup.global_instance(),
PythonRepos.global_instance(),
logger=self.context.log.debug
)
@classmethod
def supports_passthru_args(cls):
return True
@staticmethod
def is_non_synthetic_python_target(target):
# type: (PythonTarget) -> bool
return not target.is_synthetic and isinstance(target, (PythonLibrary, PythonBinary, PythonTests))
def _calculate_python_sources(self, targets, tag):
# type: (List[PythonTarget], Optional[str]) -> List[str]
"""Generate a set of source files from the given targets."""
python_eval_targets = list(filter(self.is_non_synthetic_python_target, targets))
sources = set() # type: Set[str]
for target in python_eval_targets:
if not tag or tag in target.tags:
sources.update(
source for source in target.sources_relative_to_buildroot()
if os.path.splitext(source)[1] == self._PYTHON_SOURCE_EXTENSION
)
return list(sources)
def execute(self):
# type: () -> None
opts = self.get_options()
tag = opts.only
sources = self._calculate_python_sources(self.context.target_roots, tag)
if not sources:
self.context.log.debug('No Python sources to check.')
return
futurize_opts = ['-j8']
if opts.stage == 1:
futurize_opts.append('-1')
elif opts.stage == 2:
futurize_opts.append('-2')
else:
raise TaskError('--stage can only have a value of 1 or 2, not {}'.format(opts.stage))
if not opts.check:
futurize_opts.extend(['-w', '-n', '--no-diff'])
cmd = ['.pvenvs/fs/bin/futurize'] + futurize_opts + self.get_passthru_args() + sources
self.context.log.debug('futurize command: {}'.format(' '.join(cmd)))
with self.context.new_workunit(
name='check',
labels=[WorkUnitLabel.TOOL, WorkUnitLabel.RUN],
log_config=WorkUnit.LogConfig(level=self.get_options().level, colors=self.get_options().colors),
cmd=' '.join(cmd)) as workunit:
proc = subprocess.Popen(cmd, stdout=workunit.output('stdout'), stderr=subprocess.STDOUT)
return_code = proc.wait()
if return_code != 0:
raise TaskError('futurize failed: code={}'.format(return_code))
```
#### File: rpmbuild/subsystems/remote_source_fetcher.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import os
from pants.binaries.binary_tool import BinaryToolBase
from pants.fs.archive import archiver_for_path
from pants.util.contextutil import temporary_dir
from pants.util.memo import memoized_method, memoized_property
class RemoteSourceFetcher(BinaryToolBase):
"""Fetcher for remote sources which uses BinaryToolBase pipeline."""
# This allows long-lived caching of remote downloads, which are painful to do over and over when they aren't changing.
# NOTE(mateo): Upstream Pants saw this subsystem and ended up adding a subset of the features.
# That subset needs to be audited and consumed, with a longer-term goal of patching upstream to add
# any of the following features we cannot live without.
#
# RemoteSources plugin provides the following features (uniquely or otherwise):
# * Long-lived caching of downloaded files
# - Only invalidated by version changes - otherwise considered cached
# - Kept outside .pants.d or artifact cache, alongside Pants downloaded tooling.
# - Atomic downloads so we aren't poisoned by corrupted downloads.
# * Addressable in BUILD files
# - These are considered "versioned" and can be referenced as dependencies.
# - RpmBuilder as canonical consumer - caching bootstrapped source bundles.
# * Fetched on demand, either directly or transitively
# - If you call `./pants rpmbuild src/redhat/libevent` only then should it bootstrap the source bundle.
# * Unpack attribute in the target
# - Extract as an addressable feature.
#
# These features mean that we can add new bootstrapped downloads strictly by editing BUILD files and they
# will be fetched on demand, and cached ~forever. This is incredibly powerful for engineers without direct
# Pants development experience.
# TODO(mateo): Either fully adapt the remote_sources plugin for the new BinaryToolBase interface or
# work with upstream until UnpackJars is robust enough for our use cases.
# The upstream interface uses this for the "name" because it expects a new Subsystem for every bootstrapped
# tool. We set the name in the BUILD file, which is interpolated through overrides below.
options_scope = 'remote-fetcher'
def __init__(self, remote_target):
self.name = remote_target.namespace
self._filename = remote_target.filename
self._extract = remote_target.extract or False
self._version = remote_target.version
self.platform_dependent = remote_target.platform_dependent == "True"
def get_support_dir(self):
return 'bin/{}'.format(self.name)
def version(self, context=None):
"""Returns the version of the specified binary tool."""
return self._version
@property
def _relpath(self):
return os.path.join(self.get_support_dir(), self.name())
@property
def extracted(self):
return self._extract
def _construct_path(self, context=None):
fetched = self.select(context)
if not self._extract:
return fetched
unpacked_dir = os.path.dirname(fetched)
outdir = os.path.join(unpacked_dir, 'unpacked')
if not os.path.exists(outdir):
with temporary_dir(root_dir=unpacked_dir) as tmp_root:
# This is an upstream lever that pattern matches the filepath to an archive type.
archiver = archiver_for_path(fetched)
archiver.extract(fetched, tmp_root)
os.rename(tmp_root, outdir)
return os.path.join(outdir)
@memoized_method
def _select_for_version(self, version):
# Override, since we include the extension in the actual filename
# (a compromise so we could support downloading files with no extension).
return self._binary_util.select(
supportdir=self.get_support_dir(),
version=version,
name='{}'.format(self._filename),
platform_dependent=self.platform_dependent,
archive_type=self.archive_type)
@memoized_property
def path(self, context=None):
"""Fetch the binary and return the full file path.
Safe to call repeatedly, the fetch itself is idempotent.
"""
return self._construct_path()
```
#### File: spindle/targets/ssp_template.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
from pants.build_graph.resources import Resources
class SspTemplate(Resources):
"""Scala Server Pages (ssp) template."""
def __init__(self, entry_point=None, *args, **kwargs):
self.entry_point = entry_point
super(SspTemplate, self).__init__(*args, **kwargs)
```
#### File: wiki/subsystems/confluence_subsystem.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import urllib
from pants.subsystem.subsystem import Subsystem
from pants.util.memo import memoized_property
class ConfluenceSubsystem(Subsystem):
options_scope = 'confluence-wiki'
@staticmethod
def confluence_url_builder(page):
config = page.provides[0].config
title = config['title']
full_url = '{}/wiki/spaces/{}/{}'.format(
ConfluenceSubsystem.wiki_url,
config['space'],
urllib.quote_plus(title),
)
return title, full_url
@classmethod
def register_options(cls, register):
super(ConfluenceSubsystem, cls).register_options(register)
# TODO(mateo): This only supports a single wiki url; it should be a map of wiki_name:url.
# This is not trivial to unwind, the base plugin assumed self-hosted wiki and url builders.
register(
'--wiki-url',
default=None,
advanced=True,
help='Wiki hostname.',
)
register(
'--email-domain',
advanced=True,
help='Options default domain. For <EMAIL>, use @foo.com. Note: Overrides the email-domain option.',
)
@memoized_property
def wiki_url(self):
wiki_url = self.get_options().wiki_url
if wiki_url is None:
raise ValueError("No wiki URL set! Please set option --{}-wiki-url.".format(self.options_scope))
return wiki_url
@memoized_property
def email_domain(self):
email_domain = self.get_options().email_domain
if email_domain is None:
raise ValueError("No email domain is set! Please set option --{}-email-domain.".format(self.options_scope))
return email_domain
```
#### File: wiki/tasks/confluence_restful_publish.py
```python
from __future__ import absolute_import, division, print_function
import os
import shutil
import textwrap
from pants.base.exceptions import TaskError
from pants.build_graph.resources import Resources
from pants.contrib.confluence.tasks.confluence_publish import ConfluencePublish
from pants.contrib.confluence.util.confluence_util import ConfluenceError
from pants.util.memo import memoized_property
from fsqio.pants.wiki.subsystems.confluence_subsystem import ConfluenceSubsystem
from fsqio.pants.wiki.util.confluence_cloud import ConfluenceCloud
class ConfluenceRestfulPublish(ConfluencePublish):
"""Rest client for ConfluenceCloud, for use with hosted wikis."""
@classmethod
def subsystem_dependencies(cls):
return super(ConfluenceRestfulPublish, cls).subsystem_dependencies() + (ConfluenceSubsystem,)
@memoized_property
def confluence_subsystem(self):
return ConfluenceSubsystem.global_instance()
@memoized_property
def email_domain(self):
return self.confluence_subsystem.email_domain
@memoized_property
def url(self):
return self.confluence_subsystem.wiki_url
@memoized_property
def force(self):
return self.get_options().force
@memoized_property
def open(self):
return self.get_options().open
@memoized_property
def user(self):
return self.get_options().user
@classmethod
def register_options(cls, register):
# TODO(mateo): These options and the init are inlined from ConfluencePublish because
# that file set properties inside the init and cannot be decoupled from them.
# Moving those to properties and other contracts of being a good citizen superclass.
# pylint: disable=bad-super-call
super(ConfluencePublish, cls).register_options(register)
register(
'--user',
help='Confluence user name, defaults to unix user.',
)
register(
'--force',
type=bool,
help='Force publish the page even if its contents are identical to the contents on confluence.',
)
register(
'--open',
type=bool,
help='Attempt to open the published confluence wiki page in a browser.',
)
def __init__(self, *args, **kwargs):
self._wiki = None
# NOTE(mateo): Purposeful abuse of the super call to avoid legacy practices in the upstream task.
# pylint: disable=bad-super-call
super(ConfluencePublish, self).__init__(*args, **kwargs)
def api(self):
return 'confluence2'
def publish_page(self, address, space, title, content, parent=None):
body = textwrap.dedent('''
<!-- DO NOT EDIT - generated by pants from {} -->
{}
''').strip().format(address, content)
pageopts = dict(
versionComment='updated by pants!'
)
wiki = self.login()
existing = wiki.getpage(space, title)
if existing:
# NOTE: Disabled the no-op detection for publish (no consequences on user build time at all).
# We need the page to be generated before we attach resources.
# TODO(mateo): Restore or deep-six after we land on a solution for displaying inline attachments.
#
# if not self.force and wiki.get_content_value(existing).strip() == body.strip():
# return
pageopts['id'] = existing['id']
pageopts['version'] = existing['version']
try:
page = wiki.create_html_page(space, title, body, parent, **pageopts)
except ConfluenceError as e:
raise TaskError('Failed to update confluence: {}'.format(e))
# Copy any resource files into the html dist of the dependent page target.
# This is not required for Confluence attachment - if the final image tag
# doesn't work for both markdown and confluence, maybe just pass the source location to the API and otherwise
# leave the filesystem alone
page_target = self.context.build_graph.get_target(address)
outdir = os.path.join(self.get_options().pants_distdir, 'markdown', 'html')
page_outdir = os.path.join(outdir, page_target.sources_relative_to_target_base().rel_root)
for target in page_target.dependencies:
if isinstance(target, Resources):
# Copy next to rendered HTML in dist for local use and attach to newly published page for the wiki.
for resource_file in target.sources_relative_to_buildroot():
shutil.copy2(resource_file, page_outdir)
wiki.addattachment(page, resource_file)
return wiki.get_url(page)
def login(self):
if not self._wiki:
flagged = self.get_options().is_flagged('user')
# Use the passed user - first checking to see where the default is applied
# User is seeded by Pants but the options system does not recognize seeded values as being defaults.
user = self.user if flagged else self.user + self.email_domain
try:
self._wiki = ConfluenceCloud.login(self.url, user, self.api())
except ConfluenceError as e:
raise TaskError('Failed to login to confluence: {}'.format(e))
return self._wiki
```
|
{
"source": "jglez/code-solutions",
"score": 4
}
|
#### File: code-solutions/codesignal/makeArrayConsecutive.py
```python
def solution(statues):
sorted_statues = sorted(statues)
pointer = 0
while pointer != len(sorted_statues) - 1:
if (sorted_statues[pointer + 1] - sorted_statues[pointer]) > 1:
sorted_statues.insert(pointer + 1, sorted_statues[pointer] + 1)
pointer += 1
return len(sorted_statues) - len(statues)
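# Worked example: statues [6, 2, 3, 8] -> sorted [2, 3, 6, 8]; the loop inserts
# 4, 5 and 7, so solution([6, 2, 3, 8]) == 3.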
```
#### File: code-solutions/codesignal/shapeArea.py
```python
def solution(n):
area = 1
currEdges = 4
for n in range(1, n):
area += currEdges
currEdges += 4
return area
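# Worked examples: solution(1) == 1, solution(2) == 5, solution(3) == 13,
# since each ring around the previous shape adds 4*(n-1) cells.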
```
#### File: codewars/8kyu/cockroachSpeed.py
```python
def cockroach_speed(s):
return s * 100000 // 3600
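# Worked example: cockroach_speed(1.08) -> 30.0 (1.08 km/h is 30 cm/s, floored).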
```
#### File: codewars/8kyu/firstNonConsecutive.py
```python
def first_non_consecutive(arr):
for index, item in enumerate(arr):
if index == len(arr) - 1:  # last element reached; nothing left to compare
break
if arr[index + 1] - arr[index] > 1:
return arr[index + 1]
return None
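# Worked examples: first_non_consecutive([1, 2, 3, 4, 6, 7, 8]) == 6;
# first_non_consecutive([1, 2, 3]) is None (fully consecutive).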
```
#### File: codewars/8kyu/thirdAngle.py
```python
def other_angle(angle1, angle2):
return 180 - angle1 - angle2
```
|
{
"source": "jglezt/game-of-pyfe",
"score": 4
}
|
#### File: game-of-pyfe/game_of_pyfe/utils.py
```python
import os
from typing import List
import numpy as np
def cls():
"""Clean the terminal."""
os.system("cls" if os.name == "nt" else "clear")
def validate_board(board: np.array) -> np.array:
"""Validate the Game of life board.
Validates that the board complies with the following requirements.
1. Has shape of (n, m).
2. Only contains 1's (life) and 0's (death).
Arguments
---------
board: Game of life board.
Raises
------
TypeError if the board does not have the required shape.
ValueError if the board does not contain only 1's and 0's.
Returns
-------
The original board.
"""
board_shape = board.shape
if not (len(board_shape) == 2 and board_shape[0] >= 2 and board_shape[1] >= 2):
raise TypeError("board does not contain the correct shape")
for i in range(board_shape[0]):
for j in range(board_shape[1]):
if not (board[i][j] == 0 or board[i][j] == 1):
raise ValueError(
"Board contains a {} in index [{}, {}]".format(board[i][j], i, j)
)
return board
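# Illustrative usage (not part of the original module):
# validate_board(np.array([[0, 1], [1, 0]]))  # returns the board unchanged
# validate_board(np.array([0, 1]))            # raises TypeError (1-D shape)
# validate_board(np.array([[0, 2], [1, 0]]))  # raises ValueError (contains a 2)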
def create_printable_board(board: np.array) -> List[List[int]]:
"""Format the board to be printed in the terminal.
For convenience and testability, the returned array contains the code point
for a space (32) for 0's and for a full block '█' (9608) for 1's.
Arguments
---------
board: Game of life board.
Returns
-------
A list containing the lines of integers representing each cell
life state.
"""
black_square = 9608
space = 32
printable_board = board.copy()
printable_board[printable_board == 0] = space
printable_board[printable_board == 1] = black_square
return printable_board.tolist()
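# Illustrative result (not part of the original module): for the 2x2 board
# [[0, 1], [1, 0]] the returned list is [[32, 9608], [9608, 32]],
# i.e. the code points for ' ' and the full block '█'.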
```
|
{
"source": "jgliss/aerocom_obs_props",
"score": 2
}
|
#### File: aerocom_obs_props/sorted_out/EVAL_SCRIPT.py
```python
import helpers as helpers
import os
from collections import OrderedDict as od
import numpy as np
import matplotlib.pyplot as plt
import pyaerocom as pya
import logging
### GLOBAL SETTINGS
YEARS = [2010, 2008, 9999]
MODEL_LIST = ['ECMWF_CAMS_REAN',
'CAM6-Oslo_NF2kNucl_7jun2018AK',
'OsloCTM2_INSITU',
'TM5_AP3-CTRL2016',
'TM5_AP3-INSITU']
MODEL_LIST = MODEL_LIST[2:]
GRIDDED_OBS = {'MODIS6.terra' : ['od550aer'],
'MODIS6.aqua' : ['od550aer']}
# will be filled during the import
READ_PROBLEMATIC = {}
# Obs data and variables
UNGRIDDED_OBS = {'AeronetSunV2Lev2.daily' : ['od550aer', 'ang4487aer'],
'AeronetSunV3Lev2.daily' : ['od550aer', 'ang4487aer'],
'AeronetSDAV2Lev2.daily' : ['od550lt1aer',
'od550gt1aer'],
'AeronetSDAV3Lev2.daily' : ['od550lt1aer',
'od550gt1aer'],
pya.const.AERONET_INV_V2L2_DAILY_NAME : 'abs550aer',
pya.const.AERONET_INV_V3L2_DAILY_NAME : 'abs550aer'}
### Paths and directories
MODEL_INFO_FILE = ('/lustre/storeA/project/aerocom/'
'aerocom-users-database/AEROCOM-PHASE-III/reference-list')
OUT_DIR = './output/'
OUT_DIR_SCAT = os.path.join(OUT_DIR, 'scatter_plots')
OUT_DIR_RESULTS = os.path.join(OUT_DIR, 'results_csv')
OUT_STATS = os.path.join(OUT_DIR, 'statistics_results.csv')
VARS = []
for k, v in UNGRIDDED_OBS.items():
if isinstance(v, str):
VARS.append(v)
else:
VARS.extend(v)
for k, v in GRIDDED_OBS.items():
if isinstance(v, str):
VARS.append(v)
else:
VARS.extend(v)
VARS = list(dict.fromkeys(VARS))
def chk_make_dir(base, name):
d = os.path.join(base, name)
if not os.path.exists(d):
os.mkdir(d)
return d
def init_output_directories(model_reader, obs_data, out_base_dir):
if not os.path.exists(out_base_dir):
os.mkdir(out_base_dir)
dirs = {}
for name, data in model_reader.results.items():
model_base = chk_make_dir(out_base_dir, name)
dirs[name] = {}
for y in data.years:
year_sub = chk_make_dir(model_base, str(y))
dirs[name][y] = {}
for obs_network in obs_data:
obs_sub = chk_make_dir(year_sub, obs_network)
dirs[name][y][obs_network] = obs_sub
chk_make_dir(obs_sub, 'series_plots')
return dirs
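# The directory tree created above looks like this (names are illustrative):
#   ./output/<model_id>/<year>/<obs_network>/series_plots/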
def append_result(out_file, stats, model, obs, var, year, ts_type):
with open(out_file, 'a') as f:
f.write('Model: {}, Obs: {}, Var: {}, Year: {}, Freq: {}\n'.format(
model, obs, var, year, ts_type))
for k, v in stats.items():
f.write('{}:\t{:.3f}\n'.format(k, v))
f.write('\n')
if __name__=="__main__":
if os.path.exists(OUT_STATS):
os.remove(OUT_STATS)
plt.close('all')
helpers.print_file(MODEL_INFO_FILE)
### OPTIONS
RELOAD = 1
RUN_EVAL = 1
EVAL_UNGRIDDED = 1
EVAL_GRIDDED_OBS = 1
TEST = 0
PLOT_STATIONS = 0
pya.change_verbosity('critical')
### DATA IMPORT
if RELOAD:
print('Importing model and obs data, this could take some time')
### Read gridded model data
read_models = pya.io.ReadGriddedMulti(MODEL_LIST)
read_models.read_individual_years(VARS, YEARS)
print('Reading satellite data')
### Read gridded obs data
grid_obs = [k for k in GRIDDED_OBS.keys()]
read_gridded_obs = pya.io.ReadGriddedMulti(grid_obs)
read_gridded_obs.read_individual_years(VARS, YEARS)
read_ungridded_obs = pya.io.ReadUngridded()
read_ungridded_obs.logger.setLevel(logging.INFO)
print('Reading ungridded obs data')
# Load networks individually for now (easier for analysis below)
ungridded_obs_all = od()
if EVAL_UNGRIDDED:
for network, vars_to_retrieve in UNGRIDDED_OBS.items():
ungridded_obs_all[network] = read_ungridded_obs.read_dataset(
network, vars_to_retrieve=vars_to_retrieve)
dirs = init_output_directories(read_models, ungridded_obs_all, OUT_DIR)
if RUN_EVAL:
### ANALYSIS
PLOT_STATIONS = 0
# temporal resolution
TS_TYPES = ['daily', 'monthly', 'yearly']
filter_name = 'WORLD-noMOUNTAINS'
for ts_type in TS_TYPES:
plotname = 'mALLYEAR{}'.format(ts_type)
for model_id, model_reader in read_models.results.items():
for year in YEARS:
if not year in model_reader.years:
continue
for var in VARS:
if not var in model_reader.vars:
continue
if EVAL_GRIDDED_OBS:
for obs_id, obs_reader in read_gridded_obs.results.items():
if var in obs_reader.vars and year in obs_reader.years:
if year == 9999:
msg =('Ignoring climatology data (model: {}, '
'obs: {}). '
'Not yet implemented'.format(model_id,
obs_id))
print(msg)
with open(OUT_STATS, 'a') as f:
f.write('\n{}\n\n'.format(msg))
continue
print('Analysing variable: {}\n'
'Model {} vs. obs {}\n'
'Year: {} ({} resolution)\n'
.format(var, model_id, obs_id,
year, ts_type))
model = model_reader.data_yearly[var][year]
obs = obs_reader.data_yearly[var][year]
start_str = str(year)
stop_str = '{}-12-31 23:59:59'.format(year)
data = pya.collocation.collocate_gridded_gridded(
model, obs,
start=start_str,
stop=stop_str,
ts_type=ts_type,
filter_name=filter_name)
stats = data.calc_statistics()
append_result(OUT_STATS, stats,
model_id, obs_id, var, year, ts_type)
add_note=False
if np.isnan(stats['R']):
if sum(data.data.values[1].flatten()) != 0:
raise Exception('Check...')
add_note = True
save_name_fig = data.save_name_aerocom + '_SCAT.png'
data.plot_scatter(savefig=True,
save_dir=OUT_DIR_SCAT,
save_name=save_name_fig)
data.to_csv(OUT_DIR_RESULTS)
if TEST:
raise Exception
plt.close('all')
if EVAL_UNGRIDDED:
for obs_id, ungridded_obs in ungridded_obs_all.items():
if not var in ungridded_obs.contains_vars:
continue
if year == 9999:
msg =('Ignoring climatology data (model: {}, '
'obs: {}). '
'Not yet implemented'.format(model_id,
obs_id))
print(msg)
with open(OUT_STATS, 'a') as f:
f.write('\n{}\n\n'.format(msg))
continue
print('Analysing variable: {}\n'
'Model {} vs. obs {}\n'
'Year: {} ({} resolution)\n'
.format(var, model_id, obs_id,
year, ts_type))
model = model_reader.data_yearly[var][year]
start_str = str(year)
stop_str = '{}-12-31 23:59:00'.format(year)
data = pya.collocation.collocate_gridded_ungridded_2D(
model, ungridded_obs, ts_type=ts_type,
start=start_str, stop=stop_str,
filter_name=filter_name)
data.to_csv(OUT_DIR_RESULTS)
stats = data.calc_statistics()
append_result(OUT_STATS, stats,
model_id, obs_id, var, year, ts_type)
add_note=False
if np.isnan(stats['R']):
if sum(data.data.values[1].flatten()) != 0:
raise Exception('Check...')
add_note = True
save_name_fig = data.save_name_aerocom + '_SCAT.png'
data.plot_scatter(savefig=True,
save_dir=OUT_DIR_SCAT,
save_name=save_name_fig)
plt.close('all')
```
|
{
"source": "jgliss/my_py3_scripts",
"score": 2
}
|
#### File: arve/from_arve/Experiment.py
```python
import copy
import math
import numpy as np
import pickle
import os
import shutil
from glob import glob
import multiprocessing
from subprocess import Popen,PIPE, STDOUT, call
import string, random
from netCDF4 import Dataset
import TomoCamera as TC
import time
import sys
def find_nearest_id(array,value):
idx=(np.abs(array-value)).argmin()
return idx
def Average_spc_Files(InputFiles, OutputFile, verbose=False):
# First check that all files have the same number of lines. If not
# the files are surely different.
i = 0
# for fn in glob(InputFiles):
for fn in InputFiles:
with open(fn) as fp:
nlin = sum(1 for line in fp)
if i==0:
nlin0=nlin
else:
if nlin != nlin0:
print 'nlin: ' + str(nlin) + ', not equal nlin0: ' + str(nlin0)
exit(0)
i = i + 1
# All well? Combine all the files
wvl = np.zeros([len(InputFiles),nlin])
ix = np.zeros([len(InputFiles),nlin],dtype=int)
iy = np.zeros([len(InputFiles),nlin],dtype=int)
iz = np.zeros([len(InputFiles),nlin],dtype=int)
rad = np.zeros([len(InputFiles),nlin])
s2 = np.zeros([nlin])
radavg = np.zeros([nlin])
i = 0
# for f in glob(InputFiles):
for f in InputFiles:
(wvl[i],ix[i],iy[i],iz[i],rad[i]) = read_rad_spc(f, verbose=False)
radavg[:] = radavg[:] + rad[i,:]
s2[:] = s2[:] + rad[i,:]*rad[i,:]
i = i + 1
s0 = i
l = 0
f = open(OutputFile,'w')
while l < nlin:
s1 = radavg[l]
arg = s0*s2[l] - s1*s1
if arg < 0.0:
print >> sys.stderr, l, arg, s0, s1, s2[l]
arg = 0.0
std = (1.0/s0)*math.sqrt(arg)
f.write('{0:8.2f} {1:3d} {2:3d} {3:3d} {4:9.4f} {5:9.4f}\n'.format(wvl[0,l], ix[0,l], iy[0,l], iz[0,l], s1/s0, std))
l = l + 1
f.close()
return
def CombineSingleProcessOuput(InputFiles, OutputFile, verbose=False):
fin = InputFiles[0]
finstd = fin.replace('.spc','.std.spc')
rad = np.loadtxt(fin)
std = np.loadtxt(finstd)
nwvl, ncol = rad.shape
f = open(OutputFile,'w')
iwvl=0
while iwvl < nwvl:
f.write('{0:8.2f} {1:3d} {2:3d} {3:3d} {4:9.4f} {5:9.4f}\n'.format(rad[iwvl,0], int(rad[iwvl,1]),
int(rad[iwvl,2]), int(rad[iwvl,3]),
rad[iwvl,4], std[iwvl,4]))
iwvl = iwvl + 1
f.close()
return
def read_rad_spc(fn, STD=False, verbose=False):
# Read MYSTIC mc.rad.spc file
if verbose:
print "Reading MYSTIC mc.rad.spc file: ", fn
sys.stdout.flush()
if STD:
wvl,ix,iy,iz,rad, std = np.loadtxt(fn, unpack=True)
return (wvl,ix,iy,iz,rad,std)
else:
wvl,ix,iy,iz,rad = np.loadtxt(fn, unpack=True)
return (wvl,ix,iy,iz,rad)
def Write2DMYSTICElevationFile(filename, nx, ny, dx, dy, elevation, verbose=False):
if verbose:
print "Write2DMYSTICElevationFile filename", filename
sys.stdout.flush()
fp=open(filename,'w')
fp.write('{:d} {:d} {:f} {:f}\n'.format(nx, ny, dx, dy))
ix=0
while ix < nx:
iy=0
while iy < ny:
fp.write('{:d} {:d} {:f}\n'.format(ix+1, iy+1, elevation ))
iy=iy+1
ix=ix+1
fp.close()
def zenith(lat, lon, year, month, day,hour,min=0, sec=0, stdlong=0,output=1, uvspecpath=''):
cmd = uvspecpath+'zenith '+str(day)+' '+str(month)+' '+str(hour)+' '+str(min)+' '+str(sec)+\
' '+'-y '+str(year)+' -a '+str(lat)+' -o '+str(lon)+' -s '+str(stdlong)+' -q'
res = Popen(cmd,shell=True,stdout=PIPE)
res.wait()
vals = res.communicate()
vals = vals[0].split()
sza = float(vals[1])
return sza
def Write3DMYSTICFile(filename, type='Generic', nx=0, ny=0, nz=0, dx=0, dy=0, z=None, LWCLimit=0.0, extLimit=0.0,
flag=0, verbose=False, **kwargs):
if verbose:
print "Write3DMYSTICFile filename", filename
sys.stdout.flush()
fp=open(filename,'w')
if type=='Generic':
fp.write('{:d} {:d} {:d} {:d}\n'.format(nx, ny, nz,flag))
fp.write('{:f} {:f} '.format(dx, dy))
for zz in z:
fp.write('{:f} '.format(zz))
fp.write('\n')
if flag==1:
tmpext = kwargs['ext']
tmpgg = kwargs['gg']
tmpssa = kwargs['ssa']
ix=0
il=0
while ix < nx:
iy=0
while iy < ny:
iz=0
while iz < nz:
# if tmpext[ix,iy,iz] > 0.0:
if tmpext[ix,iy,iz] > extLimit:
fp.write('{:d} {:d} {:d} {:f} {:f} {:f}\n'.format(ix+1, iy+1, iz+1, tmpext[ix,iy,iz], tmpgg[ix,iy,iz], tmpssa[ix,iy,iz] ))
il=il+1
iz=iz+1
iy=iy+1
ix=ix+1
# If no cloud still include a dummy line with no optical thickness to fool MYSTIC
# for background simulations.
if il==0:
ix=0
iy=0
iz=0
fp.write('{:d} {:d} {:d} {:f} {:f} {:f}\n'.format(ix+1, iy+1, iz+1, 0.0, 0.0, 0.0 ))
elif flag==3:
tmpLWC = kwargs['LWC']
tmpreff = kwargs['reff']
ix=0
il=0
while ix < nx:
iy=0
while iy < ny:
iz=0
# while iz < nz+1:
while iz < nz:
if tmpLWC[ix,iy,iz] > LWCLimit:
fp.write('{:d} {:d} {:d} {:g} {:f}\n'.format(ix+1, iy+1, iz+1, tmpLWC[ix,iy,iz], tmpreff[ix,iy,iz] ))
il=il+1
iz=iz+1
iy=iy+1
ix=ix+1
# If no cloud still include a dummy line with no optical thickness to fool MYSTIC
# for background simulations.
if il==0:
ix=0
iy=0
iz=0
fp.write('{:d} {:d} {:d} {:f} {:f}\n'.format(ix+1, iy+1, iz+1, 0.0, tmpreff[ix,iy,iz] ))
else:
print 'Write3DMYSTICFile: Unknown type'
exit
fp.close()
return
def make3DGaussian(sizex, sizey, sizez, sigmax=1, sigmay=1, sigmaz=1, center=None,verbose=False):
""" Calculate 3D Gaussian distribution with different standard deviations
in x, y, and z-directions.
size? is the size in pixels/indices in ?-direction
sigma ? is standard deviation in ?-direction
"""
if verbose==True:
print "make3DGaussian size", sizex, sizey, sizez
print "make3DGaussian sigma", sigmax, sigmay, sigmaz
sys.stdout.flush()
x = np.arange(0, sizex, 1, float)
y = x[:,np.newaxis]
if sizey>sizex:
y = np.insert(y,np.zeros(sizey-sizex),0,axis=0 )
elif sizey<sizex:
y = np.delete(y,np.arange(0,sizex-sizey),axis=0 )
y[:,0] = np.arange(0, len(y), 1, float)
z = y[:,np.newaxis]
if sizez>sizey:
z = np.insert(z,np.zeros(sizez-sizey),0,axis=0 )
elif sizez<sizey:
z = np.delete(z,np.arange(0,sizey-sizez),axis=0 )
z[:,0,0] = np.arange(0, len(z), 1, float)
if verbose==True:
print z
print "make3DGaussian x.shape", x.shape, y.shape, z.shape
sys.stdout.flush()
if center is None:
x0 = sizex // 2
y0 = sizey // 2
z0 = sizez // 2
else:
x0 = center[0]
y0 = center[1]
z0 = center[2]
# if verbose==True:
# print "x", x
# print "y", y
# print "z", z
# print "x0, y0, z0", x0, y0, z0
tmpG = (1/(2.*sigmax*sigmay*sigmaz))*np.exp(-0.5*(((x-x0)**2)/sigmax**2+((y-y0)**2)/sigmay**2+((z-z0)**2)/sigmaz**2))
# Transpose to comply with rest
tmpG = tmpG.T
if verbose==True:
print "make3DGaussian dens", tmpG.shape
sys.stdout.flush()
return tmpG
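# Illustrative call (values are hypothetical): a 40 x 40 x 20 voxel field with a
# broader horizontal than vertical spread, peaking at the grid centre:
# dens = make3DGaussian(40, 40, 20, sigmax=8, sigmay=8, sigmaz=3)
# dens.shape is (40, 40, 20) after the transpose at the end of the function.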
def make3DEllipsoid(sizex, sizey, sizez, x_centre, y_centre, z_centre,
ellipsoid_a, ellipsoid_b, ellipsoid_c,
x, y, z, verbose=False):
""" Set density to shape of a ellipsoid. Density is constant within.
size? is the size in pixels/indices in ?-direction
"""
tmpdens = np.zeros((sizex, sizey, sizez))
ix=0
for xx in x:
iy=0
for yy in y:
iz=0
for zz in z[1:z.shape[0]-1]:
ell = (((xx-x_centre)**2.)/(ellipsoid_a**2.))+\
(((yy-y_centre)**2.)/(ellipsoid_b**2.))+\
(((zz-z_centre)**2.)/(ellipsoid_c**2.))
if ell <=1.0:
tmpdens[ix,iy,iz]=1.0
iz=iz+1
iy=iy+1
ix=ix+1
if verbose==True:
print "make3DEllipsoid dens", tmpdens.shape
sys.stdout.flush()
return tmpdens
def make3DVerticalPlume(sizex, sizey, sizez, iz_start, iz_end,
iBottomRadius, iTopRadius, sigmax, sigmay, scale_factor=1,
center=None, verbose=False):
""" Calculate 3D Gaussian distribution with different standard deviations
in x, y, and z-directions.
size? is the size in pixels/indices in ?-direction
sigma ? is standard deviation in ?-direction
"""
if verbose==True:
print "size", sizex, sizey, sizey
print "iz_start", iz_start, iz_end, iBottomRadius, iTopRadius
sys.stdout.flush()
tmpdens = np.zeros((sizex, sizey, sizez))
x = np.arange(0, sizex, 1, float)
y = x[:,np.newaxis]
if sizey>sizex:
y = np.insert(y,np.zeros(sizey-sizex),0,axis=0 )
elif sizey<sizex:
y = np.delete(y,np.arange(0,sizex-sizey),axis=0 )
y[:,0] = np.arange(0, len(y), 1, float)
if center is None:
x0 = sizex // 2
y0 = sizey // 2
else:
x0 = center[0]
y0 = center[1]
if verbose==True:
print "x", x
print "y", y
print "x0, y0, z0", x0, y0
sys.stdout.flush()
a = (iBottomRadius-iTopRadius)/(iz_start-iz_end)
b = iBottomRadius-a*iz_start
iz = iz_start
ii =0
while iz<iz_end:
ir = a*iz+b
this_sigmax = ir*sigmax
this_sigmay = ir*sigmay
# print iz, ir, a, b
tmpG = (1/(2.*this_sigmax*this_sigmay))*np.exp(-0.5*(((x-x0)**2)/this_sigmax**2+((y-y0)**2)/this_sigmay**2))
tmpdens[:,:,ii] = tmpG*scale_factor/tmpG.max()
# print tmpG.shape, tmpG.max()
# exit()
ii=ii+1
iz=iz+1
if verbose==True:
print x
print x.shape, y.shape, z.shape
sys.stdout.flush()
return tmpdens
def make3DHorizontalPlume(sizex, sizey, sizez, iz_start, iz_end,
iBottomRadius, iTopRadius, sigmax, sigmay, scale_factor=1,
center=None, verbose=False):
""" Calculate 3D Gaussian distribution with different standard deviations
in x, y, and z-directions.
size? is the size in pixels/indices in ?-direction
sigma ? is standard deviation in ?-direction
"""
if verbose==True:
print "size", sizex, sizey, sizey
print "iz_start", iz_start, iz_end, iBottomRadius, iTopRadius
sys.stdout.flush()
tmpdens = np.zeros((sizex, sizey, sizez))
x = np.arange(0, sizex, 1, float)
y = x[:,np.newaxis]
if sizey>sizex:
y = np.insert(y,np.zeros(sizey-sizex),0,axis=0 )
elif sizey<sizex:
y = np.delete(y,np.arange(0,sizex-sizey),axis=0 )
y[:,0] = np.arange(0, len(y), 1, float)
if center is None:
x0 = sizex // 2
y0 = sizey // 2
else:
x0 = center[0]
y0 = center[1]
if verbose==True:
print "x", x
print "y", y
print "x0, y0, z0", x0, y0
sys.stdout.flush()
a = (iBottomRadius-iTopRadius)/(iz_start-iz_end)
b = iBottomRadius-a*iz_start
iz = iz_start
ii =0
while iz<iz_end:
ir = a*iz+b
this_sigmax = ir*sigmax
this_sigmay = ir*sigmay
# print iz, ir, a, b
tmpG = (1/(2.*this_sigmax*this_sigmay))*np.exp(-0.5*(((x-x0)**2)/this_sigmax**2+((y-y0)**2)/this_sigmay**2))
tmpdens[:,:,ii] = tmpG*scale_factor/tmpG.max()
# print tmpG.shape, tmpG.max()
# exit()
ii=ii+1
iz=iz+1
if verbose==True:
print x
print x.shape, y.shape, z.shape
sys.stdout.flush()
return tmpdens
def make3DBox(sizex, sizey, sizez, verbose=False, scale_factor=1.0):
""" Make homogeneous 3D 3x3x3 cube.
"""
if verbose==True:
print "make3DBox size", sizex, sizey, sizez
sys.stdout.flush()
tmpdens = np.ones((sizex, sizey, sizez))
tmpdens = tmpdens*scale_factor
if verbose==True:
print "make3DBox", tmpdens.shape
print "make3DBox", tmpdens
sys.stdout.flush()
return tmpdens
def make3DCell(sizex, sizey, sizez, verbose=False, scale_factor=1.0):
""" Calculate 3D distribution for an SO2 cell. Here a 3x3x3 cube
with a hole in the middle for the camera.
"""
if verbose==True:
print "make3DCell size", sizex, sizey, sizez
sys.stdout.flush()
tmpdens = np.ones((sizex, sizey, sizez))
tmpdens = tmpdens*scale_factor
x0 = sizex // 2
y0 = sizey // 2
z0 = sizez // 2
tmpdens[x0,y0,z0] = 0.0
if verbose==True:
print "make3DCell", tmpdens.shape
print "make3DCell", tmpdens
sys.stdout.flush()
return tmpdens
class ConversionFactors:
def __init__(self):
self.nm2cm = 1.e-07 # Convert from nm to cm
self.nm2um = 1.e-03 # Convert from nm to um
self.um2m = 1.e-06 # Convert from um to m
self.cm2km = 1.e-05 # Factor from cm to km
self.m2km = 1.e-03 # Factor from m to km
self.m2cm = 1.e+02 # Factor from m to cm
self.km2m = 1000. # Factor from km to m
self.km2cm = 100000. # Factor from km to cm
self.gm3togcm3=1e-06 # Convert from g/m**3 to g/cm**3
self.kgtog=1.0e+03 # Convert from kg to g
self.m3tocm3=1.0e+06 # Convert from m**3 to cm**3
class Camera:
def __init__(self,RandString=''):
self.Type='Camera'
self.verbose=False
self.name='TestCamera'
self.savename='Camera'
# Localisation of camera in m
self.x = 0.0
self.y = 0.0
self.z = 0.0
# Viewing direction
self.umu = 1.0 # Viewing the horizon
self.phi = 0.0 # Looking north
# Number of pixels in horizontal and vertical
self.h_pixels=0
self.v_pixels=0
# Field of view of camera: phi1 phi2 theta1 theta2
# phi = 0 is looking to the south
# phi = 180 is with the sun in the back if phi0=0
self.phi1 = 0.0
self.phi2 = 0.0
self.theta1 = 0.0
self.theta2 = 0.0
self.wavelength_grid_file='../Data/XSections/uvspec_SO2_wavelength_grid_file'
self.wavelength=-9999
self.wavelength1=-9999
self.wavelength2=-9999
if RandString == '':
self.RandString= 'Cam'+''.join((random.sample(string.ascii_lowercase, 5)))
else:
self.RandString= 'Cam'+RandString
return
def info(self,latex=False):
print
print self.savename+' name:', self.name
print '(All dimensions are in units of m)'
print 'Localisation x={:f}, y={:f}, z={:f}'.format(self.x, self.y, self.z)
print 'Pixels h_pixels={:d}, v_pixels={:f}'.format(self.h_pixels, self.v_pixels)
print 'FOV phi1={:f}, phi2={:f}, theta1={:f}, theta2={:f}'.format(self.phi1, self.phi2, self.theta1, self.theta2)
sys.stdout.flush()
if latex:
print '& {:5.1f} & {:5.1f} & {:5.1f} & {:5.1f} & {:6.1f} & {:6.1f} & {:6.1f} & {:6.1f} & {:d}& {:d}\\\\'.format(self.wavelength1, self.x, self.y, self.z, self.phi1, self.phi2, self.theta1, self.theta2, self.h_pixels, self.v_pixels)
sys.stdout.flush()
print
return
def save(self,folder):
pkl_file = open(folder+self.savename+self.name+'.pkl', 'wb')
pickle.dump(self,pkl_file )
pkl_file.close()
return
def SetRTInput(self, UVS):
print "Cam SetRTInput"
sys.stdout.flush()
UVS.inp['mc_sensorposition'] = '{:8.1f} {:8.1f} {:8.1f}'.format(self.x, self.y, self.z)
UVS.inp['mc_sample_grid'] = '{:d} {:d}'.format(self.h_pixels, self.v_pixels)
UVS.inp['mc_panorama_view'] = '{:f} {:f} {:f} {:f}'.format(self.phi1, self.phi2, self.theta1, self.theta2)
UVS.inp['mc_panorama_alignment'] = 'mu'
UVS.inp['umu'] = '{:f}'.format(self.umu)
UVS.inp['phi'] = '{:f}'.format(self.phi)
# UVS.inp['mc_panorama_alignment'] = 'sun'
# UVS.inp['mc_panorama'] = 'weight_with_cos'
# UVS.inp['mc_panorama'] = 'with_direct_rad'
# UVS.inp['umu'] = '{:f}'.format((np.cos(np.deg2rad(0.5*(self.theta1+self.theta2)))))
# UVS.inp['phi'] = '{:f}'.format(0.5*(self.phi1+self.phi2)-UVS.inp['phi0'])
if self.wavelength1 != self.wavelength2:
UVS.inp['wavelength_grid_file'] = self.wavelength_grid_file
if not 'mol_abs_param' in UVS.inp:
UVS.inp['mol_abs_param'] = 'crs'
try:
self.wavelength
UVS.inp['wavelength'] = self.wavelength
except:
pass
try:
self.wavelength1
self.wavelength2
UVS.inp['wavelength'] = str(self.wavelength1)+' '+str(self.wavelength2)
except:
print "Both wavelength1 and wavelength2 must be given"
exit()
# try:
# self.filterfunction
# UVS.inp['filter_function_file'] = self.filterfunction
# UVS.inp['output_process'] = 'integrate'
# except:
# pass
return
class Spectrometer(Camera):
def __init__(self, RunName=''):
self.Type='Spectrometer'
self.verbose=False
self.name='Spectrometer'
self.savename='Spectrometer'
# Localisation of camera in m
self.x = 0.0
self.y = 0.0
self.z = 0.0
# Number of pixels in horizontal and vertical
self.h_pixels=1
self.v_pixels=1
# Field of view of camera: phi1 phi2 theta1 theta2
# phi = 0 is looking to the south
# phi = 180 is with the sun in the back if phi0=0
self.phi1 = 0.0
self.phi2 = 0.0
self.theta1 = 0.0
self.theta2 = 0.0
self.mol_modify_o3=-9999
self.crs_o3= '../Data/XSections/O3_Serdyuchenko_2014_223K_213-1100nm2013version.txt'
self.crs_o4= '../Data/XSections/o4_thalman_volkamer_293K.dat'
self.slitfunction = ''
self.wavelength_grid_file='../Data/XSections/uvspec_SO2_wavelength_grid_file'
self.RandString= 'Spec'+RunName+'_'+''.join(random.sample(string.ascii_lowercase, 5))
return
def SetRTInput(self, UVS):
print self.savename+" SetRTInput"
sys.stdout.flush()
UVS.inp['mc_sensorposition'] = '{:8.1f} {:8.1f} {:8.1f}'.format(self.x, self.y, self.z)
UVS.inp['mc_sample_grid'] = '{:d} {:d}'.format(self.h_pixels, self.v_pixels)
UVS.inp['mc_panorama_view'] = '{:f} {:f} {:f} {:f}'.format(self.phi1, self.phi2, self.theta1, self.theta2)
UVS.inp['wavelength'] = str(self.wavelength1)+' '+str(self.wavelength2)
UVS.inp['mol_abs_param'] = 'crs'
UVS.inp['crs_file O4'] = self.crs_o4
UVS.inp['crs_file O3'] = self.crs_o3
if self.mol_modify_o3>0.0:
UVS.inp['mol_modify O3'] = self.mol_modify_o3+' DU'
# Do this in a separate call to conv and spline after running uvspec
# if self.slitfunction != '':
# UVS.inp['slit_function_file'] = self.slitfunction
UVS.inp['wavelength_grid_file'] = self.wavelength_grid_file
return
def CalculateColumnDensity(self):
"""
NOTE: It is assumed that integration is along x-axis for the
center pixels in the y-direction.
"""
for Plu in self.PlumeList:
if 'SO2' in Plu.name:
fact = 100. # Convert from m to cm to column in cm-2
else:
fact = 1.0
# print "CalculateCol", Plu.name, fact
# Calculate line integral for Spectrometer using Tomography software
nx=Plu.nx
dx=Plu.dx*1000 # # In meters
x0=0
nz=Plu.nz
z0=Plu.z[0]*1000.0
dz=Plu.dz*1000.0 #100. # In meters
# RR=ReconstructionRegion
RR = TC.Area(nx=nx,dx=dx, x0=x0, nz=nz, dz=dz, z0=z0 )
RR.zmin = RR.z0
RR.zmax = RR.z0 + RR.dz*RR.nz
RR.Image = np.zeros([nz,nx])
# print RR.Image.shape, Plu.dens.shape
ycenter = int(Plu.dens.shape[1]/2) # Do this for center slice in y-direction
RR.Image=Plu.dens[:,ycenter,1:] # Plu is one pixel larger in z-direction, inconsistent.
RR.Image=RR.Image.T # Sigh, why do I use different conventions for x and y......
indices=np.argmax(RR.Image)
#maxind=np.unravel_index(indices, RR.Image.shape)
#print "RR", indices, maxind
indices=np.argmax(Plu.dens)
maxind=np.unravel_index(indices, Plu.dens.shape)
# print "Plu dens", indices, maxind
# print "RR.Image.max()", RR.Image.min(), RR.Image.max(), Plu.dens[maxind]
theta1 = self.theta1-90 # And different conventions for the angles.....
theta2 = self.theta2-90
Nrays=9
Camera1=TC.TomoCamera(x=-Plu.x_start*1000-Plu.x_length*1000/2, z=self.z, theta1=theta1, theta2=theta2, Nrays=Nrays,Name='Cam 1')
Camera1.Rays()
iRay=0
sumtmpRq=0
while iRay<Camera1.Nrays:
tmpRq, tmpTotalLength, tmpind, tmpN =Camera1.CalculateLineIntegral(RR, iRay)
Camera1.Sinogram[iRay]=tmpRq
# print '{:e}'.format(tmpRq)
sumtmpRq=sumtmpRq+tmpRq
iRay=iRay+1
Plu.ColumnDensity = fact*sumtmpRq/Nrays
# fnTestLineIntegral='tmpgabba_'
# tmpfn = fnTestLineIntegral+'Cam1.dat'
# print 'tmpfn', tmpfn
# Camera1.WriteLineIntegralToFile(tmpfn)
return
class Domain:
def __init__(self):
self.verbose=False
self.name='TestDomain'
# Domain size, all in km
self.x_start = 0
self.x_end = 0.4
self.dx = 0.001
self.nx=0
self.x =None
self.y_start = 0
self.y_end = 0.8
self.dy = 0.001
self.ny=0
self.y =None
self.z_start = 0.150
self.z_end = 0.350
self.dz = 0.001
self.nz=0
self.z =None
self.x_centre = 0.5*(self.x_start+self.x_end)
self.y_centre = 0.5*(self.y_start+self.y_end)
self.z_centre = 0.5*(self.z_start+self.z_end)
return
def finalize(self):
self.nx = int(np.rint((self.x_end-self.x_start)/self.dx))
self.x = np.linspace(self.x_start,self.x_end,self.nx+1)
self.x_size = self.x_end-self.x_start
self.ny = int(np.rint((self.y_end-self.y_start)/self.dy))
self.y = np.linspace(self.y_start,self.y_end,self.ny+1)
self.y_size = self.y_end-self.y_start
self.nz = int(np.rint((self.z_end-self.z_start)/self.dz))
self.z = np.linspace(self.z_start,self.z_end,self.nz+1)
self.z_size = self.z_end-self.z_start
self.x_centre = 0.5*(self.x_start+self.x_end)
self.y_centre = 0.5*(self.y_start+self.y_end)
self.z_centre = 0.5*(self.z_start+self.z_end)
return
def info(self):
print
print 'Domain name:', self.name
print '(All dimensions are in units of km)'
print 'x_start {:f}, x_end {:f}, dx {:f}, nx {:d}'.format(self.x_start, self.x_end, self.dx, self.nx)
print 'y_start {:f}, y_end {:f}, dy {:f}, ny {:d}'.format(self.y_start, self.y_end, self.dy, self.ny)
print 'z_start {:f}, z_end {:f}, dz {:f}, nz {:d}'.format(self.z_start, self.z_end, self.dz, self.nz)
print 'x_centre {:f}, y_centre {:f}, z_centre {:f}'.format(self.x_centre, self.y_centre, self.z_centre)
print
sys.stdout.flush()
return
def save(self,folder):
pkl_file = open(folder+'Domain.pkl', 'wb')
pickle.dump(self,pkl_file )
pkl_file.close()
return
class Plume:
def __init__(self):
self.verbose=False
self.name='TestPlume'
self.randname = 'Plume'+''.join((random.sample(string.ascii_lowercase, 5)))
self.shape= '' # Either set by input to set_density or inside ReadLESNetCDF
self.x_start = None
self.x_end = None
self.x_length = None
self.y_start = None
self.y_end = None
self.y_length = None
self.z_start = None
self.z_end = None
self.z_length = None
self.set_density_flag=False
self.ext = 0.0
self.gg = 0.0
self.ssa = 0.0
self.cdf = ''
self.MYSTIC_profile_file_flag=0
return
def revise_domain(self, Domain, verbose=False):
if verbose:
print "Inside revise_domain"
sys.stdout.flush()
# Only need to revise z-direction as x- and y-direction should be ok
# For z-direction avoid first value as it has a half step.
tmpdz = self.z[2]-self.z[1]
tmpz_start=self.z[1]-tmpdz#*self.z.shape[0]
# Add 1 as we want altitude at levels.
tmpz = np.arange(tmpz_start, tmpz_start+tmpdz*float(self.z.shape[0]+1), tmpdz)
Domain.z = tmpz
Domain.dz = tmpdz
Domain.nz = Domain.z.shape[0]-1 # -1 as this should be number of layers, not levels
Domain.z_start=Domain.z[0]
Domain.z_end=Domain.z[Domain.nz]
Domain.z_size = Domain.z_end-Domain.z_start
return Domain
def finalize_size(self, Domain, verbose=False):
if verbose:
print "Inside finalize_size"
sys.stdout.flush()
self.x_end = self.x_start+self.x_length
self.y_end = self.y_start+self.y_length
self.z_end = self.z_start+self.z_length
self.x_centre = self.x_start+0.5*self.x_length
self.y_centre = self.y_start+0.5*self.y_length
self.z_centre = self.z_start+0.5*self.z_length
self.dx = Domain.dx
self.dy = Domain.dy
self.dz = Domain.dz
self.nx = int(np.rint((self.x_end-self.x_start)/self.dx))
self.x = np.linspace(self.x_start,self.x_end,self.nx+1)
self.ny = int(np.rint((self.y_end-self.y_start)/self.dy))
self.y = np.linspace(self.y_start,self.y_end,self.ny+1)
self.nz = int(np.ceil((self.z_end-self.z_start)/self.dz))
self.z = np.linspace(self.z_start,self.z_end,self.nz+1)
# Check that plume is entirely within domain
if self.x_start < Domain.x_start or self.x_start > Domain.x_end:
print 'Plume finalize: x_start {:f} outside domain {:f} {:f}'.format(self.x_start, Domain.x_start, Domain.x_end)
exit()
if self.x_end < Domain.x_start or self.x_end > Domain.x_end:
print 'Plume finalize: x_end {:f} outside domain {:f} {:f}'.format(self.x_end, Domain.x_start, Domain.x_end)
exit()
if self.y_start < Domain.y_start or self.y_start > Domain.y_end:
print 'Plume finalize: y_start {:f} outside domain {:f} {:f}'.format(self.y_start, Domain.y_start, Domain.y_end)
exit()
if self.y_end < Domain.y_start or self.y_end > Domain.y_end:
print 'Plume finalize: y_end {:f} outside domain {:f} {:f}'.format(self.y_end, Domain.y_start, Domain.y_end)
exit()
if self.z_start < Domain.z_start or self.z_start > Domain.z_end:
print 'Plume finalize: z_start {:f} outside domain {:f} {:f}'.format(self.z_start, Domain.z_start, Domain.z_end)
exit()
if self.z_end < Domain.z_start or self.z_end > Domain.z_end:
print 'Plume finalize: z_end {:f} outside domain {:f} {:f}'.format(self.z_end, Domain.z_start, Domain.z_end)
print self.z_end-Domain.z_start, self.z_end-Domain.z_end
exit()
# Get pixel indices for the plume within the domain. These pixel indices
# correspond to those needed by MYSTIC.
if self.shape=='User':
# self.ix_start = self.xadd*self.x.shape[0]
# self.ix_end = (self.xadd+1)*self.x.shape[0]
# self.ix_start = 0
# self.ix_end = self.x.shape[0]
id = find_nearest_id(Domain.x,self.x_start)
self.ix_start = id
self.ix_end = id+self.x.shape[0]
# self.iy_start = self.yadd*self.y.shape[0]
# self.iy_end = (self.yadd+1)*self.y.shape[0]
# self.iy_start = 0
# self.iy_end = self.y.shape[0]
id = find_nearest_id(Domain.y,self.y_start)
self.iy_start = id
self.iy_end = id+self.y.shape[0]
id = find_nearest_id(Domain.z,self.z_start)
self.iz_start = id
self.iz_end = id+self.z.shape[0]
# self.iz_start = 0
# self.iz_end = self.z.shape[0]
# self.iz_start = self.zadd*self.z.shape[0]
# self.iz_end = (self.zadd+1)*self.z.shape[0]
else:
self.ix_start = int((self.x_start-Domain.x_start)/Domain.dx)
self.ix_end = int((self.x_end-Domain.x_start)/Domain.dx+Domain.dx/2.)
self.ix_size = self.ix_end-self.ix_start
self.iy_start = int((self.y_start-Domain.y_start)/Domain.dy)
self.iy_end = int((self.y_end-Domain.y_start)/Domain.dy+Domain.dy/2.)
self.iy_size = self.iy_end-self.iy_start
self.iz_start = int((self.z_start-Domain.z_start)/Domain.dz)
self.iz_end = int((self.z_end-Domain.z_start)/Domain.dz+Domain.dz/2.)
self.iz_size = self.iz_end-self.iz_start
return
def info(self,latex=False, Domain=None):
CF=ConversionFactors()
print
print 'Plume name:', self.name
print '(All dimensions are in units of km)'
print 'x_start {:f}, x_end {:f}'.format(self.x_start, self.x_end)
print 'y_start {:f}, y_end {:f}'.format(self.y_start, self.y_end)
print 'z_start {:f}, z_end {:f}'.format(self.z_start, self.z_end)
if self.set_density_flag==True and 'Gauss' in self.shape:
print 'x_sigma: {:f} y_sigma: {:f} z_sigma: {:f}'.format(self.x_sigma, self.y_sigma, self.z_sigma)
if latex:
print '& {:4.1f} & {:4.1f} & {:6.4f} & {:4.1f} & {:4.1f} & {:6.4f} & {:4.1f} & {:4.1f} & {:6.4f}& {:8.2f}\\\\'.format(
self.x_start*CF.km2m, self.x_end*CF.km2m, self.x_sigma*CF.km2m,
self.y_start*CF.km2m, self.y_end*CF.km2m, self.y_sigma*CF.km2m,
self.z_start*CF.km2m, self.z_end*CF.km2m, self.z_sigma*CF.km2m, self.dens.max())
print 'Indices'
if Domain != None:
print 'ix_start {:d}, ix_end {:d}, {:f}, {:f}'.format(self.ix_start, self.ix_end, Domain.x[self.ix_start], Domain.x[self.ix_end-1])
print 'iy_start {:d}, iy_end {:d}, {:f}, {:f}'.format(self.iy_start, self.iy_end, Domain.y[self.iy_start], Domain.y[self.iy_end-1])
print 'iz_start {:d}, iz_end {:d}, {:f}, {:f}'.format(self.iz_start, self.iz_end, Domain.z[self.iz_start], Domain.z[self.iz_end-1])
else:
print 'ix_start {:d}, ix_end {:d}'.format(self.ix_start, self.ix_end)
print 'iy_start {:d}, iy_end {:d}'.format(self.iy_start, self.iy_end)
print 'iz_start {:d}, iz_end {:d}'.format(self.iz_start, self.iz_end)
print 'Dens'
print 'dens.shape {:s} dens.max() {:e}'.format(str(self.dens.shape), self.dens.max())
# if self.set_density_flag==True:
# print 'ix_sigma: {:d} iy_sigma: {:d} iz_sigma: {:d}'.format(self.ix_sigma, self.iy_sigma, self.iz_sigma)
print
sys.stdout.flush()
return
def set_density(self, shape, **kwargs):
"""
Specify the shape and density of the plume. For each shape different
input is needed. These are given in the kwargs dictionary. For the
various shapes this is:
shape=='3DGaussian':
Required input:
x_sigma, y_sigma, z_sigma: standard deviations (km) in each direction
ext: Extinction (km-1). The maximum value in the density field will
be set to this value, and the rest scaled accordingly.
Optional input:
The pklfile and pklfile_z_integrated inputs are useful for checking
of plume shape and magnitude of density field and are used together
with separate scripts for plotting.
pklfile: dump pickle of density field to specified file
pklfile_z_integrated: dump pickle of density field integrated along
z-axis to specified file
"""
self.set_density_flag=True
self.shape = shape
if kwargs=={}:
print "Plume set_density: No density information provided"
exit()
if 'ext' in kwargs and 'gg' in kwargs and 'ssa' in kwargs:
self.MYSTIC_profile_file_flag =1
elif 'ext' in kwargs and 'reff' in kwargs:
self.MYSTIC_profile_file_flag =2
elif 'LWC' in kwargs and 'reff' in kwargs:
self.MYSTIC_profile_file_flag =3
if self.MYSTIC_profile_file_flag == 1:
self.ext = kwargs['ext']
self.gg = kwargs['gg']
self.ssa = kwargs['ssa']
elif self.MYSTIC_profile_file_flag == 3:
self.LWC = kwargs['LWC']
self.reff = kwargs['reff']
if shape=='3DGaussian':
self.x_sigma = kwargs['x_sigma']
self.y_sigma = kwargs['y_sigma']
self.z_sigma = kwargs['z_sigma']
self.ix_sigma= int(self.x_sigma/self.dx)
self.iy_sigma= int(self.y_sigma/self.dy)
self.iz_sigma= int(self.z_sigma/self.dz)
if self.ix_sigma<=0: self.ix_sigma=1
if self.iy_sigma<=0: self.iy_sigma=1
if self.iz_sigma<=0: self.iz_sigma=1
# Gaussian distribution is made in pixel indices coordinates:
self.dens=make3DGaussian(self.ix_size, self.iy_size, self.iz_size,
sigmax=self.ix_sigma, sigmay=self.iy_sigma, sigmaz=self.iz_sigma, center=None)
# Scale plume to wanted extinction/optical depth
if self.MYSTIC_profile_file_flag == 1:
self.dens = self.dens*self.ext/self.dens.max()
elif self.MYSTIC_profile_file_flag == 3:
if self.LWC<0.0:
self.dens = self.dens
else:
self.dens = self.dens*self.LWC/self.dens.max()
elif shape=='3DVerticalPlume':
self.TopRadius = kwargs['TopRadius']
self.BottomRadius = kwargs['BottomRadius']
self.x_sigma = kwargs['x_sigma']
self.y_sigma = kwargs['y_sigma']
self.iTopRadius = self.TopRadius/self.dx # TopRadius in pixels
self.iBottomRadius = self.BottomRadius/self.dx # BottomRadius in pixels
if self.MYSTIC_profile_file_flag == 1:
dummy = self.ext
elif self.MYSTIC_profile_file_flag == 3:
dummy = self.LWC
self.dens=make3DVerticalPlume(self.ix_size, self.iy_size, self.iz_size,
self.iz_start, self.iz_end, self.iBottomRadius, self.iTopRadius,
self.x_sigma, self.y_sigma, scale_factor=dummy)
elif shape=='Box':
# print "self.nx", self.nx, self.ny, self.nz, self.x, self.z
# print "self.nx", self.ix_size, self.ix_end, self.ix_start, \
# self.iy_size, self.iy_end, self.iy_start, \
# self.iz_size, self.iz_end, self.iz_start
if self.MYSTIC_profile_file_flag == 1:
dummy = self.ext
elif self.MYSTIC_profile_file_flag == 3:
dummy = self.LWC
self.dens=make3DBox(self.ix_size, self.iy_size, self.iz_size,
scale_factor=dummy)
elif shape=='Cell':
# print "self.nx", self.nx, self.ny, self.nz, self.x, self.z
# print "self.nx", self.ix_size, self.ix_end, self.ix_start, \
# self.iy_size, self.iy_end, self.iy_start, \
# self.iz_size, self.iz_end, self.iz_start
if self.MYSTIC_profile_file_flag == 1:
dummy = self.ext
elif self.MYSTIC_profile_file_flag == 3:
dummy = self.LWC
self.dens=make3DCell(self.ix_size, self.iy_size, self.iz_size,
scale_factor=dummy)
elif shape=='SingleLayer':
print self.ix_size, self.iy_size, self.iz_size
# Constant-density layer: fill the whole plume box with the given LWC
self.dens = np.full((self.ix_size, self.iy_size, self.iz_size), self.LWC)
print self.dens.shape
elif shape=='Ellipsoid':
self.x_centre = kwargs['x_centre']
self.y_centre = kwargs['y_centre']
self.z_centre = kwargs['z_centre']
self.ellipsoid_a = kwargs['a']
self.ellipsoid_b = kwargs['b']
self.ellipsoid_c = kwargs['c']
self.ix_centre= int((self.x_centre-self.x_start)/self.dx)
self.iy_centre= int((self.y_centre-self.y_start)/self.dy)
self.iz_centre= int((self.z_centre-self.z_start)/self.dz)
# Ellipsoid shaped constant concentration is made in pixel indices coordinates:
self.dens=make3DEllipsoid(self.ix_size, self.iy_size, self.iz_size,
self.x_centre, self.y_centre, self.z_centre,
self.ellipsoid_a, self.ellipsoid_b, self.ellipsoid_c,
self.x, self.y, self.z)
# Scale plume to wanted extinction/optical depth
if self.MYSTIC_profile_file_flag == 1:
self.dens = self.dens*self.ext/self.dens.max()
elif self.MYSTIC_profile_file_flag == 3:
self.dens = self.dens*self.LWC/self.dens.max()
elif shape=='User':
# Scale plume to wanted extinction/optical depth
if self.MYSTIC_profile_file_flag == 1:
self.dens = self.dens*self.ext/self.dens.max()
elif self.MYSTIC_profile_file_flag == 3:
print "self.dens.max()", self.dens.max()
sys.stdout.flush()
if self.LWC<0.0:
self.dens = self.dens
else:
if self.dens.max() > 0.0:
print "self.dens.max()", self.dens.max()
self.dens = self.dens*self.LWC/self.dens.max()
else:
self.dens = self.dens*self.LWC
else:
print 'Plume set_density: Unknown plume density shape: ', shape
exit()
if 'pklfile' in kwargs.keys():
pkl_file = open(kwargs['pklfile'], 'wb')
pickle.dump(self.dens,pkl_file )
pkl_file.close()
if 'pklfile_z_integrated' in kwargs.keys():
tmpInt= np.trapz(self.dens[:,:,:], self.z[1:], axis=2)
pkl_file = open(kwargs['pklfile_z_integrated'], 'wb')
pickle.dump(tmpInt,pkl_file )
pkl_file.close()
return
def save(self,folder):
pkl_file = open(folder+'Plume'+self.name+'.pkl', 'wb')
pickle.dump(self,pkl_file )
pkl_file.close()
return
def SetRTInput(self, Domain, UVS):
print "Plume SetRTInput", UVS.IOdir+self.name+'.profile'
sys.stdout.flush()
self.ProfileInputFile = UVS.IOdir+self.name+'.profile'
UVS.inp['profile_file '+self.randname+' 3d '] = self.ProfileInputFile
if self.cdf == '':
UVS.inp['profile_properties '+self.randname] = 'mie interpolate'
else:
UVS.inp['profile_properties '+self.randname] = self.cdf+' interpolate'
# These large arrays are only local as this is the only place they
# are needed.
if self.MYSTIC_profile_file_flag==1:
tmpext = np.zeros([Domain.nx,Domain.ny, len(Domain.z)])
tmpgg = np.zeros([Domain.nx,Domain.ny, len(Domain.z)])
tmpssa = np.zeros([Domain.nx,Domain.ny, len(Domain.z)])
tmpext[self.ix_start:self.ix_end,self.iy_start:self.iy_end,self.iz_start:self.iz_end] = self.dens
tmpgg = np.where(tmpext>0, self.gg, 0.0)
tmpssa = np.where(tmpext>0, self.ssa, 0.0)
args = {
'ext': tmpext,
'gg' : tmpgg,
'ssa': tmpssa
}
elif self.MYSTIC_profile_file_flag==3:
tmpLWC = np.zeros([Domain.nx,Domain.ny, len(Domain.z)])
tmpreff = np.zeros([Domain.nx,Domain.ny, len(Domain.z)])
tmpLWC[self.ix_start:self.ix_end,self.iy_start:self.iy_end,self.iz_start:self.iz_end] = self.dens
tmpreff = np.where(tmpLWC>0, self.reff, 1.0)
args = {
'LWC': tmpLWC,
'reff' : tmpreff,
}
Write3DMYSTICFile(self.ProfileInputFile, type='Generic',
nx = Domain.nx, ny = Domain.ny, nz = Domain.nz,
dx = Domain.dx, dy = Domain.dy, z = Domain.z, LWCLimit=self.LWCLimit,
flag=self.MYSTIC_profile_file_flag, **args)
return
def ReadLESNetCDF(self, fn, timestep=0, verbose=False, scale_axis_factor=1.,
ROI=False, xlimits=(0,0), zlimits=(0,0), Qbl=-999):
"""
Qbl = SO2 emission rate in kg/s, see note by Soon-Young
"""
CF=ConversionFactors()
if verbose:
print "ReadLESNetCDF: fn:", fn
sys.stdout.flush()
ncfile = Dataset(fn,'r')
if verbose:
print ncfile.variables
sys.stdout.flush()
self.shape='User'
self.time = ncfile.variables['time'][:]
self.z = ncfile.variables['zu_3d'][:]*CF.m2km*scale_axis_factor
self.x = ncfile.variables['x'][:]*CF.m2km*scale_axis_factor
self.y = ncfile.variables['y'][:]*CF.m2km*scale_axis_factor
self.dens = ncfile.variables['s'][:]
self.dens = self.dens[timestep,:,:,:]
# print "self.dens", self.dens.shape
# LES densities are z,y,x, change to x,y,z
self.dens = np.swapaxes(self.dens,0,2)
# print "self.dens", self.dens.shape
# print "self.z", scale_axis_factor, CF.m2km, self.z
if ROI:
print "self.dens", self.dens.shape, self.x.shape, self.z.shape
sys.stdout.flush()
if xlimits[0]!=xlimits[1]:
print "GABBA", xlimits
indx = np.where((self.x >=xlimits[0] ) & (self.x <= xlimits[1]))
self.dens = self.dens[indx[0][0]:indx[0][len(indx[0])-1],:,:]
self.x = self.x[indx[0][0]:indx[0][len(indx[0])-1]]
print "self.dens", self.dens.shape, self.x.shape
if zlimits[0]!=zlimits[1]:
print "GABBA", zlimits
indz = np.where((self.z >=zlimits[0] ) & (self.z <= zlimits[1]))
self.dens = self.dens[:,:, indz[0][0]:indz[0][len(indz[0])-1]]
self.z = self.z[indz[0][0]:indz[0][len(indz[0])-1]]
print "self.dens", self.dens.shape, self.x.shape, self.z.shape
self.x_start=self.x[0]
self.x_length=self.x[len(self.x)-1]-self.x_start
self.y_start=self.y[0]
self.y_length=self.y[len(self.y)-1]-self.y_start
self.z_start=self.z[0]
self.z_length=self.z[len(self.z)-1]-self.z_start
if Qbl>0.0:
from scipy.constants import N_A
# 0.00001024 = (H_wt**2)/(H_bl**2) , see note by Soon-Young
# SO2: 64.066 g mol-1
molweight=64.066
ConcFact=Qbl*0.00001024*CF.kgtog*N_A/(CF.m3tocm3*molweight)
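# ConcFact appears to convert the dimensionless LES scalar field to a number
# density: kg/s emission -> g (CF.kgtog), g -> molecules (N_A/molweight),
# m-3 -> cm-3 (1/CF.m3tocm3), scaled by the geometric factor 0.00001024
# from the note referenced above.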
self.dens=self.dens*ConcFact
print "Qbl=", Qbl, N_A, self.dens.max()
ncfile.close()
return
def test(self):
print "#################################################################################"
print "TEST TEST TEST PLUME DENSITY CHANGED"
print "#################################################################################"
# print self.z
self.dens = self.dens*0.0
indz = np.where((self.z > 0.05) & (self.z < 0.300))
# indy = np.where((self.y > 1.6) & (self.y < 1.8))
indy = np.where((self.y > .15) & (self.y < .35))
indx = np.where((self.x > 0.502) & (self.x < 1.4))
# print "indz", indz[0]
# print "indy", indy[0]
# print "indx", indx[0]
self.dens[indx[0][0]:indx[0][len(indx[0])-1], indy[0][0]:indy[0][len(indy[0])-1], indz[0][0]:indz[0][len(indz[0])-1]]=0.0000025
# indz = np.where((self.z > 0.475) & (self.z < 0.625))
# indz = np.where((self.z > 0.1) & (self.z < 0.3))
# self.dens[indx[0][0]:indx[0][len(indx[0])-1], indy[0][0]:indy[0][len(indy[0])-1], indz[0][0]:indz[0][len(indz[0])-1]]=0.000000025
# indx = np.where((self.x > 1.2) & (self.x < 1.4))
# self.dens[indx[0][0]:indx[0][len(indx[0])-1], indy[0][0]:indy[0][len(indy[0])-1], indz[0][0]:indz[0][len(indz[0])-1]]=0.0
indx = np.where((self.x > .65) & (self.x < 1.0))
self.dens[indx[0][0]:indx[0][len(indx[0])-1], indy[0][0]:indy[0][len(indy[0])-1], indz[0][0]:indz[0][len(indz[0])-1]]=0.0
# indy = np.where((self.y > 1.7) & (self.y < 1.8))
# self.dens[indx[0][0]:indx[0][len(indx[0])-1], indy[0][0]:indy[0][len(indy[0])-1], indz[0][0]:indz[0][len(indz[0])-1]]=0.00025
indn = np.where(self.dens>0.0)
# print "len(indn[0])", len(indn[0])
indn = np.where(self.dens>0.5)
# print "len(indn[0])", len(indn[0])
def shiftxyz(self, x_shift=0.0, y_shift=0.0, z_shift=0.0):
"""
Shift location of plume in 3D. All units in km
"""
self.x = self.x+x_shift
self.y = self.y+y_shift
self.z = self.z+z_shift
self.x_shift = x_shift
self.y_shift = y_shift
self.z_shift = z_shift
self.x_start=self.x[0]
self.x_length=self.x[len(self.x)-1]-self.x_start
self.y_start=self.y[0]
self.y_length=self.y[len(self.y)-1]-self.y_start
self.z_start=self.z[0]
self.z_length=self.z[len(self.z)-1]-self.z_start
return
class Experiment():
def __init__(self, home):
self.home = home
self.uvspecpath = ''
self.verbose=False
self.CF = ConversionFactors()
self.Domain = Domain()
self.UVS = UVspec()
self.PlumeList = []
self.CameraList = []
self.SpectrometerList = []
self.n_processes=1
self.folder = './tmp/'
self.RandString= ''.join(random.sample(string.ascii_lowercase, 5))
self.UVS.RandString=self.RandString
return
def finalize(self):
try:
os.makedirs(self.folder)
except OSError:
if os.path.exists(self.folder):
# We are nearly safe
pass
else:
# There was an error on creation, so make sure we know about it
print 'Could not create {:s}'.format(self.folder)
raise
self.UVS.IOdir = self.folder
return
def save(self, FileName=''):
if FileName=='':
pkl_file = open(self.folder+'Experiment.pkl', 'wb')
else:
pkl_file = open(FileName, 'wb')
pickle.dump(self,pkl_file )
pkl_file.close()
self.Domain.save(self.folder)
for Plu in self.PlumeList:
Plu.save(self.folder)
for Cam in self.CameraList:
Cam.save(self.folder)
return
def Run(self, Instrument, RunUvspec=True, OldOutFiles=[], verbose=True, Wait=False):
if verbose:
print 'Experiment.Run InputFile {:s}'.format(self.InputFile)
sys.stdout.flush()
Convolve=False
if Instrument.Type=='Spectrometer':
try :
Instrument.slitfunction
Convolve=True
except:
Convolve=False
if Instrument.Type=='Camera':
try :
Instrument.filterfunction
Convolve=True
except:
Convolve=False
if verbose:
print "Experiment:Run Convolve", Convolve
# Output file name is the same whether or not a slit-function convolution is applied
self.OutputFile = self.InputFile.replace('.inp','.out')
if verbose:
print 'Experiment.Run OutputFile {:s}'.format(self.OutputFile)
sys.stdout.flush()
OutputFiles=[]
tmpoutputfiles=[]
(tmpoutputfiles, OutputFiles)=self.UVS.Run(self.InputFile, self.OutputFile, n_processes=self.n_processes,
uvspecpath=self.UVS.uvspecpath, RandString=Instrument.RandString,
Convolve=Convolve, Instrument=Instrument, OldOutFiles=OldOutFiles,
RunUvspec=RunUvspec, Wait=Wait)
if RunUvspec and Convolve:
for fnto, fnfrom in zip(OutputFiles, tmpoutputfiles):
print "fnto, fnfrom", fnto, fnfrom
sys.stdout.flush()
shutil.copy(fnfrom,fnto)
return OutputFiles
def WriteRTInput(self, Instrument, InputFile=None,verbose=False):
# Make new UVS for each call to this function to avoid
# keeping old input parameters from previous calls.
UVS = copy.deepcopy(self.UVS)
if verbose:
print "Exp WriteRTInput"
sys.stdout.flush()
Instrument.SetRTInput(UVS)
for Plume in Instrument.PlumeList:
print "Exp WriteRTInput", Instrument.name, Plume.name, Plume.dens.shape
sys.stdout.flush()
Plume.SetRTInput(self.Domain, UVS)
if not 'mc_minphotons' in self.UVS.inp:
UVS.inp['mc_minphotons'] = UVS.inp['mc_photons']
# if self.n_processes==1:
UVS.inp['mc_std'] = ''
try:
self.Domain.elevation
nx=2
ny=2
fn = self.folder+'MYSTIC2DElevation.dat'
Write2DMYSTICElevationFile(fn, nx, ny, self.Domain.x_size, self.Domain.y_size, self.Domain.elevation)
UVS.inp['mc_elevation_file'] = fn
except:
pass
if InputFile==None:
InputFile = self.UVS.IOdir+'uvspec'+Instrument.name+'.inp'
self.InputFile=InputFile
print "Writing uvspec input to file", self.InputFile
sys.stdout.flush()
UVS.WriteInputFile(self.InputFile)
return
class UVspec:
def __init__(self):
# Set some uvspec input that most likely will stay the same
self.IOdir = './'
self.inp = {}
self.inp["mc_backward"] = ''
self.inp["mc_vroom"] = 'on'
self.inp["albedo"] = '0.0'
return
def add_mc_basename_to_input_file(self,mc_basename,fn):
f = open(fn,'a')
f.write('{0:s}\n'.format('mc_basename '+mc_basename))
f.close()
def info(self):
print
print 'UVspec parameters:'
print 'IOdir: {:s}'.format(self.IOdir)
sys.stdout.flush()
return
def Run(self,inp, out, verbose=True, n_processes=2, uvspecpath='', RandString='gabba',
Convolve=False, Instrument=None, OldOutFiles='',RunUvspec=True, Wait=False):
debug=False # True #
if verbose:
if RunUvspec:
print "Running uvspec with input file: ", inp
else:
print "NOT running uvspec, using old output file: ", inp
print "Output to file : ", out
print "Number of processors : ", n_processes
print "Convolve : ", Convolve
sys.stdout.flush()
tmp_out_base = 'tmp_mystic_'+RandString+'.out_'
tmp_inp_base = 'tmp_mystic_'+RandString+'.inp_'
# Remove all old files
# OR NOT: Keep and remove manually in order to be able
# inspect problems in *.err files, AK 20160526
#FIXME
# if RunUvspec:
# # for filename in glob('gabba'+tmp_out_base+"*"):
# for filename in glob(tmp_out_base+"*"):
# if not debug:
# os.remove(filename)
# for filename in glob(tmp_inp_base+"*"):
# # for filename in glob('gabba'+tmp_inp_base+"*"):
# if not debug:
# os.remove(filename)
if RunUvspec:
jobs = []
tmpinputfiles=[]
tmpoutputfiles=[]
for i in range(n_processes):
# Copy input file to temporary input file to be able to add different
# mc_basenames to the file without destroying the input file
tmp_inp = tmp_inp_base+str(i)
tmpinputfiles.append(tmp_inp)
cmd = 'cp '+inp+' '+tmp_inp
Popen([r"cp",inp, tmp_inp]).wait()
mc_basename = tmp_out_base+'NP_'+str(i)
self.add_mc_basename_to_input_file(mc_basename,tmp_inp)
tmp_out = tmp_out_base+str(i)
print "tmp_out", tmp_out
ips = '{:d}'.format(i)
tmpoutputfile = tmp_out.replace('out_'+ips,'out_NP_'+ips)+'.rad.spc'
print "tmpoutputfile", tmpoutputfile
tmpoutputfiles.append(tmpoutputfile)
if verbose:
print 'Starting process:',i,' inp:',tmp_inp,' out:',tmp_out
sys.stdout.flush()
if not debug:
if RunUvspec:
p = multiprocessing.Process(target=self.worker, args=(tmp_inp,tmp_out,uvspecpath))
jobs.append(p)
p.start()
for j in jobs:
j.join()
else:
tmpoutputfiles=OldOutFiles
if verbose:
print 'All processes done. Read output, convolve, average and calculate std.'
sys.stdout.flush()
if Wait:
print "Waiting ....."
sys.stdout.flush()
time.sleep(60*3) # Sleep for 3 minutes to assure that files are put in right place
if Convolve:
finalrawoutputfiles=[]
tmpfilestoaverage=[]
if Instrument.Type=='Spectrometer':
# Convolve with slit function if given.
if verbose:
print 'Convolving with slit function:', Instrument.slitfunction
sys.stdout.flush()
ip=0
for tmpoutputfile in tmpoutputfiles:
ips = '{:d}'.format(ip)
rawoutputfile = inp.replace('.inp','.out_NP_'+ips+'.rad.spc')
print tmpoutputfile, rawoutputfile
sys.stdout.flush()
finalrawoutputfiles.append(rawoutputfile)
tmpoutconv='tmpoutconv_'+Instrument.RandString+'_'+ips
cmd = '/usr/bin/time -v '+self.uvspecpath+'conv '+tmpoutputfile+' '+Instrument.slitfunction+' > '+tmpoutconv+' 2> '+tmpoutconv+'.err'
if verbose:
print cmd
sys.stdout.flush()
p = call(cmd,shell=True,stdin=PIPE,stdout=PIPE)
tmpoutspline='tmpoutspline_'+Instrument.RandString+'_'+ips
cmd = '/usr/bin/time -v '+self.uvspecpath+'spline '+'-q -l -b '+str(Instrument.wavelength1)+' -s '+str(Instrument.wavelengthstep)+' '+tmpoutconv+' > ' + tmpoutspline+' 2> '+tmpoutspline+'.err'
if verbose:
print cmd
sys.stdout.flush()
p = call(cmd,shell=True,stdin=PIPE,stdout=PIPE)
tmpfilestoaverage.append(tmpoutspline)
# Copy MYSTIC output files to final destination
shutil.copy(tmpoutputfile,rawoutputfile)
ip=ip+1
elif Instrument.Type=='Camera':
nx = Instrument.h_pixels
ny = Instrument.v_pixels
tmpSplineXFile='tmpSplineXFile'+Instrument.RandString
# Any output file should do to get wavelength information, there should be at least one.
tmpdata = np.loadtxt(tmpoutputfiles[0])
nwl = int(tmpdata.shape[0]/(nx*ny))
tmpdata = np.reshape(tmpdata,(nwl,nx, ny, tmpdata.shape[1]))
# Interpolate filter function to MYSTIC output wavelengths
fx = open(tmpSplineXFile,'w')
wvls = tmpdata[:,0,0,0]
for wvl in wvls:
fx.write('{:f}\n'.format(wvl))
fx.close()
tmpSplineOutputFile='tmpSplineOutputFile'+Instrument.RandString
cmd = '/usr/bin/time -v '+self.uvspecpath+'spline '+'-q -l -x '+tmpSplineXFile+' '+Instrument.filterfunction+' > ' + tmpSplineOutputFile+' 2> '+tmpSplineOutputFile+'.err'
if verbose:
print cmd
sys.stdout.flush()
p = call(cmd,shell=True,stdin=PIPE,stdout=PIPE)
tmpfilterfunctionwvl, tmpfilterfunction = np.loadtxt(tmpSplineOutputFile,unpack=True)
###
# Include loop over all output files.
###
ip=0
for tmpoutputfile in tmpoutputfiles:
ips = '{:d}'.format(ip)
rawoutputfile = inp.replace('.inp','.out_NP_'+ips+'.rad.spc')
if verbose:
print "tmpoutputfile, rawoutputfile", tmpoutputfile, rawoutputfile
sys.stdout.flush()
finalrawoutputfiles.append(rawoutputfile)
tmpdata = np.loadtxt(tmpoutputfile)
tmpdata = np.reshape(tmpdata,(nwl,nx, ny, tmpdata.shape[1]))
tmpoutputfilefilter = tmpoutputfile.replace('.out','.out_NP_'+ips+'.rad.spc')
if verbose:
print "tmpoutputfilefilter", tmpoutputfilefilter
tmpfilestoaverage.append(tmpoutputfilefilter)
f= open(tmpoutputfilefilter,'w')
# For each pixel
ix=0
iz=0
while ix<nx:
iy=0
while iy<ny:
## Multiply MYSTIC radiances with filter function
tmprad = tmpdata[:,ix,iy,4]*tmpfilterfunction
# tmpstd = tmpdata[:,ix,iy,5]*tmpfilterfunction
## Integrate over wavelength
totrad = np.trapz(tmprad, x=wvls)
# totstd = np.trapz(tmpstd, x=wvls)
# f.write('{0:8.2f} {1:3d} {2:3d} {3:3d} {4:9.4f} {5:11.6f}\n'.format(wvls[0],ix,iy,iz,totrad,totstd))
f.write('{0:8.2f} {1:3d} {2:3d} {3:3d} {4:9.4f}\n'.format(wvls[0],ix,iy,iz,totrad))
iy=iy+1
ix=ix+1
f.flush() # Do this to make sure everything is written
os.fsync(f.fileno()) # to file before continuing.
f.close()
ip=ip+1
else:
tmpfilestoaverage=tmpoutputfiles
finalrawoutputfiles=tmpoutputfiles
InputFiles = tmpfilestoaverage #tmp_out_base+'NP_'+'*'+'.rad.spc'
if n_processes==1:
if verbose:
print "InputFiles, OutputFileRaw", InputFiles, out, tmpoutputfiles
sys.stdout.flush()
CombineSingleProcessOuput(tmpoutputfiles, out, verbose=True)
else:
if verbose:
print "finalrawoutputfiles", finalrawoutputfiles
print "tmpoutputfiles", tmpoutputfiles
sys.stdout.flush()
Average_spc_Files(InputFiles, out, verbose=True)
return (tmpoutputfiles, finalrawoutputfiles)
def SingleRun(self,inp, out, verbose=False, uvspecpath=''):
if verbose:
print "Running uvspec with input file: ", inp
print "Output to file : ", out
sys.stdout.flush()
if 'xnilu_wrk' in uvspecpath:
uvspec='uvspec3D'
else:
uvspec='uvspec'
cmd = '/usr/bin/time -v '+uvspecpath+uvspec+' < '+inp+' > '+out+' 2> '+out+'.err'
if verbose:
print cmd
sys.stdout.flush()
#(uvspec < uvspec.inp > uvspec.out) >& uvspec.err
#FIXME
p = call(cmd,shell=True,stdin=PIPE,stdout=PIPE)
return
def worker(self, input,output, uvspecpath=''):
"""thread worker function"""
verbose = True
self.SingleRun(input,output,verbose=verbose, uvspecpath=uvspecpath)
return
def WriteInputFile(self, InputFile=None, verbose=False):
if verbose:
print "Writing uvspec input file", InputFile
sys.stdout.flush()
try:
f = open(InputFile,'w')
except:
print "Experiment.WriteRTFile: Not able to open uvspec input file."
exit()
for key in self.inp:
if verbose:
sys.stdout.write( key + ' ' + str(self.inp[key]) + '\n')
f.write( key + ' ' + str(self.inp[key]) + '\n')
f.flush() # Do this to make sure all input files are written
os.fsync(f.fileno()) # to file.
f.close()
return
#######################################################################
if __name__ == "__main__":
Exp = Experiment(home='./') # Experiment requires a home path; './' is a placeholder here
print Exp.Domain.name
Exp.CameraList.append(Camera())
for Cam in Exp.CameraList:
Cam.info()
```
#### File: arve/from_arve/UVspec.py
```python
import math
import os
import sys
import numpy as np
import multiprocessing
from subprocess import Popen,PIPE, STDOUT, call
from glob import glob
import time
# Where are we
myhost = os.uname()[1]
if myhost=='ak':
home = os.environ['HOME']+'/'
else:
home = '/xnilu_wrk/aky/'
class UVspec:
def __init__(self):
# Set some uvspec input that most likely will stay the same
self.inp = {
"data_files_path" : home+'develop/libRadtran/data/',
"quiet" : '',
}
self.inp["rte_solver"] = 'montecarlo'
self.inp["mc_backward"] = ''
self.inp["atmosphere_file"] = home+'develop/libRadtran/data/atmmod/afglms.dat'
self.inp["source"] = 'solar '+home+'develop/libRadtran/data/solar_flux/atlas_plus_modtran'
self.inp["mc_vroom"] = 'on'
self.inp["albedo"] = '0.0'
return
def WriteInputFile(self, InputFile=None, verbose=False):
self.InputFile=InputFile
if verbose:
print "Writing uvspec input file", InputFile
try:
f = open(InputFile,'w')
except:
print "UVspec.WriteInputFile: No uvspec input file name given."
exit()
for key in self.inp:
if verbose:
sys.stdout.write( key + ' ' + str(self.inp[key]) + '\n')
f.write( key + ' ' + str(self.inp[key]) + '\n')
f.close()
return
def add_mc_basename_to_input_file(self,mc_basename,fn):
f = open(fn,'a')
f.write('{0:s}\n'.format('mc_basename '+mc_basename))
f.close()
def worker(self, input,output):
"""thread worker function"""
verbose = 0
self.SingleRun(input,output,verbose)
return
def SingleRun(self,inp, out, verbose):
if verbose:
print "Running uvspec with input file: ", inp
print "Output to file : ", out
cmd = home+'/develop/libRadtran/bin/uvspec '+ ' < ' + inp + ' > ' + out
p = call(cmd,shell=True,stdin=PIPE,stdout=PIPE)
return
def Run(self,inp, out, verbose=False, n_processes=1):
debug=False # True
if verbose:
print "Running uvspec with input file: ", inp
print "Output to file : ", out
print "Number of processors : ", n_processes
tmp_out_base = 'tmp_mystic.out_'
tmp_inp_base = 'tmp_mystic.inp_'
# Remove all old files
for filename in glob(tmp_out_base+"*"):
if not debug:
os.remove(filename)
for filename in glob(tmp_inp_base+"*"):
if not debug:
os.remove(filename)
jobs = []
for i in range(n_processes):
# Copy input file to temporary input file to be able to add different
# mc_basenames to the file without destroying the input file
tmp_inp = tmp_inp_base+str(i)
cmd = 'cp '+inp+' '+tmp_inp
Popen([r"cp",inp, tmp_inp]).wait()
mc_basename = tmp_out_base+'NP_'+str(i)
self.add_mc_basename_to_input_file(mc_basename,tmp_inp)
tmp_out = tmp_out_base+str(i)
if verbose:
print 'Starting process:',i,' inp:',tmp_inp,' out:',tmp_out
if not debug:
p = multiprocessing.Process(target=self.worker, args=(tmp_inp,tmp_out))
jobs.append(p)
p.start()
for j in jobs:
j.join()
if verbose:
print 'All processes done. Read output, average and calculate std.'
InputFiles = tmp_out_base+'NP_'+'*'+'.rad.spc'
OutputFile= out
Average_spc_Files(InputFiles, OutputFile, verbose=True)
def zenith(lat, lon, year, month, day,hour,min=0, sec=0, stdlong=0,output=1, uvspecpath=''):
cmd = uvspecpath+'zenith '+str(day)+' '+str(month)+' '+str(hour)+' '+str(min)+' '+str(sec)+\
' '+'-y '+str(year)+' -a '+str(lat)+' -o '+str(lon)+' -s '+str(stdlong)+' -q'
res = Popen(cmd,shell=True,stdout=PIPE)
res.wait()
vals = res.communicate()
vals = vals[0].split()
sza = float(vals[1])
return sza
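# Hypothetical usage sketch (illustrative coordinates and date):
# sza = zenith(60.0, 10.0, 2015, 7, 1, 12, uvspecpath=home+'develop/libRadtran/bin/')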
def get_vals(fn,option):
""" Returns the values for option in an input file.
Usage:
values = get_vals(input_filename,optionname)
Input:
filename uvspec input file
optionname name of uvspec option
Output:
values list of option values
Author: <NAME>
Date: 2011-05-23
"""
f = open(fn,'r')
vals = ''
for line in f:
l = line.split()
# This does not work with the new input options.....
# if ( l[0] == option ):
# vals = l[1:len(l)]
# print l, option
if option in line:
nopts = len(option.split())
vals = l[nopts:len(l)]
# print l, option, nopts, vals
f.close()
return vals
def Average_spc_Files(InputFiles, OutputFile, verbose=False):
# First check that all files have the same number of lines. If not
# the files are surely different.
i = 0
for fn in glob(InputFiles):
with open(fn) as fp:
nlin = sum(1 for line in fp)
if i==0:
nlin0=nlin
else:
if nlin != nlin0:
print 'nlin: ' + str(nlin) + ', not equal nlin0: ' + str(nlin0)
exit(0)
i = i + 1
# All well? Combine all the files
wvl = np.zeros([len(InputFiles),nlin])
ix = np.zeros([len(InputFiles),nlin],dtype=int)
iy = np.zeros([len(InputFiles),nlin],dtype=int)
iz = np.zeros([len(InputFiles),nlin],dtype=int)
rad = np.zeros([len(InputFiles),nlin])
s2 = np.zeros([nlin])
radavg = np.zeros([nlin])
i = 0
for f in glob(InputFiles):
(wvl[i],ix[i],iy[i],iz[i],rad[i]) = read_rad_spc(f, verbose=False)
radavg[:] = radavg[:] + rad[i,:]
s2[:] = s2[:] + rad[i,:]*rad[i,:]
i = i + 1
s0 = i
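# s0 = number of files, radavg = sum(x) and s2 = sum(x^2) per output line;
# the per-line standard deviation computed below is
# std = sqrt(mean(x^2) - mean(x)^2) = (1/s0)*sqrt(s0*s2 - s1^2).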
l = 0
f = open(OutputFile,'w')
while l < nlin:
s1 = radavg[l]
arg = s0*s2[l] - s1*s1
if arg < 0.0:
print >> sys.stderr, l, arg, s0, s1, s2[l]
arg = 0.0
std = (1.0/s0)*math.sqrt(arg)
f.write('{0:8.2f} {1:3d} {2:3d} {3:3d} {4:9.4f} {5:9.4f}\n'.format(wvl[0,l], ix[0,l], iy[0,l], iz[0,l], s1/s0, std))
l = l + 1
f.close()
return
def read_rad_spc(fn, STD=False, verbose=False):
# Read MYSTIC mc.rad.spc file
if verbose:
print "Reading MYSTIC mc.rad.spc file: ", fn
if STD:
wvl,ix,iy,iz,rad, std = np.loadtxt(fn, unpack=True)
return (wvl,ix,iy,iz,rad,std)
else:
wvl,ix,iy,iz,rad = np.loadtxt(fn, unpack=True)
return (wvl,ix,iy,iz,rad)
```
#### File: jgliss/my_py3_scripts/my_widgets.py
```python
import os
from collections import OrderedDict as od
import ipywidgets as ipw
from copy import deepcopy
import pandas as pd
import traitlets
try:
from tkinter import Tk, filedialog
tkinter_available = True
except:
tkinter_available =False
import helper_funcs as helpers
from traceback import format_exc
import numpy as np
import matplotlib.pyplot as plt
### WORKING
class TableEditor(object):
_base_layout = ipw.Layout(flex='0 1 auto', width='200px', height='150px')
_btn_width = "100px"
def __init__(self, df, save_dir=None, preconfig_file=None,
default_group=None, new_run_names=[], add_to_index_vars=[],
unstack_indices=[], run_level_idx=0, var_level_idx=2,
**plot_settings):
# Stuff for I/O
if save_dir is None:
save_dir = os.getcwd()
self.saveas_funs = dict(csv = self.save_csv,
xlsx = self.save_xlsx,
png = self.save_png)
self.plot_funs = dict(heatmap = self.plot_heatmap)
self.save_dir = save_dir
# the dataframe
self.df = df
self.df_edit = self.check_shape_init(df)
self._df_edit_last = self.df_edit
self.extractions = od()
self.var_level_idx = var_level_idx
self.run_level_idx = run_level_idx
# Predefined settings for things applied to dataframe
self.new_run_names_init = new_run_names
self.add_to_index_vars = add_to_index_vars
self.unstack_indices = unstack_indices
# Display of Dataframe
self.current_plot = None
self.heatmap_settings = od(cmap="bwr",
cmap_shifted=True)
# Settings for Variable selector
self.groups = od()
self.groups["flagged"] = self.flagged_vars
if preconfig_file:
self.groups.update(helpers.load_varconfig_ini(preconfig_file))
if default_group is None:
default_group = "flagged"
if not default_group in self.groups:
raise ValueError("No default group with ID {} in file {}".format(default_group, preconfig_file))
self.default_group = default_group
self.default_selection = self.groups[default_group]
self._buttons_edit_df = []
# init widgets and actions
self.init_widgets_renamer()
self.init_layout_renamer()
self.init_widgets_varselect()
self.init_layout_varselect()
self.init_layout_reshaper()
self.init_glob_widgets()
self.init_layout()
# initiate layout
self.apply_changes_rename()
self.crop_var_selection()
self.add_to_index(self.add_to_index_vars)
self.unstack(self.unstack_indices)
self.update_ui()
self.disp_current()
self.heatmap_settings.update(plot_settings)
if not tkinter_available:
self.btn_saveas.disabled = True
self.btn_saveas.tooltip = ("Please install tkinter to use this "
"feature. Until then, you can use save "
"button")
@property
def default_plot_fun(self):
return self.plot_funs["heatmap"]
@property
def column_names(self):
return list(self.df_edit.columns)
@property
def data_column_names(self):
df = self.df_edit
if isinstance(df.columns, pd.MultiIndex):
return list(df.columns.levels[0])
return list(df.columns)
@property
def index_level_names(self):
return self.df_edit.index.names
@property
def index_level_col_names(self):
return self.df_edit.columns.names[1:]
@property
def run_names(self):
#return sorted(self.df.index.get_level_values(self.level).unique().values)
return self.df_edit.index.get_level_values(self.run_level_idx).unique().values
@property
def flagged_vars(self):
lvl = self.var_level_idx
return list(self.df[self.df.Flag.astype(bool)].index.get_level_values(lvl).unique().values)
@property
def all_variables(self):
lvl = self.var_level_idx
return self.df.index.get_level_values(lvl).unique().values
def init_glob_widgets(self):
self.disp_table = ipw.Output()
self.output = ipw.Output()
btn_clear_output = ipw.Button(description="Clear output",
layout=ipw.Layout(width=self._btn_width))
btn_clear_output.on_click(self.on_clear_output)
btn_reset = ipw.Button(description="Reset",
tooltip="Reset all changes that were applied",
layout=ipw.Layout(width=self._btn_width))
btn_reset.on_click(self.on_reset)
tip = ("Save file in {} using filename specified in line above. "
"Allowed filetypes are: {}".format(self.save_dir,
list(self.saveas_funs.keys())))
btn_save = ipw.Button(description="Save",
tooltip=tip,
layout=ipw.Layout(width=self._btn_width))
btn_save.on_click(self.on_save)
btn_saveas = ipw.Button(description="Save as",
tooltip="Save current Dataframe as file",
layout=ipw.Layout(width=self._btn_width))
btn_plot = ipw.Button(description="Plot",
layout=ipw.Layout(width=self._btn_width))
btn_plot.on_click(self.on_plot)
btn_saveas.style.button_color = 'lightgreen'
btn_saveas.on_click(self.on_saveas)
self.btn_saveas = btn_saveas
self.glob_toolbar = ipw.HBox([btn_clear_output,
btn_reset,
btn_save,
btn_saveas,
btn_plot])
self.save_name = ipw.Text(placeholder='Insert save filename (e.g. test.csv)')
def init_layout(self):
self.edit_ui = ipw.Tab()
self.edit_ui.children = [self.layout_rename,
self.layout_varselect,
self.layout_reshaper]
self.edit_ui.set_title(0, "Rename run")
self.edit_ui.set_title(1, "Select variables")
self.edit_ui.set_title(2, "Reshape dataframe")
self.layout = ipw.VBox([self.edit_ui,
self.save_name,
self.glob_toolbar,
self.disp_table,
self.output],
layout = ipw.Layout(min_height="600px"))
# =============================================================================
# self.layout.children = [self.layout_varselect,
# self.layout_rename,
# self.layout_reshape,
# self.layout_display]
#
# =============================================================================
def init_widgets_renamer(self):
self.btn_apply_rename = ipw.Button(description='Apply')
self.btn_apply_rename.style.button_color = "lightgreen"
self.btn_apply_rename.on_click(self.on_click_apply_rename)
self.input_rows_rename = []
self.input_fields_rename = []
for i, name in enumerate(self.run_names):
try:
val = self.new_run_names_init[i]
except:
val = name
ipt = ipw.Text(value=val, placeholder='Insert new name',
disabled=False, layout=ipw.Layout(width='100px'))
row = ipw.HBox([ipw.Label(name, layout=ipw.Layout(width='100px')), ipt])
self.input_fields_rename.append(ipt)
self.input_rows_rename.append(row)
self._buttons_edit_df.extend([self.btn_apply_rename])
def init_layout_renamer(self):
self.layout_rename = ipw.HBox([ipw.VBox(self.input_rows_rename),
self.btn_apply_rename])
def init_widgets_varselect(self):
# Init all widgets for variable selector
self.btn_unselect_all = ipw.Button(description='Unselect all')
self.btn_select_all = ipw.Button(description='Select all')
self.btn_flagged = ipw.Button(description="Flagged")
self.btn_apply_varselect = ipw.Button(description='Apply')
self.btn_apply_varselect.style.button_color = 'lightgreen'
self.var_selector = ipw.SelectMultiple(description='',
options=self.all_variables,
value=self.default_selection,
layout=self._base_layout)
self.var_selector_disp = ipw.Textarea(value='',
description='',
disabled=True,
layout=self._base_layout)
self.group_selector = ipw.Dropdown(options=self.groups,
value=self.default_selection,
description='',
disabled=False)
# init all actions for widgets of variable selector
self.var_selector.observe(self.current_varselection)
self.group_selector.observe(self.on_change_dropdown)
#what happens when buttons are clicked
self.btn_select_all.on_click(self.on_select_all_vars_clicked)
self.btn_unselect_all.on_click(self.on_unselect_all_vars_clicked)
self.btn_apply_varselect.on_click(self.on_click_apply_varselect)
self._buttons_edit_df.extend([self.btn_select_all,
self.btn_unselect_all,
self.btn_apply_varselect])
def init_layout_varselect(self):
self.btns_varselect = ipw.VBox([self.btn_select_all,
self.btn_unselect_all,
ipw.Label(),
self.btn_apply_varselect])
l = ipw.HBox([ipw.VBox([ipw.Label("Predefined"), self.group_selector]),
ipw.VBox([ipw.Label("Index level {}".format(self.var_level_idx)),
self.var_selector]),
ipw.VBox([ipw.Label("Current selection"),
self.var_selector_disp]),
self.btns_varselect])
self.layout_varselect = l
self.current_varselection(1)
#self.layout = ipw.VBox([self.edit_area, self.output])
def init_layout_reshaper(self):
# COLUMN TO INDEX
col2idx_header = ipw.Label("Column to index")
self.col2idx_select = ipw.SelectMultiple(description='',
options=self.column_names,
value=(),
layout=self._base_layout)
col2idx_btn_apply = ipw.Button(description = "Add", layout=ipw.Layout(width=self._btn_width))
col2idx_btn_apply.on_click(self.on_add_col)
col2idx_btn_apply.tooltip = "Add selected columns to Multiindex"
col2idx_btn_apply.style.button_color = 'lightgreen'
col2idx_layout = ipw.VBox([col2idx_header,
self.col2idx_select,
ipw.HBox([col2idx_btn_apply])])
# UNSTACKING
unstack_header = ipw.Label("Unstack index")
self.unstack_select = ipw.SelectMultiple(description='',
options=self.index_level_names,
value=(),
layout=self._base_layout)
unstack_btn_apply = ipw.Button(description = "Apply", layout=ipw.Layout(width=self._btn_width))
unstack_btn_apply.on_click(self.on_unstack)
unstack_btn_apply.style.button_color = 'lightgreen'
unstack_btn_apply.tooltip = "Put selected indices into columns"
unstack_layout = ipw.VBox([unstack_header,
self.unstack_select,
ipw.HBox([unstack_btn_apply])])
# STACKING
stack_header = ipw.Label("Stack index")
self.stack_select = ipw.SelectMultiple(description='',
options=self.index_level_col_names,
value=(),
layout=self._base_layout)
stack_btn_apply = ipw.Button(description = "Apply", layout=ipw.Layout(width=self._btn_width))
stack_btn_apply.on_click(self.on_stack)
stack_btn_apply.style.button_color = 'lightgreen'
stack_btn_apply.tooltip = "Put selected indices into rows"
stack_layout = ipw.VBox([stack_header,
self.stack_select,
ipw.HBox([stack_btn_apply])])
# SELECT COLUMN
extract_header = ipw.Label("Extract column")
self.extract_select = ipw.Select(description='',
options=self.data_column_names,
layout=self._base_layout)
extract_btn_apply = ipw.Button(description="Apply",
layout=ipw.Layout(width=self._btn_width))
extract_btn_apply.on_click(self.on_extract)
extract_btn_apply.style.button_color = 'lightgreen'
extract_btn_apply.tooltip = "Extract currently selected column"
extract_btn_undo = ipw.Button(description="Undo",
layout=ipw.Layout(width=self._btn_width))
extract_btn_undo.on_click(self.on_extract_undo)
extract_btn_undo.tooltip = "Undo last column extraction"
extract_layout = ipw.VBox([extract_header,
self.extract_select,
ipw.HBox([extract_btn_undo,
extract_btn_apply])])
self.layout_reshaper = ipw.HBox([col2idx_layout,
unstack_layout,
stack_layout,
extract_layout])
self._buttons_edit_df.extend([col2idx_btn_apply,
unstack_btn_apply,
stack_btn_apply,
extract_btn_apply])
# Methods for renamer
def on_click_apply_rename(self, b):
self.apply_changes_rename()
self.disp_current()
def apply_changes_rename(self):
df = self.df_edit
mapping = od()
for i, name in enumerate(self.run_names):
repl = str(self.input_fields_rename[i].value)
mapping[name] = repl
self.df_edit = df.rename(index=mapping, level=self.run_level_idx)
self.output.append_display_data("Applying renaming: {}".format(mapping))
# Methods for variable selector
def on_unselect_all_vars_clicked(self, b):
self.unselect_all()
def on_select_all_vars_clicked(self, b):
self.select_all()
def on_change_dropdown(self, b):
self.select_current_group()
def unselect_all(self):
self.var_selector.value = ()
def select_all(self):
self.var_selector.value = self.var_selector.options
def select_current_group(self):
self.var_selector.value = self.group_selector.value
def current_varselection(self, b):
s=""
for item in self.var_selector.value:
s += "{}\n".format(item)
self.var_selector_disp.value = s
def crop_var_selection(self):
try:
self.df_edit = helpers.crop_selection_dataframe(self.df_edit,
self.var_selector.value,
levels=self.var_level_idx)
self.output.append_display_data("Applying variable selection: {}".format(self.var_selector.value))
except Exception as e:
self.output.append_display_data("WARNING: failed to extract selection.\nTraceback {}".format(format_exc()))
def on_click_apply_varselect(self, b):
self.crop_var_selection()
self.disp_current()
# Methods for reshaper
def update_ui(self):
"""Recreate user interface"""
if not isinstance(self.df_edit, pd.Series):
if isinstance(self.df_edit.columns, pd.MultiIndex):
self.col2idx_select.options = ("N/A", "Current dataframe is unstacked")
self.col2idx_select.disabled = True
for item in self.input_fields_rename:
item.disabled = True
self.btn_apply_rename.disabled=True
tip = ("Dataframe contains unstacked indices. Renaming can only be "
"applied for dataframe that has not been unstacked. You "
"may re-stack the dataframe using the tab 'Reshape dataframe'")
self.btn_apply_rename.tooltip = tip
self.btn_apply_varselect.disabled = True
self.btn_apply_varselect.tooltip = tip
else:
self.col2idx_select.options = self.column_names
self.col2idx_select.value=()
self.col2idx_select.disabled = False
for item in self.input_fields_rename:
item.disabled = False
self.btn_apply_rename.disabled=False
self.btn_apply_varselect.disabled=False
tip = ("Apply current settings")
self.btn_apply_rename.tooltip = tip
self.btn_apply_varselect.tooltip = tip
self.unstack_select.options = self.index_level_names
self.unstack_select.value = ()
self.stack_select.options = self.index_level_col_names
self.stack_select.value = ()
self.extract_select.options = self.data_column_names
self.disp_table.clear_output()
self.disp_current()
def on_add_col(self, b):
var_names = list(self.col2idx_select.value)
self.add_to_index(var_names)
self.update_ui()
def on_unstack(self, b):
level_names = list(self.unstack_select.value)
self.unstack(level_names)
self.update_ui()
def on_stack(self, b):
level_names = list(self.stack_select.value)
self.stack(level_names)
self.update_ui()
def on_extract(self, b):
val = str(self.extract_select.value)
self._df_edit_last = self.df_edit
self.df_edit = self.df_edit[val]
self.update_ui()
self.freeze_ui()
self.disp_current()
def freeze_ui(self, disable=True):
for btn in self._buttons_edit_df:
btn.disabled = disable
def on_extract_undo(self, b):
self.df_edit = self._df_edit_last
self.update_ui()
self.freeze_ui(False)
self.disp_current()
# global events
def on_clear_output(self, b):
self.output.clear_output()
def on_save(self, b):
self.save()
def on_saveas(self, b):
self.saveas()
def on_reset(self, b):
self.reset()
self.update_ui()
def on_plot(self, b):
self.plot()
def check_shape_init(self, df):
if isinstance(df.columns, pd.MultiIndex):
#print("Initial Dataframe is unstacked, stacking back")
return helpers.stack_dataframe_original_idx(df)
return deepcopy(df)
def add_to_index(self, var_names):
if isinstance(var_names, str):
var_names = [var_names]
for item in var_names:
self.df_edit = self.df_edit.set_index([self.df_edit.index, item])
def unstack(self, level_names):
self.df_edit = self.df_edit.unstack(level_names)
def stack(self, level_names):
self.df_edit = helpers.stack_dataframe(self.df_edit, level_names)
def reset(self):
self.df_edit = self.check_shape_init(self.df)
def disp_current(self):
#self.output.append_display_data(ipw.Label("PREVIEW current selection", fontsize=22))
self.disp_table.clear_output()
if isinstance(self.df_edit, pd.Series):
disp = self.df_edit
else:
disp = self.df_edit.head().style.set_caption("PREVIEW")
self.disp_table.append_display_data(disp)
#self.disp_table.append_display_data(preview)
#self.output
def plot_heatmap(self, ax):
try:
self.current_plot = helpers.df_to_heatmap(self.df_edit, ax=ax,
**self.heatmap_settings)
except Exception as e:
self.output.append_display_data("Failed to plot heatmap: Error "
"message: {}".format(repr(e)))
def plot(self):
self.disp_table.clear_output()
with self.disp_table:
fig, ax = plt.subplots(1,1, figsize=(14, 8))
self.plot_heatmap(ax=ax)
plt.show()
#self.default_plot_fun()
#self.disp_table.append_display_data()
def save_png(self, fpath):
if not self.current_plot:
self.default_plot_fun()
self.current_plot.figure.savefig(fpath)
def save_csv(self, fpath):
self.df_edit.to_csv(fpath)
def save_xlsx(self, fpath):
writer = pd.ExcelWriter(fpath)
self.df_edit.to_excel(writer)
writer.save()
writer.close()
def open_saveas_dialog(self):
"""Generate instance of tkinter.asksaveasfilename
"""
# Create Tk root
root = Tk()
# Hide the main window
root.withdraw()
# Raise the root to the top of all windows.
root.call('wm', 'attributes', '.', '-topmost', True)
# The chosen filename will be returned
filename = filedialog.asksaveasfilename(initialdir=self.save_dir,
title = "Save as",
filetypes = (("csv files","*.csv"),
("Excel files","*.xlsx"),
("PNG files", "*.png")))
return filename
def save(self):
savename = os.path.basename(self.save_name.value)
self.saveas(filename=os.path.join(self.save_dir, savename))
def saveas(self, filename=None):
msg = "Failed to save file, enter valid filename and type"
if filename is None:
if tkinter_available:
filename = self.open_saveas_dialog()
else:
msg = ("Failed to save table. Could not open file dialog "
"please install tkinter or insert valid name in "
"line above")
for ftype, func in self.saveas_funs.items():
if filename.lower().endswith(ftype):
try:
func(filename)
msg = "Succesfully saved: {}".format(filename)
except Exception as e:
msg = ("Failed to save {}. Error {}".format(filename, repr(e)))
break
self.output.append_display_data(msg)
def __call__(self):
return self.layout
class IndexRenamer(object):
output = ipw.Output()
def __init__(self, df, level=0, suggestions=[]):
self.df = df
self._df_edit = df
self.level = level
self.suggestions = suggestions
self.init_widgets()
self.init_actions()
self.init_layout()
self.renamed_info = od()
self.apply_changes()
@property
def names(self):
#return sorted(self.df.index.get_level_values(self.level).unique().values)
return self.df.index.get_level_values(self.level).unique().values
@property
def df_edit(self):
return deepcopy(self._df_edit)
def init_widgets(self):
self.btn_apply = ipw.Button(description='Apply')
self.btn_apply.style.button_color = "lightgreen"
self.input_rows = []
self.input_fields = []
for i, name in enumerate(self.names):
try:
val = self.suggestions[i]
except:
val = name
ipt = ipw.Text(value=val, placeholder='Insert new name',
disabled=False, layout=ipw.Layout(width='100px'))
row = ipw.HBox([ipw.Label(name, layout=ipw.Layout(width='100px')), ipt])
self.input_fields.append(ipt)
self.input_rows.append(row)
def init_actions(self):
#what happens when the state of the selection is changed (display current selection)
self.btn_apply.on_click(self.on_click_apply)
def init_layout(self):
edit_area = ipw.HBox([ipw.VBox(self.input_rows), self.btn_apply])
self.layout = ipw.VBox([edit_area, self.output])
def on_click_apply(self, b):
self.apply_changes()
def disp_current(self):
self.output.clear_output()
#self.output.append_display_data(ipw.Label("PREVIEW current selection", fontsize=22))
self.output.append_display_data(self._df_edit.style.set_caption("PREVIEW"))
self.output
def apply_changes(self):
df = self.df
mapping = od()
for i, name in enumerate(self.names):
repl = str(self.input_fields[i].value)
mapping[name] = repl
self._df_edit = df.rename(index=mapping, level=self.level)
self.disp_current()
def __call__(self):
return self.layout
class SelectVariable(object):
output = ipw.Output()
def __init__(self, df, level, preconfig_file=None, default_group=None):
#df.sort_index(inplace=True)
self.df = df
self._df_edit = df
self.level = level
self.groups = od()
self.groups["flagged"] = self.flagged_vars
if preconfig_file:
self.groups.update(helpers.load_varconfig_ini(preconfig_file))
if default_group is None:
default_group = "flagged"
if not default_group in self.groups:
raise ValueError("No default group with ID {} in file {}".format(default_group, preconfig_file))
self.default_selection = self.groups[default_group]
self.vals = self.df.index.get_level_values(self.level).unique().values
self._base_layout = ipw.Layout(flex='0 1 auto',
height='200px',
min_height='200px',
width='auto')
self.init_widgets()
self.init_actions()
self.init_layout()
self.select_current_group()
self.print_current(1)
self.crop_selection()
self.disp_current()
@property
def df_edit(self):
return deepcopy(self._df_edit)
@property
def flagged_vars(self):
return list(self.df[self.df.Flag].index.get_level_values(self.level).unique().values)
def init_widgets(self):
self.btn_unselect_all = ipw.Button(description='Unselect all')
self.btn_select_all = ipw.Button(description='Select all')
self.btn_flagged = ipw.Button(description="Flagged")
self.btn_apply = ipw.Button(description='Apply')
self.btn_apply.style.button_color = 'lightgreen'
self.var_selector = ipw.SelectMultiple(description='',
options=self.vals,
value=self.flagged_vars,
min_width='150px',
layout=self._base_layout)
self.current_disp = ipw.Textarea(value='',
description='',
disabled=True,
layout=self._base_layout)
#groups = [key for key, val in self.groups.items()]
self.group_selector = ipw.Dropdown(options=self.groups,
value=self.default_selection,
description='',
disabled=False)
#self.output = ipw.Output()
def init_actions(self):
#what happens when the state of the selection is changed (display current selection)
self.var_selector.observe(self.print_current)
self.group_selector.observe(self.on_change_dropdown)
#what happens when buttons are clicked
self.btn_select_all.on_click(self.on_select_all_vars_clicked)
self.btn_unselect_all.on_click(self.on_unselect_all_vars_clicked)
self.btn_apply.on_click(self.on_click_apply)
def init_layout(self):
self.btns = ipw.VBox([self.btn_select_all,
self.btn_unselect_all,
ipw.Label(),
self.btn_apply])
self.edit_area = ipw.HBox([ipw.VBox([ipw.Label("Predefined"), self.group_selector]),
ipw.VBox([ipw.Label("Index level {}".format(self.level)), self.var_selector]),
ipw.VBox([ipw.Label("Current selection"), self.current_disp]),
self.btns])
self.layout = ipw.VBox([self.edit_area, self.output])
def on_unselect_all_vars_clicked(self, b):
self.unselect_all()
def on_select_all_vars_clicked(self, b):
self.select_all()
def on_change_dropdown(self, b):
self.select_current_group()
def unselect_all(self):
self.var_selector.value = ()
def select_all(self):
self.var_selector.value = self.var_selector.options
def select_current_group(self):
self.var_selector.value = self.group_selector.value
def disp_current(self):
self.output.clear_output()
#self.output.append_display_data(ipw.Label("PREVIEW current selection", fontsize=22))
self.output.append_display_data(self._df_edit.head().style.set_caption("PREVIEW HEAD"))
self.output
def crop_selection(self):
try:
self._df_edit = helpers.crop_selection_dataframe(self.df,
self.var_selector.value,
levels=self.level)
except Exception as e:
print("WARNING: failed to extract selection.\nTraceback {}".format(format_exc()))
def on_click_apply(self, b):
self.crop_selection()
self.disp_current()
def print_current(self, b):
s=""
for item in self.var_selector.value:
s += "{}\n".format(item)
self.current_disp.value = s
def __repr__(self):
return repr(self.layout)
def __call__(self):
return self.layout
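# --- Editor's addition: illustrative usage sketch, not part of the original module ---
# A minimal sketch of how SelectVariable might be driven from a notebook cell.
# It assumes `df` is a MultiIndex DataFrame with a boolean "Flag" column and an
# index level named "Variable"; the function name is hypothetical.
def _example_select_variable(df):
    """Display the selection widget and return it so that ``df_edit`` can be
    read after the user has clicked "Apply"."""
    from IPython.display import display
    selector = SelectVariable(df, level="Variable")
    display(selector())  # selection list, predefined groups and Apply button
    return selector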
class EditDictCSV(object):
"""Widget that can be used to interactively edit a CSV file
The CSV is supposed to be created from a "simple" dictionary with entries
strings.
"""
output = ipw.Output()
def __init__(self, csv_loc):
self.csv_loc = csv_loc
self.load_csv()
self.init_widgets()
self.init_actions()
self.init_layout()
def init_widgets(self):
self.btn_update = ipw.Button(description='Update',
tooltip=('Updates the current dictionary based on values in text fields'
'(for further analysis, use Save csv button to write to CSV)'))
self.btn_reload = ipw.Button(description='Reload',
tooltip='Reloads information from file var_info.csv')
self.btn_save = ipw.Button(description='Update and save',
tooltip='Updates current selection and writes to CSV')
self.btn_save.style.button_color = "lightgreen"
self.input_rows = []
self.input_fields = {}
for name, val in self.var_dict.items():
ipt = ipw.Text(value=val, placeholder='Insert new name',
disabled=False,
layout = ipw.Layout(min_width="200px"))
row = ipw.HBox([ipw.Label(name, layout=ipw.Layout(min_width="200px")), ipt])
self.input_fields[name] = ipt
self.input_rows.append(row)
def init_actions(self):
self.btn_update.on_click(self.on_click_update)
self.btn_reload.on_click(self.on_click_load_csv)
self.btn_save.on_click(self.on_click_save)
def init_layout(self):
vbox_buttons = ipw.VBox([self.btn_reload,
self.btn_update,
self.btn_save])
self.layout = ipw.HBox([ipw.VBox(self.input_rows), vbox_buttons,
self.output])
def on_click_update(self, b):
self.apply_changes()
def on_click_load_csv(self, b):
self.load_csv()
self.update_info_fields()
def on_click_save(self, b):
self.save_csv()
def save_csv(self):
self.apply_changes()
helpers.save_varinfo_dict_csv(self.var_dict, self.csv_loc)
def load_csv(self):
if self.csv_loc is None or not os.path.exists(self.csv_loc):
raise IOError("Please provide path to csv file")
try:
self.var_dict = helpers.load_varinfo_dict_csv(self.csv_loc)
except Exception as e:
self.write_to_output(format_exc())
def update_info_fields(self):
for key, val in self.var_dict.items():
self.input_fields[key].value = val
def write_to_output(self, msg):
self.output.append_display_data(msg)
self.output
def apply_changes(self):
new = od()
for key, edit in self.input_fields.items():
new[key] = edit.value
self.var_dict = new
def __call__(self):
return self.layout
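# --- Editor's addition: illustrative usage sketch, not part of the original module ---
# A minimal sketch of how EditDictCSV might be used; the file name
# "var_info.csv" is an assumption, and the file must already exist because
# load_csv() raises IOError otherwise.
def _example_edit_dict_csv(csv_loc="var_info.csv"):
    """Display the CSV editor widget and return it so that the edited mapping
    can later be read from ``editor.var_dict``."""
    from IPython.display import display
    editor = EditDictCSV(csv_loc)
    display(editor())  # one text field per key plus Reload / Update / Save buttons
    return editor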
class SaveAsButton(ipw.Button):
"""A file widget that leverages tkinter.filedialog.
Based on and modified from ``SelectFilesButton`` (see below) or here:
https://codereview.stackexchange.com/questions/162920/file-selection-button-for-jupyter-notebook
"""
def __init__(self, save_dir=None):
super(SaveAsButton, self).__init__()
# Add the selected_files trait
self.add_traits(files=traitlets.traitlets.List())
if not save_dir:
save_dir = os.getcwd()
self.save_dir = save_dir
# Create the button.
self.description = "Save as"
self.icon = "square-o"
self.style.button_color = "orange"
self.file_name = ""
# Set on click behavior.
self.on_click(self.saveas)
def saveas(self, b):
"""Generate instance of tkinter.asksaveasfilename
Parameters
----------
b : obj:
An instance of ipywidgets.widgets.Button
"""
# Create Tk root
root = Tk()
# Hide the main window
root.withdraw()
# Raise the root to the top of all windows.
root.call('wm', 'attributes', '.', '-topmost', True)
        # The chosen file name will be stored in self.file_name
self.file_name = filedialog.asksaveasfilename(initialdir=self.save_dir,
title = "Save as",
filetypes = (("csv files","*.csv"),("all files","*.*")))
self.description = "Files Selected"
self.icon = "check-square-o"
self.style.button_color = "lightgreen"
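# --- Editor's addition: illustrative usage sketch, not part of the original module ---
# SaveAsButton opens a native tkinter "Save as" dialog when clicked; the chosen
# path is stored in the ``file_name`` attribute (empty string until confirmed).
def _example_save_as_button(save_dir=None):
    """Display the button and return it so ``btn.file_name`` can be queried
    after the dialog has been confirmed."""
    from IPython.display import display
    btn = SaveAsButton(save_dir=save_dir)
    display(btn)
    return btn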
### DOWNLOADED
class SelectFilesButton(ipw.Button):
"""A file widget that leverages tkinter.filedialog.
Downloaded here: https://codereview.stackexchange.com/questions/162920/file-selection-button-for-jupyter-notebook
"""
def __init__(self):
super(SelectFilesButton, self).__init__()
# Add the selected_files trait
self.add_traits(files=traitlets.traitlets.List())
# Create the button.
self.description = "Select Files"
self.icon = "square-o"
self.style.button_color = "orange"
# Set on click behavior.
self.on_click(self.select_files)
@staticmethod
def select_files(b):
"""Generate instance of tkinter.filedialog.
Parameters
----------
b : obj:
An instance of ipywidgets.widgets.Button
"""
# Create Tk root
root = Tk()
# Hide the main window
root.withdraw()
# Raise the root to the top of all windows.
root.call('wm', 'attributes', '.', '-topmost', True)
        # List of selected files will be set to b.files
b.files = filedialog.askopenfilename(multiple=True)
b.description = "Files Selected"
b.icon = "check-square-o"
b.style.button_color = "lightgreen"
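# --- Editor's addition: illustrative usage sketch, not part of the original module ---
# SelectFilesButton stores the selected paths in its ``files`` trait once the
# tkinter dialog has been confirmed.
def _example_select_files_button():
    """Display the button and return it so ``btn.files`` can be read after the
    user has picked one or more files."""
    from IPython.display import display
    btn = SelectFilesButton()
    display(btn)
    return btn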
class FileBrowser(object):
"""Widget for browsing files interactively
The widget was downloaded and modified from here:
https://gist.github.com/DrDub/6efba6e522302e43d055#file-selectfile-py
"""
def __init__(self, path=None):
if path is None:
path = os.getcwd()
self.path = path
self._update_files()
def _update_files(self):
self.files = list()
self.dirs = list()
if(os.path.isdir(self.path)):
for f in os.listdir(self.path):
ff = self.path + "/" + f
if os.path.isdir(ff):
self.dirs.append(f)
else:
self.files.append(f)
def _update(self, box):
def on_click(b):
if b.description == '..':
self.path = os.path.split(self.path)[0]
else:
self.path = self.path + "/" + b.description
self._update_files()
self._update(box)
buttons = []
if self.files:
button = ipw.Button(description='..', background_color='#d0d0ff')
button.on_click(on_click)
buttons.append(button)
for f in self.dirs:
button = ipw.Button(description=f, background_color='#d0d0ff')
button.on_click(on_click)
buttons.append(button)
for f in self.files:
button = ipw.Button(description=f)
button.on_click(on_click)
buttons.append(button)
box.children = tuple([ipw.HTML("<h2>%s</h2>" % (self.path,))] + buttons)
def __call__(self):
box = ipw.VBox()
self._update(box)
return box
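# --- Editor's addition: illustrative usage sketch, not part of the original module ---
# FileBrowser renders the current directory as a column of buttons; ``path``
# always reflects the directory that is currently displayed.
def _example_file_browser(start_path=None):
    """Display the browser and return it so ``browser.path`` can be read once
    the user has navigated to the directory of interest."""
    from IPython.display import display
    browser = FileBrowser(path=start_path)
    display(browser())
    return browser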
### UNDER DEVELOPMENT
class SelectVariableNew(object):
output = ipw.Output()
def __init__(self, df, num_cols=3):
#df.sort_index(inplace=True)
self.df = df
#self.vals = tuple(self.df.index.levels[2].values)
self.vals = self.df.index.get_level_values("Variable").unique().values
self._df_edit = df
self.num_cols = num_cols
self.init_widgets()
self.init_actions()
self.init_layout()
#self.print_current(1)
self.highlight_selection(1)
#self.crop_selection()
self.disp_current()
@property
def df_edit(self):
return deepcopy(self._df_edit)
@property
def flagged_vars(self):
return list(self.df[self.df.Flag].index.get_level_values("Variable").unique().values)
def init_widgets(self):
self.btn_unselect_all = ipw.Button(description='Unselect all')
self.btn_select_all = ipw.Button(description='Select all')
self.btn_flagged = ipw.Button(description="Flagged")
self.btn_apply = ipw.Button(description='Apply')
self.btn_apply.style.button_color = 'lightgreen'
self.items = []
self.input_fields = []
for num, name in enumerate(self.vals):
ipt = ipw.Button(description=name, disabled=True, layout=ipw.Layout(width='150px'))
order = ipw.Text(value=str(num+1), disabled=False, layout=ipw.Layout(width='50px'))
order.observe(self.on_change_input_field)
self.items.append(ipt)
self.input_fields.append(order)
#self.output = ipw.Output()
def init_actions(self):
#what happens when the state of the selection is changed (display current selection)
#self.var_selector.observe(self.print_current)
#what happens when buttons are clicked
self.btn_select_all.on_click(self.on_select_all_vars_clicked)
self.btn_unselect_all.on_click(self.on_unselect_all_vars_clicked)
self.btn_flagged.on_click(self.on_flagged_clicked)
self.btn_apply.on_click(self.on_click_apply)
def init_layout(self):
self.btns = ipw.HBox([self.btn_select_all,
self.btn_unselect_all,
self.btn_flagged,
self.btn_apply])
self.init_input_area()
self.layout = ipw.VBox([self.btns, self.edit_area, self.output])
def init_input_area(self):
num_cols = self.num_cols
items = self.items
input_fields = self.input_fields
col_vals = np.array_split(np.arange(len(items)), num_cols)
cols = []
for ival in col_vals:
col_rows = []
for val in ival:
col_rows.append(ipw.HBox([items[val], input_fields[val]]))
cols.append(ipw.VBox(col_rows))
self.edit_area = ipw.HBox(cols)
def on_unselect_all_vars_clicked(self, b):
self.unselect_all()
def on_select_all_vars_clicked(self, b):
self.select_all()
def on_flagged_clicked(self, b):
self.select_flagged()
def on_change_input_field(self, b):
        print(b.new)  # b is a traitlets change dict; print the new value of the observed trait
@property
def current_order(self):
        nums = []
        for item in self.input_fields:
            nums.append(item.value)
        return nums
def highlight_selection(self, b):
for i, item in enumerate(self.input_fields):
try:
int(item.value)
self.items[i].style.button_color = "#e6ffee"
except:
self.items[i].style.button_color = "white"
def unselect_all(self):
pass
#self.var_selector.value = ()
def select_all(self):
pass
#self.var_selector.value = self.var_selector.options
def select_flagged(self):
pass
#self.var_selector.value = self.flagged_vars
def disp_current(self):
self.output.clear_output()
#self.output.append_display_data(ipw.Label("PREVIEW current selection", fontsize=22))
self.output.append_display_data(self._df_edit.head().style.set_caption("PREVIEW HEAD"))
self.output
def crop_selection(self):
raise NotImplementedError
def on_click_apply(self, b):
self.crop_selection()
self.disp_current()
def __repr__(self):
return repr(self.layout)
def __call__(self):
return self.layout
class ReshapeAndSelect(object):
"""Widget that can be used to reshape a Dataframe and select individual data columns"""
output = ipw.Output()
def __init__(self, df):
raise NotImplementedError()
self.df = df
self._df_edit = df
self.index_names = df.index.names
self.col_names = df.columns
@property
def df_edit(self):
return deepcopy(self._df_edit)
@property
def flagged_vars(self):
return list(self.df[self.df.Flag].index.get_level_values("Variable").unique().values)
def init_widgets(self):
self.btn_unselect_all = ipw.Button(description='Unselect all')
self.btn_select_all = ipw.Button(description='Select all')
self.btn_flagged = ipw.Button(description="Flagged")
self.btn_apply = ipw.Button(description='Apply')
self.btn_apply.style.button_color = 'lightgreen'
self.var_selector = ipw.SelectMultiple(description="Variables",
options=self.vals,
value=self.flagged_vars,
min_width='150px',
layout=self.box_layout)
self.current_disp = ipw.Textarea(value='',
description='Current:',
disabled=True,
layout=self.box_layout)
#self.output = ipw.Output()
def init_actions(self):
#what happens when the state of the selection is changed (display current selection)
self.var_selector.observe(self.print_current)
#what happens when buttons are clicked
self.btn_select_all.on_click(self.on_select_all_vars_clicked)
self.btn_unselect_all.on_click(self.on_unselect_all_vars_clicked)
self.btn_flagged.on_click(self.on_flagged_clicked)
self.btn_apply.on_click(self.on_click_apply)
def init_display(self):
self.btns = ipw.VBox([self.btn_select_all,
self.btn_unselect_all,
self.btn_flagged,
ipw.Label(),
self.btn_apply])
self.edit_area = ipw.HBox([self.var_selector,
self.current_disp,
self.btns])
self.layout = ipw.VBox([self.edit_area, self.output])
def on_unselect_all_vars_clicked(self, b):
self.unselect_all()
def on_select_all_vars_clicked(self, b):
self.select_all()
def on_flagged_clicked(self, b):
self.select_flagged()
def unselect_all(self):
self.var_selector.value = ()
def select_all(self):
self.var_selector.value = self.var_selector.options
def select_flagged(self):
self.var_selector.value = self.flagged_vars
def disp_current(self):
self.output.clear_output()
#self.output.append_display_data(ipw.Label("PREVIEW current selection", fontsize=22))
self.output.append_display_data(self._df_edit.head().style.set_caption("PREVIEW HEAD"))
self.output
def crop_selection(self):
idx = pd.IndexSlice
try:
self._df_edit = self.df.loc[idx[:, :, self.var_selector.value, :], :]
except Exception as e:
print("WARNING: failed to extract selection.\nTraceback {}".format(format_exc()))
def on_click_apply(self, b):
self.crop_selection()
self.disp_current()
def print_current(self, b):
s=""
for item in self.var_selector.value:
s += "{}\n".format(item)
self.current_disp.value = s
def __repr__(self):
return repr(self.layout)
def __call__(self):
return self.layout
```