metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "jkleve/python_boilers",
"score": 3
} |
#### File: jkleve/python_boilers/argparse_logzero_boiler.py
```python
from argparse import ArgumentParser
import logging
import logzero
from logzero import logger
import sys
# __author__ = None
# __license__ = None
__version__ = '0.1.0'
PROGRAM_NAME = 'argparse logzero boiler'
LOGGING_FORMAT = '%(asctime)s %(levelname)-7s %(name)s: %(message)s'
LOGGING_DATEFMT = '%H:%M:%S'
def program_description():
"""Returns a string with a description of the program
"""
return None
def configure_logging(verbosity):
if verbosity == 1:
logzero.loglevel(logging.INFO)
elif verbosity >= 2:
logzero.loglevel(logging.DEBUG)
else:
logzero.loglevel(logging.WARNING)
def main(args):
pass
if __name__ == '__main__':
parser = ArgumentParser(prog=PROGRAM_NAME, description=program_description())
parser.add_argument('-v', '--verbose', default=0, action='count', help='verbosity level. counting (e.g. -v, -vv)')
parser.add_argument('--version', action='version', version='%(prog)s {__version__}'.format(**globals()))
# parser.add_argument('-n', '--number', type=int)
args = parser.parse_args()
configure_logging(args.verbose)
main(args)
``` |
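A minimal usage sketch of the verbosity pattern above: the counted `-v` flag maps to a log level the same way `configure_logging()` maps it to a logzero level. The snippet below is a hypothetical, stdlib-only stand-in (no logzero) with a simulated `-vv` invocation.

```python
# Hypothetical, stdlib-only sketch of the counted-verbosity pattern used above.
import argparse
import logging

parser = argparse.ArgumentParser(prog='demo')
parser.add_argument('-v', '--verbose', default=0, action='count')
args = parser.parse_args(['-vv'])  # simulate running `demo -vv`

# 0 -> WARNING, 1 -> INFO, 2+ -> DEBUG, mirroring configure_logging()
level = {0: logging.WARNING, 1: logging.INFO}.get(args.verbose, logging.DEBUG)
logging.basicConfig(level=level, format='%(asctime)s %(levelname)-7s %(name)s: %(message)s')
logging.getLogger('demo').debug('verbosity=%d', args.verbose)
```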
{
"source": "jklewis99/furiosa",
"score": 3
} |
#### File: jklewis99/furiosa/automate_testing.py
```python
import pandas as pd
from furiosanet import test, get_layers_from_file
def main():
'''
test models saved in the csv
'''
models = pd.read_csv("model-evaluation.csv", index_col=0).index.tolist()
for model in models:
weights_file = "weights/automated/" + model
layers = get_layers_from_file(weights_file)
test(weights_file, layers, show_fig=False)
if __name__ == "__main__":
main()
```
#### File: jklewis99/furiosa/automate_training.py
```python
import random
import numpy as np
from furiosanet import train
def main():
'''
randomly create a model and train it
'''
    num_trials = 100
    hidden_layers = np.arange(2, 6, 1)
    range_params = np.arange(20, 5000, 10)
    for _ in range(num_trials):
layers = np.random.choice(range_params, random.choice(hidden_layers)).tolist()
train(layers, "mean_squared_error", scale_input=True)
if __name__ == "__main__":
main()
```
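As a sanity check on the random-architecture loop in `automate_training.py`, here is a small sketch (independent of `furiosanet.train`) showing what one sampled `layers` list looks like; the printed values are illustrative only.

```python
# Sketch of the architecture sampling above: a random hidden-layer count (2-5)
# and a random width (20-4990, step 10) for each layer.
import random
import numpy as np

hidden_layers = np.arange(2, 6, 1)
range_params = np.arange(20, 5000, 10)
layers = np.random.choice(range_params, random.choice(hidden_layers)).tolist()
print(layers)  # e.g. [240, 1310, 3570]
```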
#### File: video-editor-cached/classes/clip.py
```python
import numpy as np
from moviepy.editor import VideoFileClip
from moviepy.editor import ImageClip
import cv2
# TODO: import fps of project
PROJECT_FPS = 24
# TODO: create a function that separates audio data
# and video data to create their respective clips
class Clip():
'''
top level class for audio and video clips
'''
def __init__(self):
# self.__duration = None
self.media = None
# def get_duration(self):
# return self.__duration
class FuriosaVideoClip(Clip):
'''
class of furiosa video clip object
can be created from an imported path OR from a
moviepy VideoClip object
'''
def __init__(self, path=None, clip=None, start=0, end=None):
super(FuriosaVideoClip, self).__init__()
self.path_extension = None
self.__duration = end
self.end = 0
self.start = 0
self.clip_name = None
self.video_clip = None
self.__original = None
self.create_video_clip(path=path, clip=clip, start=start, end=end)
def create_video_clip(self, path=None, clip=None, start=0, end=None):
'''
top-level method to instantiate this object's attributes
'''
if path:
self.video_clip_from_path(path)
elif clip:
self.video_clip_from_clip(clip, start, end)
else:
print('ERROR: Must specify a path or reference to moviepy VideoClip')
self.__set_duration()
def video_clip_from_path(self, path):
'''
create a video clip out of the file path specified
Parameters
----------
path:
String of path to file with supported extension
'''
self.path_extension = path.split(".")[-1]
if self.path_extension in ['jpg', 'png', 'jpeg']:
self.video_clip = self.__original = ImageClip(path, fps=PROJECT_FPS, duration=self.__duration)
elif self.path_extension in ['mp4', 'mov', 'avi', 'gif']:
self.video_clip = self.__original = VideoFileClip(path)
        else:
            print('ERROR: file specified could not be found or the extension '
                  'is not currently supported.')
            return
        self.end = self.video_clip.duration
def video_clip_from_clip(self, clip, start, end):
'''
create a video clip out of a reference to moviepy VideoClip
Parameters
----------
clip:
moviepy VideoFileClip object
start:
time (float) in reference to clip from which to start new clip
end:
            time (float) in reference to clip at which to end new clip
'''
self.__original = clip
self.video_clip = clip.subclip(start, end)
self.start = start
self.end = end
def trim(self, trim_from, time):
'''
method to cut the ends of a clip
Parameters
----------
trim_from:
the end from which the trimming will occur, but it must be
"front" or "back"
time: time stamp of when video will now start/end
Return
------
the updated copy of the new clip
'''
# TODO: trim out of bounds (front trim exceeds end of clip)
# I would like for this check to be outside of this method
if trim_from == "front":
self.start = time
self.video_clip = self.video_clip.subclip(t_start=time)
elif trim_from == "back":
self.end = time
self.video_clip = self.video_clip.subclip(t_end=time)
self.__set_duration()
return self.video_clip # I may not need to return
# TODO: this method may be better outside of the current clip
def split(self, split_point=None):
'''
method to split current video clip into 2 video clips,
but preserves the original clip in both. This method
will make the current clip the first subclip and will
        return a reference to the new second subclip
Parameters
----------
split_point: defaults to the midpoint, but will accept
a floating point number specifying time at which
to split
Return
------
tuple of references to self and new clip
'''
if not split_point:
split_point = self.end / 2.0
previous_end = self.end
self.trim("back", split_point) # subclip is self.video_clip
# create a reference to a new FuriosaVideoClip object
subclip2 = FuriosaVideoClip(clip=self.__original, start=split_point, end=previous_end)
self.__set_duration()
return self, subclip2
# TODO: iterable effects, though this is not a recommended method
    def apply_filter(self):
        # placeholder: materialize all frames; no filter is applied yet
        all_frames = [frame for frame in self.video_clip.iter_frames()]
        return all_frames
# TODO: Transfer VideoClip object methods to moviepy objects and methods
def extract_frames(self, path):
video = cv2.VideoCapture(path)
height, width = self.get_dimensions(path)
num_frames = self.count_frames(path)
# create numpy array for all the frames (convert video to array)
video = self.numpy_video(path, num_frames, height, width)
return video
def numpy_video(self, path, num_frames, height, width, num_channels=3):
# TODO: address failure on high resolution videos of long duration
video = cv2.VideoCapture(path)
video_array = np.empty((num_frames, height, width, num_channels), np.dtype('uint8'))
frame_idx = 0
while video.isOpened():
ret, frame = video.read()
if ret:
video_array[frame_idx] = frame
else:
print("1 ERROR: Error reading video", frame_idx)
break
frame_idx += 1
video.release()
return video_array
def get_dimensions(self, path):
video = cv2.VideoCapture(path)
w, h = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
video.release()
return h, w
def count_frames(self, path):
video = cv2.VideoCapture(path)
num_frames = 0
try:
num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
except:
num_frames = self.manual_count_frames(video)
video.release()
return num_frames
    def manual_count_frames(self, video):
        num_frames = 0
        while video.isOpened():
            ret, _ = video.read()
            if not ret:
                # end of stream (or read error): stop counting
                break
            num_frames += 1
        return num_frames
def reset(self):
        self.video_clip = self.__original
# TODO: getters
def get_original(self):
return self.__original
def get_duration(self):
return self.__duration
# TODO: private methods
def __set_duration(self):
self.__duration = self.end - self.start
class AudioClip(Clip):
def __init__(self):
super(AudioClip, self).__init__()
self.audio_name = None
# TODO: User moviepy to get audio array from video
```
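A hedged usage sketch for `FuriosaVideoClip`: it assumes moviepy and cv2 are installed, that the module above is importable as `classes.clip`, and that a local file named "example.mp4" (hypothetical) exists.

```python
# Usage sketch: build a clip, trim the first 2 seconds, then split at the midpoint.
from classes.clip import FuriosaVideoClip

clip = FuriosaVideoClip(path="example.mp4")
clip.trim("front", 2.0)                  # clip now starts at t = 2.0 s
first_half, second_half = clip.split()   # split the trimmed clip at its midpoint
print(first_half.get_duration(), second_half.get_duration())
```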
#### File: furiosa/video-editor-cached/trial_painter.py
```python
import cv2
from PyQt5.QtWidgets import QMainWindow, QLabel, QPushButton, QSizePolicy, QApplication, QWidget
from PyQt5.QtGui import QPainter, QPen, QPixmap, QImage
from PyQt5.QtCore import QCoreApplication, QRectF, Qt, QRect, QMetaObject
import numpy as np
import sys
import time
import threading
from moviepy.editor import VideoFileClip
class Demo(QWidget):
def __init__(self):
super().__init__()
self.video = VideoFileClip(r'C:\Users\jklew\Videos\Music\Fractalia.MP4')
im_np = self.video.get_frame(0)
self.image = QImage(im_np, im_np.shape[1], im_np.shape[0],
QImage.Format_RGB888)
def paintEvent(self, event):
pen = QPen()
pen.setWidth(5)
painter = QPainter(self)
painter.drawImage(self.rect(), self.image)
painter.setPen(pen)
painter.drawEllipse(300, 300, 500, 500)
def display_clip(self, fps=60, audio=False, audio_fps=22050, audio_buffersize=3000,
audio_nbytes=2):
"""
Displays the clip in a window, at the given frames per second
        (of movie) rate. It will avoid the clip being played faster
        than normal, but it cannot prevent the clip from playing slower
        than normal if the computations are complex. In this case, try
        reducing the ``fps``.
Parameters
------------
fps
Number of frames per seconds in the displayed video.
audio
``True`` (default) if you want the clip's audio be played during
the preview.
audio_fps
The frames per second to use when generating the audio sound.
fullscreen
``True`` if you want the preview to be displayed fullscreen.
"""
        # compute and splash the first image
        # TODO: change pygame to a widget in Qt
        # screen = pg.display.set_mode(clip.size, flags)
        clip = self.video
        audio = audio and (clip.audio is not None)
        if audio:
            # the sound will be played in parallel. We are not
            # parallelizing it on different CPUs because pygame and
            # OpenCV already seem to use several CPUs.
            # two synchro-flags to tell whether audio and video are ready
            videoFlag = threading.Event()
            audioFlag = threading.Event()
            # launch the audio preview thread
            audiothread = threading.Thread(target=clip.audio.preview,
                                           args=(audio_fps,
                                                 audio_buffersize,
                                                 audio_nbytes,
                                                 audioFlag, videoFlag))
            audiothread.start()
        img = clip.get_frame(0)
self.imdisplay(img)
if audio: # synchronize with audio
videoFlag.set() # say to the audio: video is ready
audioFlag.wait() # wait for the audio to be ready
result = []
t0 = time.time()
for t in np.arange(1.0 / fps, clip.duration-.001, 1.0 / fps):
img = clip.get_frame(t)
print(img.shape)
t1 = time.time()
time.sleep(max(0, t - (t1-t0))) # loop at framerate specified
self.imdisplay(img) #, screen)
def imdisplay(self, img_array):
# fill the widget with the image array
# TODO: Qt widget
self.image = QImage(img_array, img_array.shape[1], img_array.shape[0], QImage.Format_RGB888)
self.repaint()
def main():
app = QApplication(sys.argv)
demo = Demo()
demo.show()
demo.display_clip()
sys.exit(app.exec_())
if __name__ == '__main__':
    main()
``` |
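One fragile spot in `Demo` above is constructing `QImage` without an explicit bytes-per-line: for RGB frames whose row size is not a multiple of 4, rows get misaligned. A small sketch of the safer call, using a synthetic frame (no camera or video file needed, PyQt5 assumed installed):

```python
# Sketch: wrap an RGB numpy frame in a QImage, passing bytesPerLine explicitly.
import numpy as np
from PyQt5.QtGui import QImage

frame = np.zeros((480, 638, 3), dtype=np.uint8)   # synthetic frame; 638*3 is not a multiple of 4
height, width, channels = frame.shape
qimage = QImage(frame.data, width, height, channels * width, QImage.Format_RGB888)
print(qimage.width(), qimage.height())
```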
{
"source": "jklewis99/hypertriviation",
"score": 3
} |
#### File: backend/api/models.py
```python
from django.db import models
import string
import random
import uuid
import math
from authentication.models import HypertriviationUser
def generate_unique_code(length=8):
while True:
code = ''.join(random.choices(string.ascii_uppercase, k=length))
if FixationSession.objects.filter(code=code).count() == 0:
break
return code
class FixationCategory(models.TextChoices):
OTHER = "Other"
MUSIC = "Music"
class TimeLimitOptions(models.IntegerChoices):
FAST = 30
MODERATE = 60
SLOW = 120
    UNLIMITED = 1_000_000  # IntegerChoices values must be ints
class Room(models.Model):
code = models.CharField(
max_length=8, default=generate_unique_code, unique=True)
host = models.CharField(max_length=50, unique=True)
guest_can_pause = models.BooleanField(null=False, default=False)
votes_to_skip = models.IntegerField(null=False, default=1)
created_at = models.DateTimeField(auto_now_add=True)
current_song = models.CharField(max_length=50, null=True)
def __str__(self):
return self.code
class User(models.Model):
"""
Stores a single user entry, containing publicly accessible information
"""
username = models.CharField(max_length=32, unique=True)
first_name = models.CharField(max_length=32)
last_name = models.CharField(max_length=32)
spotify_authenticated_ind = models.BooleanField(null=False, default=False)
email = models.CharField(max_length=64)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.username
class Fixation(models.Model):
"""
Stores a single fixation entry, which defines the trivia session
"""
created_by = models.ForeignKey(HypertriviationUser, on_delete=models.CASCADE, null=False)
fixation_title = models.CharField(max_length=50, unique=False)
category = models.TextField(choices=FixationCategory.choices, default=FixationCategory.OTHER)
description = models.CharField(max_length=240, null=True)
img_url = models.CharField(max_length=1000, null=True) # change this
keep_shuffled = models.BooleanField(default=True)
spotify_playlist_id = models.CharField(max_length=128, null=True)
spotify_random_start_ind = models.BooleanField(default=True)
default_duration = models.IntegerField(null=False, default=10)
question_count = models.IntegerField(null=False, default=0)
rating = models.FloatField(null=False, default=0)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
# TODO: add description
def __str__(self):
return self.fixation_title
class FixationQuestion(models.Model):
"""
Stores a single fixation question entry, which defines the fixation question
"""
fixation = models.ForeignKey(Fixation, null=True, on_delete=models.CASCADE)
question_idx = models.IntegerField(null=False)
question_txt = models.CharField(max_length=512, null=False)
multiple_choice_ind = models.BooleanField(default=True)
img_url = models.CharField(max_length=512, null=True)
video_playback_url = models.CharField(max_length=512, null=True)
created_by = models.ForeignKey(HypertriviationUser, on_delete=models.CASCADE)
question_category = models.CharField(max_length=128, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.question_txt
class FixationAnswer(models.Model):
"""
Stores a single fixation answer entry, which defines a possible fixation answer
"""
question = models.ForeignKey(FixationQuestion, related_name='answers', null=True, on_delete=models.CASCADE)
answer_txt = models.CharField(max_length=512, null=False)
correct_answer_ind = models.BooleanField(default=True)
created_by = models.ForeignKey(HypertriviationUser, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.answer_txt
class FixationSession(models.Model):
"""
Stores a single fixation session entry, which contains the real time metadata for a trivia session
"""
code = models.CharField(
max_length=8, default=generate_unique_code, unique=True)
host = models.CharField(max_length=64, unique=True)
hosted_by = models.ForeignKey(HypertriviationUser, null=False, on_delete=models.CASCADE)
fixation = models.ForeignKey(Fixation, null=True, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
current_song = models.CharField(max_length=64, null=True)
# TODO: add settings (time limit, hints, etc.)
def __str__(self):
return self.code
class FixationSessionPlayer(models.Model):
"""
Stores a single fixation session user entry, designated by the code of FixationSession
"""
player_session_id = models.UUIDField(primary_key=True, default=uuid.uuid4)
fixation_session = models.ForeignKey(FixationSession, null=False, on_delete=models.CASCADE)
display_name = models.CharField(max_length=16)
active_ind = models.BooleanField(null=False, default=True)
added_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.display_name + ": " + self.fixation_session.code
def get_self(self):
return {
"player_session_id": str(self.player_session_id),
"display_name": self.display_name,
"fixation_session": self.fixation_session.code
}
class FixationSessionSettings(models.Model):
"""
Stores a fixation session settings entry, designated by the code of FixationSession
"""
fixation_session = models.ForeignKey(FixationSession, null=False, on_delete=models.CASCADE)
show_hints_ind = models.BooleanField(null=False, default=True)
multiple_choice_ind = models.BooleanField(null=False, default=True)
random_shuffle_ind = models.BooleanField(null=False, default=True)
stop_on_answer_ind = models.BooleanField(null=False, default=False)
spotify_random_start_ind = models.BooleanField(null=True, default=False)
time_limit = models.IntegerField(choices=TimeLimitOptions.choices, default=TimeLimitOptions.UNLIMITED)
active_ind = models.BooleanField(null=False, default=True)
created_ts = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.fixation_session.code
```
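The `generate_unique_code` helper above leans on a Django queryset for the uniqueness check; below is a dependency-free sketch of the same loop, with a hypothetical in-memory set standing in for `FixationSession.objects`.

```python
import random
import string

existing_codes = {"ABCDEFGH"}  # stand-in for FixationSession.objects.filter(code=...)

def generate_unique_code(length=8):
    while True:
        code = ''.join(random.choices(string.ascii_uppercase, k=length))
        if code not in existing_codes:
            return code

print(generate_unique_code())
```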
#### File: backend/api/tests.py
```python
from django.test import TestCase
from django.conf import settings
# Create your tests here.
# test get user token for websockets
class SettingsTestCase(TestCase):
def test_generate_websockets_token(self):
from django.contrib.auth import get_user_model
User = get_user_model()
user = User.objects.create_user('jklew', '<EMAIL>', 'richardnixon')
user = User.objects.get(username="jklew")
from sesame.utils import get_token
token = get_token(user)
        self.assertEqual(token, "this")
```
#### File: backend/authentication/models.py
```python
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class HypertriviationUser(AbstractUser):
"""
Stores a single user entry
"""
# username = models.CharField(max_length=32, unique=True)
first_name = models.CharField(max_length=32)
last_name = models.CharField(max_length=32)
spotify_authenticated_ind = models.BooleanField(null=False, default=False)
# email = models.CharField(max_length=64)
# created_at = models.DateTimeField(auto_now_add=True)
# updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.username + " " + self.last_name
``` |
{
"source": "jklewis99/magical-movie-poster-processing",
"score": 3
} |
#### File: jklewis99/magical-movie-poster-processing/generate_train_test.py
```python
import pandas as pd
from sklearn.model_selection import train_test_split
def main():
metadata = pd.read_csv("data/movies-metadata-cleaned.csv").drop(
columns=['Language', 'Poster', 'Country', 'Director', 'Released', 'Writer', 'Genre', 'Actors'])
ratings = pd.get_dummies(metadata['Rated'], prefix='rated') # one hot encode "Rated" column
metadata = metadata.drop(columns=["Rated"]).join(ratings) # replace "Rated" with one_hot
metadata = metadata.dropna() # drop the missing box_office values
posters = pd.read_csv("data/posters-and-genres.csv").drop(columns=["Genre"]).rename(columns={"Id": "imdbID"})
data = metadata.merge(posters, on='imdbID').drop_duplicates() # add genres
    data = data[(data['Short'] != 1) & (data['N/A'] != 1)]
data = data.drop(columns=['Reality-TV', 'Short', 'N/A'])
cols = data.columns.tolist()
cols = cols[1:2] + cols[5:6] + cols[2:5] + cols[6:] + cols[0:1]
data = data[cols] # reorder columns
train, test = train_test_split(data, test_size=0.2) # generate train and test data
train.to_csv("data/train_data.csv", index=False)
test.to_csv("data/test_data.csv", index=False)
if __name__ == "__main__":
main()
```
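The key preprocessing step above is one-hot encoding the `Rated` column and joining it back into the frame; a tiny stand-in frame (hypothetical values) makes the shape of that operation clear.

```python
import pandas as pd

metadata = pd.DataFrame({"Title": ["A", "B", "C"], "Rated": ["PG", "R", "PG"]})
ratings = pd.get_dummies(metadata["Rated"], prefix="rated")     # rated_PG, rated_R
metadata = metadata.drop(columns=["Rated"]).join(ratings)
print(metadata.columns.tolist())  # ['Title', 'rated_PG', 'rated_R']
```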
#### File: magical-movie-poster-processing/utils/data_read.py
```python
import os
import numpy as np
import pandas as pd
from utils.read_images import read_images
from utils.misc import get_genres
from sklearn.model_selection import train_test_split
def split_data(data="data/posters-and-genres.csv", img_shape=(299, 299)):
'''
read data from the csv file containing the id of movies and the labels
Keyword Arguments
==========
data:
location of poster dataset
img_shape:
(299, 299) for input size to XceptionNet
Return
==========
tuple of numpy arrays: (x_train, x_test, y_train, y_test)
'''
imgs, genres = load_data(data, img_shape=img_shape)
# call the sklearn train_test_split method
x_train, x_test, y_train, y_test = train_test_split(imgs, genres, test_size=0.2, random_state=3520)
return x_train, x_test, y_train, y_test
def load_train_test(training_data='data/train_data.csv', testing_data='data/test_data.csv', img_shape=(299,299)):
'''
top level method for getting the training and test data for the CNN models
Parameters
==========
`training_data`:
path to csv file containing training data
`testing_data`:
        path to csv file containing testing data
Return
==========
x_train, y_train, x_test, y_test
'''
x_train, y_train, _, genres = load_data(training_data, img_shape=img_shape)
x_test, y_test, _, _ = load_data(testing_data, img_shape=img_shape)
return x_train, y_train, x_test, y_test, genres
def load_data(data='data/test_data.csv', posters_csv="data/posters-and-genres.csv", img_shape=(299,299)):
'''
load and read data for testing or training or other tasks associated with CNNs, where the
data is specified by the `data` parameter. Method merges `data` with `posters_csv` and
extracts the matching ImdbIDs, then reads the images and saves them into a numpy array.
Parameters
==========
`data`:
any csv containing column imdbID, and whose imdbIDs are in `posters_csv`
`posters_csv`:
csv with data on poster image id and encoded genres
Return
==========
(imgs, labels, img_ids, genres)
numpy array of images, numpy array of labels per image, numpy array of image ids, and a list
of the column names of the labels
'''
data_ids = pd.read_csv(data)['imdbID'].values
ids_and_genres = pd.read_csv(posters_csv).drop(columns=['Genre'])
ids_and_genres = ids_and_genres.loc[ids_and_genres['Id'].isin(data_ids)]
img_ids = ids_and_genres['Id'].values # isolate the ids
labels = ids_and_genres.loc[:, ids_and_genres.columns != 'Id'].values # isolate the genre labels
print("\nLoading images........")
# read in all the images into an array, return number of indices used (used to combat memory error)
imgs, subset_size = read_images(img_ids, dimensions=img_shape)
print("DONE!\n")
# if there was a memory error, update the labels as were updated within read_images functions
labels = labels[:subset_size]
# genres = ids_and_genres.columns[-labels.shape[1]:]
genres = get_genres()
return imgs, labels, img_ids, genres
``` |
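`load_data()` filters the poster table down to the IDs present in the requested split before separating labels; the same pattern is shown below on two toy frames with made-up IDs and genre columns.

```python
import pandas as pd

data_ids = pd.Series(["tt001", "tt003"])                      # hypothetical split IDs
ids_and_genres = pd.DataFrame({"Id": ["tt001", "tt002", "tt003"],
                               "Action": [1, 0, 1], "Drama": [0, 1, 1]})
subset = ids_and_genres.loc[ids_and_genres["Id"].isin(data_ids)]
img_ids = subset["Id"].values
labels = subset.loc[:, subset.columns != "Id"].values
print(img_ids, labels.shape)  # ['tt001' 'tt003'] (2, 2)
```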
{
"source": "jklewis99/MultimodalDeepfakeDetection",
"score": 2
} |
#### File: jklewis99/MultimodalDeepfakeDetection/fouriernet_train.py
```python
from tqdm import tqdm
import os
import torch
import numpy as np
import matplotlib.pyplot as plt
# from Utils.errors import *
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.optim as optim
from torch import nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter('runs/fouriernet-val-e-5-lstm-4')
# %%
labelmap = {'real': 0, 'fake': 1}
# %%
dct_path = '/home/itdfh/data/dfdc-subset/train-6-dct-all'
spc_path = '/home/itdfh/data/dfdc-subset/train-6-spectrograms'
# ## Load data
# ### Listing files
# In[2]:
def tensor_file_lists(dct_path, spc_path, max_files=None, perc=.9):
dct_files_train, spc_files_train = [], []
dct_files_val, spc_files_val = [], []
for label in ['real', 'fake']:
train_files = []
val_files = []
all_files = os.listdir(os.path.join(dct_path, label))
for i, p in enumerate(all_files):
base_dir = os.path.join(label, p)
full_base_dir = os.path.join(dct_path, base_dir)
            if i < len(all_files) * perc:
train_files.extend([os.path.join(base_dir, p)
for p in os.listdir(full_base_dir)])
else:
val_files.extend([os.path.join(base_dir, p)
for p in os.listdir(full_base_dir)])
dct_files_train.extend([(os.path.join(dct_path, p[:-6]+'30.npy'), labelmap[label])
for p in train_files if p[-6:] == '30.npy'])
spc_files_train.extend([(os.path.join(spc_path, p[:-6]+'30.pt'), labelmap[label])
for p in train_files if p[-6:] == '30.npy'])
dct_files_val.extend([(os.path.join(dct_path, p[:-6]+'30.npy'), labelmap[label])
for p in val_files if p[-6:] == '30.npy'])
spc_files_val.extend([(os.path.join(spc_path, p[:-6]+'30.pt'), labelmap[label])
for p in val_files if p[-6:] == '30.npy'])
return dct_files_train, spc_files_train, dct_files_val, spc_files_val
# %%
dct_files_train, spc_files_train, dct_files_val, spc_files_val = tensor_file_lists(
dct_path, spc_path)
# In[3]:
spc_files_train[0], dct_files_train[0]
# ### Checking match
# ### Keeping matches
# In[6]:
clean_spc_files_train = [spc_files_train[i] for i, (f, label) in enumerate(
spc_files_train) if os.path.exists(f)]
clean_dct_files_train = [dct_files_train[i] for i, (f, label) in enumerate(
spc_files_train) if os.path.exists(f)]
spc_files_train = clean_spc_files_train
dct_files_train = clean_dct_files_train
spc_files_train_clean = []
dct_files_train_clean = []
for f1, f2 in zip(dct_files_train, spc_files_train):
if not torch.isnan(torch.load(f2[0]).sum()) and not np.isnan(np.load(f1[0]).sum()):
dct_files_train_clean.append(f1)
spc_files_train_clean.append(f2)
dct_files_train = dct_files_train_clean
spc_files_train = spc_files_train_clean
clean_spc_files_val = [spc_files_val[i] for i, (f, label) in enumerate(
spc_files_val) if os.path.exists(f)]
clean_dct_files_val = [dct_files_val[i] for i, (f, label) in enumerate(
spc_files_val) if os.path.exists(f)]
spc_files_val = clean_spc_files_val
dct_files_val = clean_dct_files_val
spc_files_val_clean = []
dct_files_val_clean = []
for f1, f2 in zip(dct_files_val, spc_files_val):
if not torch.isnan(torch.load(f2[0]).sum()) and not np.isnan(np.load(f1[0]).sum()):
dct_files_val_clean.append(f1)
spc_files_val_clean.append(f2)
dct_files_val = dct_files_val_clean
spc_files_val = spc_files_val_clean
# ### `FourierDataset`
# In[8]:
class FourierDataset(Dataset):
'''
    Dataset for concatenating DCT features and spectrogram features
'''
def __init__(self, dct_files, spc_files, max_spc_size=700):
"""
Args:
            dct_files (list): (path, label) tuples for the DCT feature files
            spc_files (list): (path, label) tuples for the spectrogram feature files
"""
self.max_spc_size = max_spc_size
self.dct_files, self.spc_files = dct_files, spc_files
def __len__(self):
return len(self.dct_files)
def __getitem__(self, idx):
dctf, label = self.dct_files[idx]
spcf, label = self.spc_files[idx]
dct_feats = np.load(dctf)
dct_feats = torch.tensor(dct_feats)
specs = torch.load(spcf, map_location=torch.device('cpu'))[
:, :self.max_spc_size]
spc_feats = torch.zeros((specs.shape[0], self.max_spc_size))
spc_feats[:, :specs.shape[-1]] = specs
fourier_feats = torch.cat([dct_feats.float(), spc_feats], dim=1)
label = torch.tensor(label).long()
return fourier_feats, label
# %%
trainset = FourierDataset(dct_files_train, spc_files_train)
valset = FourierDataset(dct_files_val, spc_files_val)
# In[12]:
class FourierNet(nn.Module):
def __init__(self, feature_size, num_layers=2, num_hidden_nodes=512, device='cuda'):
super(FourierNet, self).__init__()
self.device = device
self.num_layers = num_layers
self.num_hidden_nodes = num_hidden_nodes
        # LSTM over the concatenated per-frame Fourier features
self.lstm = nn.LSTM(feature_size, num_hidden_nodes,
batch_first=True, num_layers=num_layers)
# fully connected
self.fc1 = nn.Linear(num_hidden_nodes, num_hidden_nodes)
self.act = nn.ReLU()
self.fc2 = nn.Linear(num_hidden_nodes, 2)
        self.softmax = nn.Softmax()
    def forward(self, x, hidden):
        y, hidden = self.lstm(x, hidden)  # returns the two outputs
        y = y[:, -1, :]  # keep only the last time step's output
        y = self.fc1(y)
        y = self.act(y)
        y = self.fc2(y)
        # NLLLoss (used below) expects log-probabilities, so use log_softmax
        y = F.log_softmax(y, dim=1)
        return y, hidden
def init_hidden(self, batch_size):
weight = next(self.parameters()).data
hidden = (weight.new(self.num_layers, batch_size, self.num_hidden_nodes).zero_().to(self.device),
weight.new(self.num_layers, batch_size, self.num_hidden_nodes).zero_().to(self.device))
return hidden
# In[15]:
model = FourierNet(1465, num_layers=4)
# In[17]:
def train(model, trainset, loss_function, optimizer, valset=None, epochs=1000, batch_size=50, device='cuda'):
global writer
# epsilon = 1e-6
model = model.to(device)
trainloader = DataLoader(trainset, shuffle=True,
batch_size=batch_size, drop_last=True)
if valset is not None:
valloader = DataLoader(valset, shuffle=True,
batch_size=batch_size, drop_last=True)
hidden = model.init_hidden(batch_size)
for h in hidden:
h = h.to(device)
print_every = 100
i = 0
losses = []
accs = []
vaccs = []
vlosses = []
running_loss = 0.0
running_acc = 0.0
    # main training loop over epochs
for epoch in range(epochs):
for inp, labels in trainloader: # renamed sequence to inp because inp is a batch of sequences
optimizer.zero_grad()
# Step 1. Remember that Pytorch accumulates gradients.
# We need to clear them out before each instance
model.zero_grad()
inp = inp.float().to(device)
labels = labels.to(device)
# Step 2. Run our forward pass.
tag_scores, h = model(inp, hidden)
# tag_scores = tag_scores.add(epsilon)
# Step 3. Compute the loss, gradients, and update the parameters by
# calling optimizer.step()
loss = loss_function(tag_scores, labels)
# torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
loss.backward()
optimizer.step()
running_acc += torch.mean((tag_scores.argmax(dim=1)
== labels).float()).item()
# print statistics
running_loss += loss.item()
if i % print_every == print_every-1:
print('[%d, %5d] loss: %.3f - acc: %.3f' %
(epoch + 1, i + 1, running_loss / print_every, running_acc * 100 / print_every))
writer.add_scalar('train/loss', running_loss / print_every, i)
writer.add_scalar('train/acc', running_acc *
100 / print_every, i)
losses.append(running_loss / print_every)
accs.append(running_acc * 100 / print_every)
running_loss = 0.0
running_acc = 0.0
i += 1
if valset is not None:
with torch.no_grad():
val_accs, val_losses = [], []
for inp, labels in valloader:
inp = inp.float().to(device)
labels = labels.to(device)
tag_scores, h = model(inp, hidden)
loss = loss_function(tag_scores, labels)
val_accs.append(torch.mean((tag_scores.argmax(dim=1)
== labels).float()).item())
val_losses.append(loss)
val_accs = torch.mean(torch.tensor(val_accs))
val_losses = torch.mean(torch.tensor(val_losses))
writer.add_scalar('val/acc', val_accs * 100, epoch)
writer.add_scalar('val/loss', val_losses, epoch)
vaccs.append(val_accs)
vlosses.append(val_losses)
return losses, accs, vlosses, vaccs
# In[18]:
model = model.cuda()
loss_function = nn.NLLLoss().cuda()
optimizer = optim.Adam(model.parameters(), lr=1e-5)
losses, accs, vlosses, vaccs = train(model, trainset, loss_function,
optimizer, valset=valset, epochs=1000, batch_size=64)
# In[ ]:
```
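`nn.NLLLoss` (used for both networks in this repo) expects log-probabilities, which is why the forward pass should end in `log_softmax` rather than `softmax`; a two-line check of the pairing on random logits:

```python
import torch
import torch.nn.functional as F

logits = torch.randn(4, 2)             # batch of 4, two classes
labels = torch.tensor([0, 1, 1, 0])
loss = torch.nn.NLLLoss()(F.log_softmax(logits, dim=1), labels)
print(loss.item())                      # equivalent to CrossEntropyLoss on the raw logits
```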
#### File: jklewis99/MultimodalDeepfakeDetection/frimage_train.py
```python
import os
import torch
import numpy as np
import matplotlib.pyplot as plt
# from Utils.errors import *
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.optim as optim
from torch import nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter('runs/frimage-2')
# %%
labelmap = {'real': 0, 'fake': 1}
# %%
spec_path = '/home/itdfh/data/dfdc-subset/train-6-spectrograms'
xcep_path = '/home/itdfh/data/dfdc-subset/train-6-xception'
# %%
def tensor_file_lists(spec_path, xcep_path, max_files=None, perc=.9):
spec_files_train, xcep_files_train = [], []
spec_files_val, xcep_files_val = [], []
for label in ['real', 'fake']:
train_files = []
val_files = []
all_files = os.listdir(os.path.join(spec_path, label))
for i, p in enumerate(all_files):
base_dir = os.path.join(label, p)
full_base_dir = os.path.join(spec_path, base_dir)
            if i < len(all_files) * perc:
train_files.extend([os.path.join(base_dir, p)
for p in os.listdir(full_base_dir)])
else:
val_files.extend([os.path.join(base_dir, p)
for p in os.listdir(full_base_dir)])
spec_files_train.extend([(os.path.join(spec_path, p), labelmap[label])
for p in train_files if p[-5:] == '30.pt'])
xcep_files_train.extend([(os.path.join(xcep_path, p), labelmap[label])
for p in train_files if p[-5:] == '30.pt'])
spec_files_val.extend([(os.path.join(spec_path, p), labelmap[label])
for p in val_files if p[-5:] == '30.pt'])
xcep_files_val.extend([(os.path.join(xcep_path, p), labelmap[label])
for p in val_files if p[-5:] == '30.pt'])
return spec_files_train, xcep_files_train, spec_files_val, xcep_files_val
# %% the cleanage
spec_files_train, xcep_files_train, spec_files_val, xcep_files_val = tensor_file_lists(
spec_path, xcep_path)
spec_files_train_clean = []
xcep_files_train_clean = []
for f1, f2 in zip(xcep_files_train, spec_files_train):
if not torch.isnan(torch.load(f2[0], map_location=torch.device('cpu')).sum()) and not torch.isnan(torch.load(f1[0], map_location=torch.device('cpu')).sum()):
xcep_files_train_clean.append(f1)
spec_files_train_clean.append(f2)
xcep_files_train = xcep_files_train_clean
spec_files_train = spec_files_train_clean
clean_spec_files_val = [spec_files_val[i] for i, (f, label) in enumerate(
spec_files_val) if os.path.exists(f)]
clean_xcep_files_val = [xcep_files_val[i] for i, (f, label) in enumerate(
spec_files_val) if os.path.exists(f)]
spec_files_val = clean_spec_files_val
xcep_files_val = clean_xcep_files_val
spec_files_val_clean = []
xcep_files_val_clean = []
for f1, f2 in zip(xcep_files_val, spec_files_val):
if not torch.isnan(torch.load(f2[0], map_location=torch.device('cpu')).sum()) and not torch.isnan(torch.load(f1[0], map_location=torch.device('cpu')).sum()):
xcep_files_val_clean.append(f1)
spec_files_val_clean.append(f2)
xcep_files_val = xcep_files_val_clean
spec_files_val = spec_files_val_clean
# %%
class FrimagenetDataset(Dataset):
'''
FrimageNet data set for concatenating XceptionNet Features and Spectrogram features
'''
def __init__(self, spec_files, xcep_files, seq_size=30, max_spec_size=700):
"""
Args:
            spec_files (list): (path, label) tuples for the spectrogram feature files
            xcep_files (list): (path, label) tuples for the XceptionNet feature files
"""
self.max_spec_size = max_spec_size
self.seq_size = seq_size
self.spec_files, self.xcep_files = spec_files, xcep_files
def __len__(self):
return len(self.spec_files)
def __getitem__(self, idx):
sf, label = self.spec_files[idx]
xf, label = self.xcep_files[idx]
# loading spec_feats with 0 padding
spec_feats = torch.zeros((self.seq_size, self.max_spec_size))
specs = torch.load(sf, map_location=torch.device('cpu'))[
:, :self.max_spec_size]
spec_feats[:, :specs.shape[-1]] = specs
xcep_feats = torch.load(xf, map_location=torch.device('cpu'))
x = torch.cat((xcep_feats, spec_feats), dim=-1)
label = torch.tensor(label).long()
return x, label
# %%
trainset = FrimagenetDataset(
spec_files_train, xcep_files_train)
valset = FrimagenetDataset(spec_files_val, xcep_files_val)
class FrimageNet(nn.Module):
def __init__(self, feature_size, num_layers=2, num_hidden_nodes=1024, device='cuda'):
super(FrimageNet, self).__init__()
self.device = device
self.num_layers = num_layers
self.num_hidden_nodes = num_hidden_nodes
        # LSTM over the concatenated XceptionNet + spectrogram features
self.lstm = nn.LSTM(feature_size, num_hidden_nodes,
batch_first=True, num_layers=num_layers)
# fully connected
self.fc1 = nn.Linear(num_hidden_nodes, num_hidden_nodes)
self.act = nn.ReLU()
self.fc2 = nn.Linear(num_hidden_nodes, 2)
self.softmax = nn.Softmax()
def forward(self, x, hidden):
y, hidden = self.lstm(x, hidden)
y = y[:, -1, :]
y = self.fc1(y)
y = self.act(y)
y = self.fc2(y)
y = F.log_softmax(y, dim=1)
return y, hidden
def init_hidden(self, batch_size):
weight = next(self.parameters()).data
hidden = (weight.new(self.num_layers, batch_size, self.num_hidden_nodes).zero_().to(self.device),
weight.new(self.num_layers, batch_size, self.num_hidden_nodes).zero_().to(self.device))
return hidden
model = FrimageNet(2748)
def train(model, trainset, loss_function, optimizer, valset=None, epochs=1000, batch_size=50, device='cuda'):
global writer
epsilon = 1e-6
model = model.to(device)
trainloader = DataLoader(trainset, shuffle=True,
batch_size=batch_size, drop_last=True)
if valset is not None:
valloader = DataLoader(valset, shuffle=True,
batch_size=batch_size, drop_last=True)
hidden = model.init_hidden(batch_size)
for h in hidden:
h = h.to(device)
print_every = 5
i = 0
losses = []
accs = []
vaccs = []
vlosses = []
running_loss = 0.0
running_acc = 0.0
    # main training loop over epochs
for epoch in range(epochs):
for inp, labels in trainloader: # renamed sequence to inp because inp is a batch of sequences
optimizer.zero_grad()
# Step 1. Remember that Pytorch accumulates gradients.
# We need to clear them out before each instance
model.zero_grad()
inp = inp.float().to(device)
labels = labels.to(device)
# Step 2. Run our forward pass.
tag_scores, h = model(inp, hidden)
tag_scores = tag_scores.add(epsilon)
# Step 3. Compute the loss, gradients, and update the parameters by
# calling optimizer.step()
loss = loss_function(tag_scores, labels)
# torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
loss.backward()
optimizer.step()
running_acc += torch.mean((tag_scores.argmax(dim=1)
== labels).float()).item()
# print statistics
running_loss += loss.item()
if i % print_every == print_every-1:
print('[%d, %5d] loss: %.3f - acc: %.3f' %
(epoch + 1, i + 1, running_loss / print_every, running_acc * 100 / print_every))
writer.add_scalar('train/loss', running_loss / print_every, i)
writer.add_scalar('train/acc', running_acc *
100 / print_every, i)
losses.append(running_loss / print_every)
accs.append(running_acc * 100 / print_every)
running_loss = 0.0
running_acc = 0.0
i += 1
if valset is not None:
with torch.no_grad():
val_accs, val_losses = [], []
for inp, labels in valloader:
inp = inp.float().to(device)
labels = labels.to(device)
tag_scores, h = model(inp, hidden)
loss = loss_function(tag_scores, labels)
val_accs.append(torch.mean((tag_scores.argmax(dim=1)
== labels).float()).item())
val_losses.append(loss)
val_accs = torch.mean(torch.tensor(val_accs))
val_losses = torch.mean(torch.tensor(val_losses))
                writer.add_scalar('val/acc', val_accs * 100, epoch)
                writer.add_scalar('val/loss', val_losses, epoch)
vaccs.append(val_accs)
vlosses.append(val_losses)
return losses, accs, vlosses, vaccs
loss_function = nn.NLLLoss().cuda()
optimizer = optim.Adam(model.parameters(), lr=1e-5)
losses, accs, vlosses, vaccs = train(model, trainset, loss_function,
optimizer, epochs=1000, batch_size=200, valset=valset)
``` |
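`FrimagenetDataset.__getitem__` right-pads each spectrogram to a fixed width before concatenating it with the XceptionNet features; the padding step in isolation, on a hypothetical 30-frame spectrogram:

```python
import torch

max_spec_size = 700
specs = torch.randn(30, 512)                 # hypothetical 30-frame spectrogram
spec_feats = torch.zeros((30, max_spec_size))
spec_feats[:, :specs.shape[-1]] = specs      # left-aligned copy, zeros fill the rest
print(spec_feats.shape)                      # torch.Size([30, 700])
```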
{
"source": "jklf5/Spider",
"score": 3
} |
#### File: Spider/Corporate_Financial_Reporting_for_Shanghai_and_Shenzhen_A_Shares/test2_small_module.py
```python
import requests
from pyquery import PyQuery as pq
import os
work_cwd = os.path.abspath('..')
path_stock_info = work_cwd + \
r'/Corporate_Financial_Reporting_for_Shanghai_and_Shenzhen_A_Shares/Stock_info'
def f(file_name):
stock_num_list = list()
stock_name_list = list()
stock_info_list = list()
f_stock_info = open(
        path_stock_info + '/' + file_name + '.txt', 'r')  # open the file read-only
stock_info_list = f_stock_info.readlines()
f_stock_info.close()
    # stock_info = ['000631:顺发恒业\n', '600485:*ST信威\n', '002259:*ST升达\n']  # for testing
for item in stock_info_list:
# print(item)
        item = item[:-1]  # strip the trailing '\n'
        num = item[:6]  # slice out the stock number
        name = item[7:]  # slice out the stock name
        # some stock names contain '*'; replace it with '^', otherwise creating the file would fail
        flag = name.find('*')
        if flag != -1:
            name = name.replace('*', '^')
stock_num_list.append(num)
stock_name_list.append(name)
return stock_info_list, stock_num_list, stock_name_list
if __name__ == "__main__":
    # extract the stock numbers and names into two lists
# stock_num_list = list()
# stock_name_list = list()
# stock_info_list = list()
stock_info_list, stock_num_list, stock_name_list = f('stock_info_test')
stock_info_unfinished_list = f('stock_info_unfinished')
# print(stock_info_list, stock_num_list, stock_name_list)
print(stock_info_unfinished_list[0])
    if len(stock_info_unfinished_list[0]) != 0:
index = stock_info_list.index(stock_info_unfinished_list[0][1])
else:
index = 1
# stock_temp_for_unfinished = stock_info_list[index-1:]
for stock_index in range(index-1, len(stock_num_list)-3):
print(stock_name_list[stock_index])
stock_temp_for_unfinished = stock_info_list[stock_index+1:]
        with open(path_stock_info + '/test.txt', 'w') as out_file:
            for each in stock_temp_for_unfinished:
                out_file.write(each)
```
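The `f()` helper above splits lines of the form `NUMBER:NAME` and replaces `'*'` in names so they can be used as filenames; the core of that parsing on two hypothetical lines:

```python
lines = ["000001:EXAMPLE ONE\n", "000002:*ST EXAMPLE\n"]   # hypothetical stock lines
nums, names = [], []
for item in lines:
    item = item.rstrip("\n")
    num, name = item[:6], item[7:]
    if "*" in name:
        name = name.replace("*", "^")   # '*' is not safe in file names
    nums.append(num)
    names.append(name)
print(nums, names)
```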
#### File: ScrapySpider/spiders/itcast.py
```python
import scrapy
from ScrapySpider.items import ScrapyspiderItem
class ItcastSpider(scrapy.Spider):
name = 'itcast'
allowed_domains = ['www.itcast.cn']
start_urls = ['http://www.itcast.cn/channel/teacher.shtml']
def parse(self, response):
        node_list = response.xpath("//div[@class='li_txt']")
        # iterate over every teacher node and store its fields
        for node in node_list:
            # create an item object to hold the extracted info
            item = ScrapyspiderItem()
            name = node.xpath("./h3/text()").extract()
            title = node.xpath("./h4/text()").extract()
            info = node.xpath("./p/text()").extract()
            item['name'] = name[0]
            item['title'] = title[0]
            item['info'] = info[0]
            yield item
``` |
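The relative XPath pattern in `parse()` (select each `li_txt` node, then `./h3`, `./h4`, `./p` under it) can be tried outside a running crawl with scrapy's `Selector`; scrapy must be installed, and the HTML below is a made-up stand-in for the itcast teacher page.

```python
from scrapy.selector import Selector

html = "<div class='li_txt'><h3>Name</h3><h4>Title</h4><p>Info</p></div>"
for node in Selector(text=html).xpath("//div[@class='li_txt']"):
    print(node.xpath("./h3/text()").get(),
          node.xpath("./h4/text()").get(),
          node.xpath("./p/text()").get())
```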
{
"source": "JKlingPhotos/OpenGoPro",
"score": 2
} |
#### File: python/sdk_wireless_camera_control/noxfile.py
```python
from pathlib import Path
from typing import Any
import nox
from nox_poetry import session
nox.options.sessions = "format", "lint", "tests", "docstrings", "docs", "safety"
@session(python=["3.9"])
def format(session) -> None:
"""Run black code formatter."""
session.install("black")
session.run("black", "--check", "open_gopro", "tests", "noxfile.py", "docs/conf.py")
@session(python=["3.8", "3.9", "3.10"])
def lint(session) -> None:
"""Lint using flake8."""
session.install(".")
session.install(
"pylint",
"mypy",
"types-requests",
"construct-typing",
)
session.run("mypy", "open_gopro")
session.run("pylint", "open_gopro")
@session(python=["3.8", "3.9", "3.10"])
def tests(session) -> None:
"""Run the test suite."""
session.install(".")
session.install(
"pytest",
"pytest-cov",
"pytest-asyncio",
"pytest-mock",
"pytest-html",
"coverage[toml]",
"requests-mock",
)
session.run("pytest", "tests/unit", "--cov-fail-under=70")
@session(python=["3.9"])
def docstrings(session) -> None:
"""Validate docstrings."""
session.install("darglint")
session.run("darglint", "open_gopro")
@session(python=["3.9"])
def docs(session) -> None:
"""Build the documentation."""
session.install(".")
session.install(
"sphinx",
"sphinx-autodoc-typehints",
"sphinx-rtd-theme",
"sphinxcontrib-napoleon",
)
session.run("sphinx-build", "docs", "docs/build")
# Clean up for Jekyll consumption
session.run(
"rm", "-rf", "docs/build/.doctrees", "/docs/build/_sources", "/docs/build/_static/fonts", external=True
)
@session(python=["3.8", "3.9", "3.10"])
def safety(session) -> None:
"""Scan dependencies for insecure packages."""
session.install("safety")
session.run(
"safety",
"check",
f"--file={Path(session.virtualenv.location) / 'tmp' / 'requirements.txt'}",
"--full-report",
)
```
#### File: open_gopro/api/builders.py
```python
from __future__ import annotations
import enum
import types
import logging
from pathlib import Path
from datetime import datetime
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import (
Any,
ClassVar,
TypeVar,
Generic,
Type,
Union,
no_type_check,
Optional,
Dict,
Callable,
Tuple,
List,
Set,
)
from _collections_abc import Iterable
import wrapt
import betterproto
from construct import Int8ub, Int16ub, Struct, Adapter, GreedyBytes
from open_gopro.responses import (
BytesParser,
BytesBuilder,
BytesParserBuilder,
JsonParser,
GoProResp,
StringBuilder,
)
from open_gopro.constants import (
ActionId,
FeatureId,
BleUUID,
CmdId,
ResponseType,
SettingId,
QueryCmdId,
StatusId,
ErrorCode,
GoProUUIDs,
)
from open_gopro.communication_client import GoProBle, GoProWifi
from open_gopro.util import build_log_rx_str, build_log_tx_str, custom_betterproto_to_dict
logger = logging.getLogger(__name__)
SettingValueType = TypeVar("SettingValueType")
CommandValueType = TypeVar("CommandValueType")
####################################################### General ##############################################
@wrapt.decorator
def log_query(
wrapped: Callable, instance: Union[BleSetting, BleStatus, WifiSetting], args: Any, kwargs: Any
) -> GoProResp:
"""Log a query write
Args:
wrapped (Callable): query to log
instance (Union[BleSetting, BleStatus, WifiSetting]): status / setting that owns the write
args (Any): positional args
kwargs (Any): keyword args
Returns:
GoProResp: received response from write
"""
logger.info(build_log_tx_str(f"{wrapped.__name__} : {instance.identifier}"))
response = wrapped(*args, **kwargs)
logger.info(build_log_rx_str(response))
return response
######################################################## BLE #################################################
def build_enum_adapter(target: Type[enum.Enum]) -> Adapter:
"""Build an enum to Construct parsing and building adapter
This adapter only works on byte data of length 1
Args:
target (Type[enum.Enum]): Enum to use use for parsing / building
Returns:
Adapter: adapter to be used by Construct
"""
class EnumByteAdapter(Adapter):
"""An enum to Construct adapter"""
target: ClassVar[Type[enum.Enum]]
def _decode(self, obj: bytearray, *_: Any) -> enum.Enum:
"""Parse a bytestream of length 1 into an Enum
Args:
obj (bytearray): bytestream to parse
_ (Any): Not used
Returns:
enum.Enum: Enum value
"""
return self.target(obj)
def _encode(self, obj: Union[enum.Enum, int], *_: Any) -> int:
"""Adapt an enum for use by Construct
Args:
obj (Union[enum.Enum, int]): Enum to adapt
_ (Any): Not used
Returns:
int: int value of Enum
"""
return obj if isinstance(obj, int) else obj.value
setattr(EnumByteAdapter, "target", target)
return EnumByteAdapter(Int8ub)
status_struct = Struct("status" / build_enum_adapter(ErrorCode))
class DeprecatedAdapter(Adapter):
"""Used to return "DEPRECATED" when a deprecated setting / status is attempted to be parsed / built"""
def _decode(self, *_: Any) -> str:
"""Return "DEPRECATED" when parse() is called
Args:
_ (Any): Not used
Returns:
str: "DEPRECATED"
"""
return "DEPRECATED"
def _encode(self, *_: Any) -> str:
"""Return "DEPRECATED" when parse() is called
Args:
_ (Any): Not used
Returns:
str: "DEPRECATED"
"""
return self._decode()
class DateTimeAdapter(Adapter):
"""Translate between different date time representations"""
def _decode(self, obj: Union[List, str], *_: Any) -> datetime:
"""Translate string or list of bytes into datetime
Args:
obj (list): input
_ (Any): Not used
Raises:
TypeError: Unsupported input type
Returns:
datetime: built datetime
"""
if isinstance(obj, str):
# comes as '%14%01%02%03%09%2F'
year, *remaining = [int(x, 16) for x in obj.split("%")[1:]]
return datetime(year + 2000, *remaining) # type: ignore
if isinstance(obj, list):
# When received from BLE, it includes garbage first byte
obj = obj[-7:]
year = Int16ub.parse(bytes(obj[0:2]))
return datetime(year, *[int(x) for x in obj[2:]]) # type: ignore
raise TypeError("Type must be in (str, list)")
def _encode(self, obj: Union[datetime, str], *_: Any) -> Union[bytes, str]:
"""Translate datetime into bytes or pass through string
Args:
obj (Union[datetime, str]): Input
_ (Any): Not used
Raises:
TypeError: Unsupported input type
Returns:
Union[bytes, str]: built bytes
"""
if isinstance(obj, datetime):
year = [int(x) for x in Int16ub.build(obj.year)]
return bytes([*year, obj.month, obj.day, obj.hour, obj.minute, obj.second])
if isinstance(obj, str):
return obj
raise TypeError("Type must be in (datetime, str)")
# Ignoring because hitting this mypy bug: https://github.com/python/mypy/issues/5374
@dataclass # type: ignore
class BleCommand(ABC):
"""The base class for all BLE commands to store common info
Args:
communicator (GoProBle): BLE client to read / write
uuid (BleUUID): BleUUID to read / write to
"""
communicator: GoProBle
uuid: BleUUID
def __post_init__(self) -> None:
self.communicator._add_parser(self._identifier, self._response_parser)
@property
@abstractmethod
def _identifier(self) -> ResponseType:
raise NotImplementedError
@property
@abstractmethod
def _response_parser(self) -> BytesParser:
raise NotImplementedError
@dataclass
class BleReadCommand(BleCommand):
"""A BLE command that reads data from a BleUUID
Args:
communicator (GoProBle): BLE client to read
uuid (BleUUID): BleUUID to read to
response_parser (BytesParser): the parser that will parse the received bytestream into a JSON dict
"""
response_parser: BytesParser
@property
def _identifier(self) -> ResponseType:
return self.uuid
@property
def _response_parser(self) -> BytesParser:
return self.response_parser
# pylint: disable=missing-return-doc
def __call__(self) -> GoProResp: # noqa: D102
logger.info(build_log_tx_str(self.uuid.name))
response = self.communicator._read_characteristic(self.uuid)
logger.info(build_log_rx_str(f"{self.uuid.name} : {response}"))
return response
@dataclass
class BleWriteNoParamsCommand(BleCommand):
"""A BLE command that writes to a BleUUID and does not accept any parameters
Args:
communicator (GoProBle): BLE client to write
uuid (BleUUID): BleUUID to write to
cmd (CmdId): Command ID that is being sent
        response_parser (Optional[BytesParser]): the parser that will parse the received bytestream into a JSON dict.
Defaults to None
"""
cmd: CmdId
response_parser: Optional[BytesParser] = None
@property
def _identifier(self) -> ResponseType:
return self.cmd
@property
def _response_parser(self) -> BytesParser:
return status_struct if self.response_parser is None else status_struct + self.response_parser
# pylint: disable=missing-return-doc
def __call__(self) -> GoProResp: # noqa: D102
logger.info(build_log_tx_str(self.cmd.name))
# Build data buffer
data = bytearray([self.cmd.value])
data.insert(0, len(data))
# Send the data and receive the response
response = self.communicator._write_characteristic_receive_notification(self.uuid, data)
logger.info(build_log_rx_str(response))
return response
@dataclass
class BleWriteWithParamsCommand(BleCommand, Generic[CommandValueType]):
"""A BLE command that writes to a BleUUID and does not accept any parameters
Args:
communicator (GoProBle): BLE client to write
uuid (BleUUID): BleUUID to write to
cmd (CmdId): Command ID that is being sent
param_builder (BytesBuilder): is responsible for building the bytestream to send from the input params
response_parser (Optional[BytesParser]): the parser that will parse the received bytestream into a JSON dict
"""
cmd: CmdId
param_builder: BytesBuilder
response_parser: Optional[BytesParser] = None
@property
def _identifier(self) -> ResponseType:
return self.cmd
@property
def _response_parser(self) -> BytesParser:
return status_struct if self.response_parser is None else status_struct + self.response_parser
# pylint: disable=missing-return-doc
def __call__(self, value: CommandValueType) -> GoProResp: # noqa: D102
logger.info(build_log_tx_str(f"{self.cmd.name}: {str(value)}"))
# Build data buffer
data = bytearray([self.cmd.value])
# Mypy is not understanding the subclass check here
param = value.value if issubclass(type(value), enum.Enum) else value # type: ignore
# There must be a param builder if we have a param
param = self.param_builder.build(param)
data.extend([len(param), *param])
data.insert(0, len(data))
# Send the data and receive the response
response = self.communicator._write_characteristic_receive_notification(self.uuid, data)
logger.info(build_log_rx_str(response))
return response
@dataclass
class RegisterUnregisterAll(BleWriteNoParamsCommand):
"""Base class for register / unregister all commands
    This will loop over all of the elements (i.e. settings / statuses found from the element_set entry of the
producer tuple parameter) and individually register / unregister (depending on the action parameter) each
element in the set
Args:
producer: (Optional[Tuple[Union[Type[SettingId], Type[StatusId]], QueryCmdId]]): Tuple of (element_set,
query command) where element_set is the GoProEnum that this command relates to, i.e. SettingId for
settings, StatusId for Statuses
action: (Optional[Action]): whether to register or unregister
"""
class Action(enum.Enum):
"""Enum to differentiate between register actions"""
REGISTER = enum.auto()
UNREGISTER = enum.auto()
producer: Optional[Tuple[Union[Type[SettingId], Type[StatusId]], QueryCmdId]] = None
action: Optional[Action] = None
def __post_init__(self) -> None:
# TODO refactor to not use dataclasses since derived classes can't have non default members if base classes do
assert self.producer is not None
assert self.action is not None
# pylint: disable=missing-return-doc
def __call__(self) -> GoProResp: # noqa: D102
assert self.producer is not None
assert self.action is not None
element_set = self.producer[0]
responded_command = self.producer[1]
response = super().__call__()
if response.is_ok:
for element in element_set:
(
self.communicator._register_listener
if self.action is RegisterUnregisterAll.Action.REGISTER
else self.communicator._unregister_listener
)(
# Ignoring typing because this seems correct and looks like mypy error
(responded_command, element) # type: ignore
)
return response
def build_protobuf_adapter(protobuf: Type[betterproto.Message]) -> Adapter:
"""Build a protobuf to Construct parsing (only) adapter
Args:
protobuf (Type[betterproto.Message]): protobuf to use as parser
Returns:
Adapter: adapter to be used by Construct
"""
class ProtobufConstructAdapter(Adapter):
"""Adapt a protobuf to be used by Construct (for parsing only)"""
protobuf: Type[betterproto.Message] # TODO use instance instead of class
def _decode(self, obj: bytearray, *_: Any) -> Dict[Any, Any]:
"""Parse a byte stream into a JSON dict using a protobuf
Args:
obj (bytearray): byte stream to parse
_ (Any): Not used
Returns:
Dict[Any, Any]: parsed JSON dict
"""
response: betterproto.Message = self.protobuf().FromString(bytes(obj))
response.to_dict = types.MethodType(custom_betterproto_to_dict, response) # type: ignore
return response.to_dict() # type: ignore
def _encode(self, *_: Any) -> Any:
raise NotImplementedError
setattr(ProtobufConstructAdapter, "protobuf", protobuf)
return ProtobufConstructAdapter(GreedyBytes)
# Ignoring because hitting this mypy bug: https://github.com/python/mypy/issues/5374
@dataclass # type: ignore
class BleProtoCommand(BleCommand):
"""A BLE command that is sent and received as using the Protobuf protocol
Args:
communicator (GoProBle): BLE client to write
uuid (BleUUID): BleUUID to write to
        feature_id (FeatureId): Feature ID that is being sent
        action_id (ActionId): protobuf-specific action ID that is being sent
request_proto (Type[betterproto.Message]): protobuf used to build command bytestream
response_proto (Type[betterproto.Message]): protobuf used to parse received bytestream
additional_matching_action_ids: (Optional[Set[ActionId]]): Other action ID's to share
this parser. Defaults to None.
"""
feature_id: FeatureId
action_id: ActionId
request_proto: Type[betterproto.Message]
response_proto: Type[betterproto.Message]
additional_matching_action_ids: Optional[Set[ActionId]] = None
def __post_init__(self) -> None:
super().__post_init__()
if self.additional_matching_action_ids:
for action_id in self.additional_matching_action_ids:
self.communicator._add_parser(action_id, self._response_parser)
@property
def _identifier(self) -> ResponseType:
return self.action_id
@property
def _response_parser(self) -> BytesParser:
return build_protobuf_adapter(self.response_proto)
@abstractmethod
@no_type_check
# pylint: disable=missing-return-doc
def __call__(self, *args: Any, **kwargs: Any) -> GoProResp: # noqa: D102
# The method that will actually build and send the protobuf command
# This method's signature shall be overridden by the subclass.
# The subclass shall then pass the arguments to this method and return it's returned response
# This pattern is technically violating the Liskov substitution principle. But we are accepting this as a
# tradeoff for exposing type hints on BLE Protobuf commands.
logger.info(
build_log_tx_str(
f"{self.action_id.name} : {' '.join([*[str(a) for a in args], *[str(a) for a in kwargs.values()]])}"
)
)
# Build request protobuf bytestream
proto = self.request_proto()
# Add args to protobuf request
attrs = iter(self.__call__.__annotations__.keys())
for arg in args:
param = arg.value if issubclass(type(arg), enum.Enum) else arg
setattr(proto, next(attrs), param)
# Add kwargs to protobuf request
for name, arg in kwargs.items():
if arg is not None:
param = arg.value if issubclass(type(arg), enum.Enum) else arg
setattr(proto, name, param)
# Prepend headers and serialize
request = bytearray([self.feature_id.value, self.action_id.value, *proto.SerializeToString()])
# Prepend length
request.insert(0, len(request))
# Allow exception to pass through if protobuf not completely initialized
response = self.communicator._write_characteristic_receive_notification(self.uuid, request)
logger.info(build_log_rx_str(response))
return response
class BleSetting(Generic[SettingValueType]):
"""An individual camera setting that is interacted with via BLE.
Args:
communicator (GoProBle): Adapter to read / write settings data
identifier (SettingId): ID of setting
parser_builder (BytesParserBuilder): object to both parse and build setting
"""
def __init__(
self, communicator: GoProBle, identifier: SettingId, parser_builder: BytesParserBuilder
) -> None:
self.identifier = identifier
self.communicator: GoProBle = communicator
self.setter_uuid: BleUUID = GoProUUIDs.CQ_SETTINGS
self.reader_uuid: BleUUID = GoProUUIDs.CQ_QUERY
self.parser: BytesParser = parser_builder
self.builder: BytesBuilder = parser_builder # Just syntactic sugar
communicator._add_parser(self.identifier, self.parser)
def __str__(self) -> str: # pylint: disable=missing-return-doc
return str(self.identifier.name)
def set(self, value: SettingValueType) -> GoProResp:
"""Set the value of the setting.
Args:
value (SettingValueType): The argument to use to set the setting value.
Returns:
GoProResp: Status of set
"""
logger.info(build_log_tx_str(f"Set {self.identifier.name}: {str(value)}"))
data = bytearray([self.identifier.value])
try:
param = self.builder.build(value)
data.extend([len(param), *param])
except IndexError:
pass
data.insert(0, len(data))
response = self.communicator._write_characteristic_receive_notification(self.setter_uuid, data)
logger.info(build_log_rx_str(response))
return response
@log_query
def get_value(self) -> GoProResp:
"""Get the settings value.
Returns:
GoProResp: settings value
"""
return self.communicator._write_characteristic_receive_notification(
self.reader_uuid, self._build_cmd(QueryCmdId.GET_SETTING_VAL)
)
@log_query
def get_name(self) -> GoProResp:
"""Get the settings name.
Raises:
NotImplementedError: This isn't implemented on the camera
"""
# return self.communicator._write_characteristic_receive_notification(
# self.reader_uuid, QueryCmdId.GET_SETTING_NAME, self._build_cmd(QueryCmdId.GET_SETTING_NAME)
# )
raise NotImplementedError("Not implemented on camera!")
@log_query
def get_capabilities_values(self) -> GoProResp:
"""Get currently supported settings capabilities values.
Returns:
GoProResp: settings capabilities values
"""
return self.communicator._write_characteristic_receive_notification(
self.reader_uuid, self._build_cmd(QueryCmdId.GET_CAPABILITIES_VAL)
)
@log_query
def get_capabilities_names(self) -> GoProResp:
"""Get currently supported settings capabilities names.
Raises:
NotImplementedError: This isn't implemented on the camera
"""
# return self.communicator._write_characteristic_receive_notification(
# self.reader_uuid,
# QueryCmdId.GET_CAPABILITIES_NAME,
# self._build_cmd(QueryCmdId.GET_CAPABILITIES_NAME),
# )
raise NotImplementedError("Not implemented on camera!")
@log_query
def register_value_update(self) -> GoProResp:
"""Register for asynchronous notifications when a given setting ID's value updates.
Returns:
GoProResp: Current value of respective setting ID
"""
self.communicator._register_listener((QueryCmdId.SETTING_VAL_PUSH, self.identifier))
return self.communicator._write_characteristic_receive_notification(
self.reader_uuid, self._build_cmd(QueryCmdId.REG_SETTING_VAL_UPDATE)
)
@log_query
def unregister_value_update(self) -> GoProResp:
"""Stop receiving notifications when a given setting ID's value updates.
Returns:
GoProResp: Status of unregister
"""
self.communicator._unregister_listener((QueryCmdId.SETTING_VAL_PUSH, self.identifier))
return self.communicator._write_characteristic_receive_notification(
self.reader_uuid, self._build_cmd(QueryCmdId.UNREG_SETTING_VAL_UPDATE)
)
@log_query
def register_capability_update(self) -> GoProResp:
"""Register for asynchronous notifications when a given setting ID's capabilities update.
Returns:
GoProResp: Current capabilities of respective setting ID
"""
self.communicator._register_listener((QueryCmdId.SETTING_CAPABILITY_PUSH, self.identifier))
return self.communicator._write_characteristic_receive_notification(
self.reader_uuid, self._build_cmd(QueryCmdId.REG_CAPABILITIES_UPDATE)
)
@log_query
def unregister_capability_update(self) -> GoProResp:
"""Stop receiving notifications when a given setting ID's capabilities change.
Returns:
GoProResp: Status of unregister
"""
self.communicator._unregister_listener((QueryCmdId.SETTING_CAPABILITY_PUSH, self.identifier))
return self.communicator._write_characteristic_receive_notification(
self.reader_uuid, self._build_cmd(QueryCmdId.UNREG_CAPABILITIES_UPDATE)
)
def _build_cmd(self, cmd: QueryCmdId) -> bytearray:
"""Build the data to send a settings query over-the-air.
Args:
cmd (QueryCmdId): command to build
Returns:
bytearray: data to send over-the-air
"""
ret = bytearray([cmd.value, self.identifier.value])
ret.insert(0, len(ret))
return ret
class BleStatus:
"""An individual camera status that is interacted with via BLE.
Args:
communicator (GoProBle): Adapter to read status data
        identifier (StatusId): ID of status
        parser (BytesParser): parser to interpret the received status bytestream
    """
uuid: BleUUID = GoProUUIDs.CQ_QUERY
def __init__(self, communicator: GoProBle, identifier: StatusId, parser: BytesParser) -> None:
self.identifier = identifier
self.communicator = communicator
self.parser = parser
# Add to response parsing map
communicator._add_parser(self.identifier, self.parser)
def __str__(self) -> str: # pylint: disable=missing-return-doc
return str(self.identifier.name)
@log_query
def get_value(self) -> GoProResp:
"""Get the current value of a status.
Returns:
GoProResp: current status value
"""
return self.communicator._write_characteristic_receive_notification(
BleStatus.uuid, self._build_cmd(QueryCmdId.GET_STATUS_VAL)
)
@log_query
def register_value_update(self) -> GoProResp:
"""Register for asynchronous notifications when a status changes.
Returns:
GoProResp: current status value
"""
if (
response := self.communicator._write_characteristic_receive_notification(
BleStatus.uuid, self._build_cmd(QueryCmdId.REG_STATUS_VAL_UPDATE)
)
).is_ok:
self.communicator._register_listener((QueryCmdId.STATUS_VAL_PUSH, self.identifier))
return response
@log_query
def unregister_value_update(self) -> GoProResp:
"""Stop receiving notifications when status changes.
Returns:
GoProResp: Status of unregister
"""
if (
response := self.communicator._write_characteristic_receive_notification(
BleStatus.uuid, self._build_cmd(QueryCmdId.UNREG_STATUS_VAL_UPDATE)
)
).is_ok:
self.communicator._unregister_listener((QueryCmdId.STATUS_VAL_PUSH, self.identifier))
return response
def _build_cmd(self, cmd: QueryCmdId) -> bytearray:
"""Build the data for a given status command.
Args:
cmd (QueryCmdId): command to build data for
Returns:
bytearray: data to send over-the-air
"""
ret = bytearray([cmd.value, self.identifier.value])
ret.insert(0, len(ret))
return ret
######################################################## Wifi #################################################
@dataclass
class WifiGetJsonCommand:
"""The base class for all WiFi Commands. Stores common information.
Args:
communicator (GoProWifi): Wifi client to write command
endpoint (str): endpoint to GET
"""
communicator: GoProWifi
endpoint: str
response_parser: Optional[JsonParser] = None
def __post_init__(self) -> None:
if self.response_parser is not None:
self.communicator._add_parser(self.endpoint, self.response_parser)
@dataclass
class WifiGetJsonWithParams(WifiGetJsonCommand, Generic[CommandValueType]):
"""A Wifi command that writes to a BleUUID (with parameters) and receives JSON as response
Args:
communicator (GoProWifi): Wifi client to write command
endpoint (str): endpoint to GET
        response_parser (Optional[JsonParser]): the parser that will parse the received bytestream into a JSON dict
        param_builder (Optional[StringBuilder]): optional builder used to convert the value into its URL representation
    """
communicator: GoProWifi
endpoint: str
response_parser: Optional[JsonParser] = None
param_builder: Optional[StringBuilder] = None
# pylint: disable=missing-return-doc
def __call__(self, value: CommandValueType) -> GoProResp: # noqa: D102
values: List[CommandValueType] = [*value] if isinstance(value, Iterable) else [value]
logger.info(build_log_tx_str(f"{self.endpoint.format(*values)}"))
# Build list of args as they should be represented in URL
url_params = []
if self.param_builder is not None:
url_params.append(self.param_builder(value))
elif issubclass(type(value), enum.Enum):
url_params.append(value.value) # type: ignore
else:
url_params.extend(values) # type: ignore
url = self.endpoint.format(*url_params)
# Send to camera
response = self.communicator._get(url)
logger.info(build_log_rx_str(response))
return response
@dataclass
class WifiGetJsonNoParams(WifiGetJsonCommand):
"""A Wifi command that writes to a BleUUID (with parameters) and receives JSON as response
Args:
communicator (GoProWifi): Wifi client to write command
endpoint (str): endpoint to GET
response_parser (Optional[JsonParser]): the parser that will parse the received bytestream into a JSON dict
"""
# pylint: disable=missing-return-doc
def __call__(self) -> GoProResp: # noqa: D102
logger.info(build_log_tx_str(self.endpoint))
url = self.endpoint
# Send to camera
response = self.communicator._get(url)
logger.info(build_log_rx_str(response))
return response
# Ignoring because hitting this mypy bug: https://github.com/python/mypy/issues/5374
@dataclass # type: ignore
class WifiGetBinary(ABC):
"""A Wifi command that writes to a BleUUID (with parameters) and receives a binary stream as response
Args:
communicator (GoProWifi): Wifi client to write command
endpoint (str): endpoint to GET
"""
communicator: GoProWifi
endpoint: str
@abstractmethod
@no_type_check
# pylint: disable=missing-return-doc
def __call__(self, /, **kwargs) -> Path: # noqa: D102
# The method that will actually send the command and receive the stream
        # This method's signature shall be overridden by the subclass.
        # The subclass shall then pass the arguments to this method and return its returned response
# This pattern is technically violating the Liskov substitution principle. But we are accepting this as a
# tradeoff for exposing type hints on commands.
camera_file = kwargs["camera_file"]
local_file = Path(kwargs["local_file"]) if "local_file" in kwargs else Path(".") / camera_file
logger.info(build_log_tx_str(f"{self.endpoint.format(camera_file)} ===> {local_file}"))
url = self.endpoint.format(camera_file)
# Send to camera
self.communicator._stream_to_file(url, local_file)
logger.info(build_log_rx_str("SUCCESS"))
return local_file
class WifiSetting(Generic[SettingValueType]):
"""An individual camera setting that is interacted with via Wifi.
Args:
        communicator (GoProWifi): Adapter to read / write settings data
        identifier (SettingId): ID of setting
"""
def __init__(self, communicator: GoProWifi, identifier: SettingId) -> None:
self.identifier = identifier
self.communicator = communicator
# Note! It is assumed that BLE and WiFi settings are symmetric so we only add to the communicator's
# parser in the BLE Setting.
def __str__(self) -> str: # pylint: disable=missing-return-doc
return str(self.identifier.name)
def set(self, value: SettingValueType) -> GoProResp:
"""Set the value of the setting.
Args:
value (SettingValueType): value to set setting
Returns:
GoProResp: Status of set
"""
logger.info(build_log_tx_str(f"Setting {self.identifier}: {value}"))
# Build url. TODO fix this type error with Mixin (or passing in endpoint as argument)
value = value.value if isinstance(value, enum.Enum) else value
url = self.communicator._api.wifi_setting.endpoint.format(self.identifier.value, value) # type: ignore
# Send to camera
response = self.communicator._get(url)
if response is not None:
logger.info(f"-----------> \n{response}")
return response
```
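For reference, both `BleSetting._build_cmd` and `BleStatus._build_cmd` above emit the same length-prefixed `[len, query command, setting/status id]` packet. A minimal standalone sketch of that byte layout, using hypothetical ID values rather than the real `QueryCmdId`/`SettingId` enums:
```python
from enum import IntEnum

class FakeQueryCmdId(IntEnum):   # hypothetical stand-in for QueryCmdId
    GET_SETTING_VAL = 0x12

class FakeSettingId(IntEnum):    # hypothetical stand-in for SettingId
    RESOLUTION = 0x02

def build_query(cmd: IntEnum, identifier: IntEnum) -> bytearray:
    # Same layout as _build_cmd: build the payload first, then prepend its length.
    ret = bytearray([cmd.value, identifier.value])
    ret.insert(0, len(ret))
    return ret

print(build_query(FakeQueryCmdId.GET_SETTING_VAL, FakeSettingId.RESOLUTION).hex(":"))
# 02:12:02 -> length 2, query command 0x12, setting id 0x02
```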
#### File: open_gopro/ble/services.py
```python
from __future__ import annotations
import csv
import json
import logging
import uuid
from pathlib import Path
from enum import IntFlag, IntEnum
from dataclasses import dataclass, asdict, InitVar
from typing import Dict, Iterator, Generator, Mapping, Optional, Tuple, Type, no_type_check, Union, List
logger = logging.getLogger(__name__)
BLE_BASE_UUID = "0000{}-0000-1000-8000-00805F9B34FB"
class CharProps(IntFlag):
"""BLE Spec-Defined Characteristic Property bitmask values"""
NONE = 0x00
BROADCAST = 0x01
READ = 0x02
WRITE_NO_RSP = 0x04
WRITE_YES_RSP = 0x08
NOTIFY = 0x10
INDICATE = 0x20
AUTH_SIGN_WRITE = 0x40
EXTENDED = 0x80
NOTIFY_ENCRYPTION_REQ = 0x100
INDICATE_ENCRYPTION_REQ = 0x200
class SpecUuidNumber(IntEnum):
"""BLE Spec-Defined BleUUID Number values as ints"""
PRIMARY_SERVICE = 0x2800
SECONDARY_SERVICE = 0x2801
INCLUDE = 0x2802
CHAR_DECLARATION = 0x2803
CHAR_EXTENDED_PROPS = 0x2900
CHAR_USER_DESCR = 0x2901
CLIENT_CHAR_CONFIG = 0x2902
SERVER_CHAR_CONFIG = 0x2903
CHAR_FORMAT = 0x2904
CHAR_AGGREGATE_FORMAT = 0x2905
class UuidLength(IntEnum):
"""Used to specify 8-bit or 128-bit UUIDs"""
BIT_16 = 2
BIT_128 = 16
class BleUUID(uuid.UUID):
"""Used to identify BLE BleUUID's
A extension of the standard UUID to associate a string name with the UUID and allow 8-bit UUID input
"""
# pylint: disable=redefined-builtin
def __init__(
self,
name: str,
uuid_format: UuidLength = UuidLength.BIT_128,
hex: Optional[str] = None,
bytes: Optional[bytes] = None,
bytes_le: Optional[bytes] = None,
int: Optional[int] = None,
) -> None:
self.name: str
if uuid_format is UuidLength.BIT_16:
if [hex, bytes, bytes_le, int].count(None) != 3:
raise ValueError("Only one of [hex, bytes, bytes_le, int] can be set.")
if hex:
if len(hex) != 4:
raise ValueError("badly formed 8-bit hexadecimal UUID string")
hex = BLE_BASE_UUID.format(hex)
elif bytes:
if len(bytes) != 2:
raise ValueError("badly formed 8-bit byte input")
bytes = uuid.UUID(hex=BLE_BASE_UUID.format(bytes.hex())).bytes
elif bytes_le:
raise ValueError("byte_le not possible with 8-bit UUID")
elif int:
int = uuid.UUID(hex=BLE_BASE_UUID.format(int.to_bytes(2, "big").hex())).int
object.__setattr__(self, "name", name) # needed to work around immutability in base class
super().__init__(hex=hex, bytes=bytes, bytes_le=bytes_le, int=int)
def __str__(self) -> str: # pylint: disable=missing-return-doc
return self.hex if self.name == "" else self.name
def __repr__(self) -> str: # pylint: disable=missing-return-doc
return self.__str__()
@dataclass
class Descriptor:
"""A charactersistic descriptor.
Args:
handle (int) : the handle of the attribute table that the descriptor resides at
uuid (BleUUID): BleUUID of this descriptor
value (bytes) : the byte stream value of the descriptor
"""
handle: int
uuid: BleUUID
value: Optional[bytes] = None
def __str__(self) -> str: # pylint: disable=missing-return-doc
return json.dumps(asdict(self), indent=4, default=str)
@property
def name(self) -> str:
"""What is the human-readable name of this characteristic?
Returns:
str: characteristic's name
"""
return self.uuid.name
@dataclass
class Characteristic:
"""A BLE charactersistic.
Args:
handle (int) : the handle of the attribute table that the characteristic resides at
uuid (BleUUID) : the BleUUID of the characteristic
props (CharProps) : the characteristic's properties (READ, WRITE, NOTIFY, etc)
value (bytes) : the current byte stream value of the characteristic value
init_descriptors (Optional[List[Descriptor]]) : Descriptors known at initialization (can also be
set later using the descriptors property)
descriptor_handle (Optional[int]) : handle of this characteristic's declaration descriptor. If not
passed, defaults to handle + 1
"""
handle: int
uuid: BleUUID
props: CharProps
value: Optional[bytes] = None
init_descriptors: InitVar[Optional[List[Descriptor]]] = None
descriptor_handle: Optional[int] = None
def __post_init__(self, init_descriptors: Optional[List[Descriptor]]) -> None:
self._descriptors: Dict[BleUUID, Descriptor] = {}
# Mypy should eventually support this: see https://github.com/python/mypy/issues/3004
self.descriptors = init_descriptors or [] # type: ignore
if self.descriptor_handle is None:
self.descriptor_handle = self.handle + 1
def __str__(self) -> str: # pylint: disable=missing-return-doc
return f"{self.name} @ handle {self.handle}: {self.props.name}"
@property
def descriptors(self) -> Dict[BleUUID, Descriptor]:
"""Return uuid-to-descriptor mapping
Returns:
Dict[BleUUID, Descriptor]: dictionary of descriptors indexed by BleUUID
"""
return self._descriptors
@descriptors.setter
def descriptors(self, descriptors: List[Descriptor]) -> None:
for descriptor in descriptors:
self._descriptors[descriptor.uuid] = descriptor
@property
def name(self) -> str:
"""What is the human-readable name of this characteristic?
Returns:
str: characteristic's name
"""
return self.uuid.name
@property
def is_readable(self) -> bool:
"""Does this characteric have readable property?
Returns:
bool: True if readable, False if not
"""
return CharProps.READ in self.props
@property
def is_writeable_with_response(self) -> bool:
"""Does this characteric have writeable-with-response property?
Returns:
bool: True if writeable-with-response, False if not
"""
return CharProps.WRITE_YES_RSP in self.props
@property
def is_writeable_without_response(self) -> bool:
"""Does this characteric have writeable-without-response property?
Returns:
bool: True if writeable-without-response, False if not
"""
return CharProps.WRITE_NO_RSP in self.props
@property
def is_writeable(self) -> bool:
"""Does this characteric have writeable property?
That is, does it have writeable-with-response or writeable-without-response property
Returns:
bool: True if writeable, False if not
"""
return self.is_writeable_with_response or self.is_writeable_without_response
@property
def is_notifiable(self) -> bool:
"""Does this characteric have notifiable property?
Returns:
bool: True if notifiable, False if not
"""
return CharProps.NOTIFY in self.props
@property
def is_indicatable(self) -> bool:
"""Does this characteric have indicatable property?
Returns:
bool: True if indicatable, False if not
"""
return CharProps.INDICATE in self.props
@property
def cccd_handle(self) -> int:
"""What is this characteristics CCCD (client characteristic configuration descriptor) handle
Returns:
int: the CCCD handle
"""
return self._descriptors[UUIDs.CLIENT_CHAR_CONFIG].handle
@dataclass
class Service:
"""A BLE service or grouping of Characteristics.
Args:
uuid (BleUUID) : the service's BleUUID
start_handle(int): the attribute handle where the service begins
end_handle(int): the attribute handle where the service ends. Defaults to 0xFFFF.
        init_chars (List[Characteristic]) : list of characteristics known at service instantiation. Can be set
later with the characteristics property
"""
uuid: BleUUID
start_handle: int
end_handle: int = 0xFFFF
init_chars: InitVar[Optional[List[Characteristic]]] = None
def __post_init__(self, init_characteristics: Optional[List[Characteristic]]) -> None:
self._characteristics: Dict[BleUUID, Characteristic] = {}
# Mypy should eventually support this: see https://github.com/python/mypy/issues/3004
self.characteristics = init_characteristics or [] # type: ignore
def __str__(self) -> str: # pylint: disable=missing-return-doc
return self.name
@property
def characteristics(self) -> Dict[BleUUID, Characteristic]:
"""Return uuid-to-characteristic mapping
Returns:
Dict[BleUUID, Characteristic]: Dict of characteristics indexed by uuid
"""
return self._characteristics
@characteristics.setter
def characteristics(self, characteristics: List[Characteristic]) -> None:
for characteristic in characteristics:
self._characteristics[characteristic.uuid] = characteristic
@property
def name(self) -> str:
"""What is the human-readable name of this characteristic?
Returns:
str: characteristic's name
"""
return self.uuid.name
class GattDB:
"""The attribute table to store / look up BLE services, characteristics, and attributes.
Args:
        init_services (List[Service]): A list of services known at instantiation time. Can be updated later
with the services property
"""
# TODO fix typing here
class CharacteristicView(Mapping[BleUUID, Characteristic]):
"""Represent the GattDB mapping as characteristics indexed by BleUUID"""
def __init__(self, db: "GattDB") -> None:
self._db = db
def __getitem__(self, key: BleUUID) -> Characteristic: # pylint: disable=missing-return-doc
for service in self._db.services.values():
for char in service.characteristics.values():
if char.uuid == key:
return char
raise KeyError
def __contains__(self, key: object) -> bool: # pylint: disable=missing-return-doc
for service in self._db.services.values():
for char in service.characteristics.values():
if char.uuid == key:
return True
return False
@no_type_check
def __iter__(self) -> Iterator[Characteristic]: # pylint: disable=missing-return-doc
return iter(self.values())
def __len__(self) -> int: # pylint: disable=missing-return-doc
return sum(len(service.characteristics) for service in self._db.services.values())
@no_type_check
def keys(self) -> Generator[BleUUID, None, None]: # pylint: disable=missing-return-doc
def iter_keys():
for service in self._db.services.values():
for ble_uuid in service.characteristics.keys():
yield ble_uuid
return iter_keys()
@no_type_check
def values(self) -> Generator[Characteristic, None, None]: # pylint: disable=missing-return-doc
def iter_values():
for service in self._db.services.values():
for char in service.characteristics.values():
yield char
return iter_values()
@no_type_check
def items( # pylint: disable=missing-return-doc
self,
) -> Generator[Tuple[BleUUID, Characteristic], None, None]:
def iter_items():
for service in self._db.services.values():
for ble_uuid, char in service.characteristics.items():
yield (ble_uuid, char)
return iter_items()
def __init__(self, init_services: List[Service]) -> None:
self._services: Dict[BleUUID, Service] = {}
# TODO add ServicesView to align with characteristics
# Mypy should eventually support this: see https://github.com/python/mypy/issues/3004
self.services = init_services # type: ignore
self.characteristics = self.CharacteristicView(self)
@property
def services(self) -> Dict[BleUUID, Service]:
"""Return uuid-to-service mapping
Returns:
Dict[BleUUID, Service]: Dict of services indexed by uuid
"""
return self._services
@services.setter
def services(self, services: List[Service]) -> None:
for service in services:
self._services[service.uuid] = service
def handle2uuid(self, handle: int) -> BleUUID:
"""Get a BleUUID from a handle.
Args:
handle (int): the handle to search for
Raises:
KeyError: No characteristic was found at this handle
Returns:
BleUUID: The found BleUUID
"""
for s in self.services.values():
for c in s.characteristics.values():
if c.handle == handle:
return c.uuid
raise KeyError(f"Matching BleUUID not found for handle {handle}")
def uuid2handle(self, ble_uuid: BleUUID) -> int:
"""Convert a handle to a BleUUID
Args:
ble_uuid (BleUUID): BleUUID to translate
Returns:
int: the handle in the Gatt Database where this BleUUID resides
"""
return self.characteristics[ble_uuid].handle
def dump_to_csv(self, file: Path = Path("attributes.csv")) -> None:
"""Dump discovered services to a csv file.
Args:
file (Path): File to write to. Defaults to "./attributes.csv".
"""
with open(file, mode="w") as f:
logger.debug(f"Dumping discovered BLE characteristics to {file}")
w = csv.writer(f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
w.writerow(["handle", "description", BleUUID, "properties", "value"])
# For each service in table
for service in self.services.values():
w.writerow(
[
service.start_handle,
SpecUuidNumber.PRIMARY_SERVICE,
service.uuid.hex,
service.name,
"SERVICE",
]
)
# For each characteristic in service
for char in service.characteristics.values():
w.writerow(
[char.descriptor_handle, SpecUuidNumber.CHAR_DECLARATION, "28:03", str(char.props), ""]
)
w.writerow([char.handle, char.name, char.uuid.hex, "", char.value])
# For each descriptor in characteristic
for descriptor in char.descriptors.values():
w.writerow(
[descriptor.handle, descriptor.name, descriptor.uuid.hex, "", descriptor.value]
)
class UUIDsMeta(type):
"""The metaclass used to build a UUIDs container
Upon creation of a new UUIDs class, this will store the BleUUID names in an internal mapping indexed by UUID as int
"""
@no_type_check
# pylint: disable=missing-return-doc
def __new__(cls, name, bases, dct) -> UUIDsMeta: # noqa
x = super().__new__(cls, name, bases, dct)
x._int2uuid = {}
for db in [*[base.__dict__ for base in bases], dct]:
for _, ble_uuid in [(k, v) for k, v in db.items() if not k.startswith("_")]:
if not isinstance(ble_uuid, BleUUID):
raise TypeError("This class can only be composed of BleUUID attributes")
x._int2uuid[ble_uuid.int] = ble_uuid
return x
@no_type_check
def __getitem__(cls, key: Union[uuid.UUID, int, str]) -> BleUUID: # pylint: disable=missing-return-doc
if isinstance(key, uuid.UUID):
return cls._int2uuid[key.int]
if isinstance(key, int):
return cls._int2uuid[key]
if isinstance(key, str):
return cls._int2uuid[uuid.UUID(hex=key).int]
raise TypeError("Key must be of type Union[uuid.UUID, int, str]")
@no_type_check
def __contains__(cls, key: Union[uuid.UUID, int, str]) -> bool: # pylint: disable=missing-return-doc
if isinstance(key, uuid.UUID):
return key.int in cls._int2uuid
if isinstance(key, int):
return key in cls._int2uuid
if isinstance(key, str):
            # Build a uuid.UUID to use its normalization
return uuid.UUID(hex=key).int in cls._int2uuid
raise TypeError("Key must be of type Union[uuid.UUID, int, str]")
@no_type_check
def __iter__(cls): # pylint: disable=missing-return-doc
for item in cls._int2uuid.items():
yield item
@dataclass(frozen=True)
class UUIDs(metaclass=UUIDsMeta):
"""BLE Spec-defined UUIDs that are common across all applications.
Also functions as a dict to look up UUID's by str, int, or BleUUID
"""
@no_type_check
def __new__(cls: Type[UUIDs]) -> Type[UUIDs]: # noqa
raise Exception("This class shall not be instantiated")
# GATT Identifiers
PRIMARY_SERVICE = BleUUID(
"Primary Service",
uuid_format=UuidLength.BIT_16,
int=SpecUuidNumber.PRIMARY_SERVICE,
)
SECONDARY_SERVICE = BleUUID(
"Secondary Service",
uuid_format=UuidLength.BIT_16,
int=SpecUuidNumber.SECONDARY_SERVICE,
)
INCLUDE = BleUUID(
"Characteristic Include Descriptor",
uuid_format=UuidLength.BIT_16,
int=SpecUuidNumber.INCLUDE,
)
CHAR_DECLARATION = BleUUID(
"Characteristic Declaration",
uuid_format=UuidLength.BIT_16,
int=SpecUuidNumber.CHAR_DECLARATION,
)
CHAR_EXTENDED_PROPS = BleUUID(
"Characteristic Extended Properties",
uuid_format=UuidLength.BIT_16,
int=SpecUuidNumber.CHAR_EXTENDED_PROPS,
)
CHAR_USER_DESCR = BleUUID(
"Characteristic User Description",
uuid_format=UuidLength.BIT_16,
int=SpecUuidNumber.CHAR_USER_DESCR,
)
CLIENT_CHAR_CONFIG = BleUUID(
"Client Characteristic Configuration",
uuid_format=UuidLength.BIT_16,
int=SpecUuidNumber.CLIENT_CHAR_CONFIG,
)
SERVER_CHAR_CONFIG = BleUUID(
"Server Characteristic Configuration",
uuid_format=UuidLength.BIT_16,
int=SpecUuidNumber.SERVER_CHAR_CONFIG,
)
CHAR_FORMAT = BleUUID(
"Characteristic Format",
uuid_format=UuidLength.BIT_16,
int=SpecUuidNumber.CHAR_FORMAT,
)
CHAR_AGGREGATE_FORMAT = BleUUID(
"Characteristic Aggregate Format",
uuid_format=UuidLength.BIT_16,
int=SpecUuidNumber.CHAR_AGGREGATE_FORMAT,
)
# Generic Attribute Service
S_GENERIC_ATT = BleUUID("Generic Attribute Service", hex=BLE_BASE_UUID.format("1801"))
# Generic Access Service
S_GENERIC_ACCESS = BleUUID("Generic Access Service", hex=BLE_BASE_UUID.format("1800"))
ACC_DEVICE_NAME = BleUUID("Device Name", hex=BLE_BASE_UUID.format("2a00"))
ACC_APPEARANCE = BleUUID("Appearance", hex=BLE_BASE_UUID.format("2a01"))
ACC_PREF_CONN_PARAMS = BleUUID("Preferred Connection Parameters", hex=BLE_BASE_UUID.format("2a04"))
ACC_CENTRAL_ADDR_RES = BleUUID("Central Address Resolution", hex=BLE_BASE_UUID.format("2aa6"))
# Tx Power
S_TX_POWER = BleUUID("Tx Power Service", hex=BLE_BASE_UUID.format("1804"))
TX_POWER_LEVEL = BleUUID("Tx Power Level", hex=BLE_BASE_UUID.format("2a07"))
# Battery Service
S_BATTERY = BleUUID("Battery Service", hex=BLE_BASE_UUID.format("180f"))
BATT_LEVEL = BleUUID("Battery Level", hex=BLE_BASE_UUID.format("2a19"))
# Device Information Service
S_DEV_INFO = BleUUID("Device Information Service", hex=BLE_BASE_UUID.format("180a"))
INF_MAN_NAME = BleUUID("Manufacturer Name", hex=BLE_BASE_UUID.format("2a29"))
INF_MODEL_NUM = BleUUID("Model Number", hex=BLE_BASE_UUID.format("2a24"))
INF_SERIAL_NUM = BleUUID("Serial Number", hex=BLE_BASE_UUID.format("2a25"))
INF_FW_REV = BleUUID("Firmware Revision", hex=BLE_BASE_UUID.format("2a26"))
INF_HW_REV = BleUUID("Hardware Revision", hex=BLE_BASE_UUID.format("2a27"))
INF_SW_REV = BleUUID("Software Revision", hex=BLE_BASE_UUID.format("2a28"))
INF_SYS_ID = BleUUID("System ID", hex=BLE_BASE_UUID.format("2a23"))
INF_CERT_DATA = BleUUID("Certification Data", hex=BLE_BASE_UUID.format("2a2a"))
INF_PNP_ID = BleUUID("PNP ID", hex=BLE_BASE_UUID.format("2a50"))
```
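The 16-bit handling in `BleUUID` above is the standard Bluetooth base-UUID substitution; a quick standard-library sketch of the same expansion (no `open_gopro` import needed):
```python
import uuid

BLE_BASE_UUID = "0000{}-0000-1000-8000-00805F9B34FB"

# Expand the 16-bit Battery Level characteristic (0x2a19) to its 128-bit form,
# mirroring what BleUUID does for UuidLength.BIT_16 hex input.
full = uuid.UUID(hex=BLE_BASE_UUID.format("2a19"))
print(full)  # 00002a19-0000-1000-8000-00805f9b34fb
```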
#### File: tutorial_modules/tutorial_5_connect_wifi/wifi_enable.py
```python
import sys
import time
import asyncio
import logging
import argparse
from binascii import hexlify
from typing import Tuple, Optional
from bleak import BleakClient
from tutorial_modules import GOPRO_BASE_UUID, connect_ble
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
async def enable_wifi(identifier: Optional[str] = None) -> Tuple[str, str, BleakClient]:
    """Connect to a GoPro via BLE, find its WiFi AP SSID and password, and enable its WiFi AP
If identifier is None, the first discovered GoPro will be connected to.
Args:
identifier (str, optional): Last 4 digits of GoPro serial number. Defaults to None.
Returns:
        Tuple[str, str, BleakClient]: ssid, password, and the connected BLE client
"""
# Synchronization event to wait until notification response is received
event = asyncio.Event()
# UUIDs to write to and receive responses from, and read from
COMMAND_REQ_UUID = GOPRO_BASE_UUID.format("0072")
COMMAND_RSP_UUID = GOPRO_BASE_UUID.format("0073")
WIFI_AP_SSID_UUID = GOPRO_BASE_UUID.format("0002")
    WIFI_AP_PASSWORD_UUID = GOPRO_BASE_UUID.format("0003")
client: BleakClient
def notification_handler(handle: int, data: bytes) -> None:
logger.info(f'Received response at {handle=}: {hexlify(data, ":")!r}')
# If this is the correct handle and the status is success, the command was a success
if client.services.characteristics[handle].uuid == COMMAND_RSP_UUID and data[2] == 0x00:
logger.info("Command sent successfully")
# Anything else is unexpected. This shouldn't happen
else:
logger.error("Unexpected response")
# Notify the writer
event.set()
client = await connect_ble(notification_handler, identifier)
# Read from WiFi AP SSID BleUUID
logger.info("Reading the WiFi AP SSID")
ssid = await client.read_gatt_char(WIFI_AP_SSID_UUID)
ssid = ssid.decode()
logger.info(f"SSID is {ssid}")
# Read from WiFi AP Password BleUUID
logger.info("Reading the WiFi AP password")
password = await client.read_gatt_char(WIFI_AP_PASSWORD_UUID)
password = password.decode()
logger.info(f"Password is {password}")
# Write to the Command Request BleUUID to enable WiFi
logger.info("Enabling the WiFi AP")
event.clear()
await client.write_gatt_char(COMMAND_REQ_UUID, bytearray([0x03, 0x17, 0x01, 0x01]))
await event.wait() # Wait to receive the notification response
logger.info("WiFi AP is enabled")
return ssid, password, client
async def main(identifier: Optional[str], timeout: Optional[int]) -> None:
*_, client = await enable_wifi(identifier)
if not timeout:
logger.info("Maintaining BLE Connection indefinitely. Send keyboard interrupt to exit.")
while True:
time.sleep(1)
else:
logger.info(f"Maintaining BLE connection for {timeout} seconds")
time.sleep(timeout)
await client.disconnect()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Connect to a GoPro camera via BLE, get WiFi info, and enable WiFi."
)
parser.add_argument(
"-i",
"--identifier",
type=str,
help="Last 4 digits of GoPro serial number, which is the last 4 digits of the default \
camera SSID. If not used, first discovered GoPro will be connected to",
default=None,
)
parser.add_argument(
"-t",
"--timeout",
type=int,
help="time in seconds to maintain connection before disconnecting. If not set, will maintain connection indefinitely",
default=None,
)
args = parser.parse_args()
try:
asyncio.run(main(args.identifier, args.timeout))
except:
sys.exit(-1)
else:
sys.exit(0)
``` |
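A minimal sketch of reusing `enable_wifi` from another script; it assumes the repo's `tutorial_modules` package is installed, that this file is importable as `wifi_enable`, and that a GoPro is in pairing range (untested here):
```python
import asyncio

from wifi_enable import enable_wifi  # assumed module name

async def main() -> None:
    ssid, password, client = await enable_wifi()  # first discovered GoPro
    print(f"Join '{ssid}' with password '{password}'")
    await client.disconnect()  # drop BLE once the WiFi info has been read

if __name__ == "__main__":
    asyncio.run(main())
```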
{
"source": "jklippel/synapse",
"score": 2
} |
#### File: synapse/handlers/event_auth.py
```python
from typing import TYPE_CHECKING
from synapse.api.constants import EventTypes, JoinRules
from synapse.api.room_versions import RoomVersion
from synapse.types import StateMap
if TYPE_CHECKING:
from synapse.server import HomeServer
class EventAuthHandler:
"""
This class contains methods for authenticating events added to room graphs.
"""
def __init__(self, hs: "HomeServer"):
self._store = hs.get_datastore()
async def can_join_without_invite(
self, state_ids: StateMap[str], room_version: RoomVersion, user_id: str
) -> bool:
"""
Check whether a user can join a room without an invite.
        When joining a room with restricted join rules (as defined in MSC3083),
the membership of spaces must be checked during join.
Args:
state_ids: The state of the room as it currently is.
room_version: The room version of the room being joined.
user_id: The user joining the room.
Returns:
True if the user can join the room, false otherwise.
"""
# This only applies to room versions which support the new join rule.
if not room_version.msc3083_join_rules:
return True
# If there's no join rule, then it defaults to invite (so this doesn't apply).
join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None)
if not join_rules_event_id:
return True
# If the join rule is not restricted, this doesn't apply.
join_rules_event = await self._store.get_event(join_rules_event_id)
if join_rules_event.content.get("join_rule") != JoinRules.MSC3083_RESTRICTED:
return True
# If allowed is of the wrong form, then only allow invited users.
allowed_spaces = join_rules_event.content.get("allow", [])
if not isinstance(allowed_spaces, list):
return False
# Get the list of joined rooms and see if there's an overlap.
joined_rooms = await self._store.get_rooms_for_user(user_id)
# Pull out the other room IDs, invalid data gets filtered.
for space in allowed_spaces:
if not isinstance(space, dict):
continue
space_id = space.get("space")
if not isinstance(space_id, str):
continue
# The user was joined to one of the spaces specified, they can join
# this room!
if space_id in joined_rooms:
return True
# The user was not in any of the required spaces.
return False
``` |
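The MSC3083 `allow` content consumed above is just a list of dicts with a `space` key. A standalone sketch of the same filtering logic over plain data, using made-up room IDs instead of the real Synapse datastore:
```python
from typing import Any, Iterable

def allowed_by_spaces(allowed_spaces: Any, joined_rooms: Iterable[str]) -> bool:
    """Mirror of the allow-list walk: malformed entries are skipped,
    and one matching space ID is enough."""
    if not isinstance(allowed_spaces, list):
        return False
    joined = set(joined_rooms)
    for space in allowed_spaces:
        if not isinstance(space, dict):
            continue
        space_id = space.get("space")
        if isinstance(space_id, str) and space_id in joined:
            return True
    return False

allow = [{"space": "!space1:example.org"}, "garbage", {"space": 42}]
print(allowed_by_spaces(allow, {"!space1:example.org"}))  # True
print(allowed_by_spaces(allow, {"!other:example.org"}))   # False
```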
{
"source": "jklmn13/junction",
"score": 2
} |
#### File: junction/feedback/service.py
```python
from collections import defaultdict
# Third Party Stuff
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError, transaction
from django.db.models import Count
# Junction Stuff
from junction.devices.models import Device
from junction.schedule.models import ScheduleItem, ScheduleItemType
from .models import (
ChoiceFeedbackQuestion,
ChoiceFeedbackQuestionValue,
ScheduleItemChoiceFeedback,
ScheduleItemTextFeedback,
TextFeedbackQuestion
)
COLORS = ["#46BFBD", "#FDB45C", "#F7464A"]
def get_feedback_questions(conference_id):
"""Get all feedback questions for the conference.
    The returned dict is keyed by schedule item type, e.g.:
        {'talk': {'text': [{'id': 1, 'title': 'How was the speaker ?',
                            'is_required': True}],
                  'choice': [{'id': 1, 'title': 'How was the content ?',
                              'is_required': True,
                              'allowed_values': [{'title': 'Awesome', 'id': 2},
                                                 {'title': 'Bad', 'id': 3},
                                                 {'title': 'Ok', 'id': 4}]}]}}
"""
text_questions = get_text_feedback_questions(
conference_id=conference_id)
choice_questions = get_choice_feedback_questions(
conference_id=conference_id)
return _merge_questions(text_questions=text_questions,
choice_questions=choice_questions)
def get_text_feedback_questions(conference_id):
"""Get all text questions for the conference organized by
schedule item type.
Return dict contain all questions with schedule item type in dict.
"""
qs = TextFeedbackQuestion.objects.filter(conference_id=conference_id)
return _get_question_oragnized_by_type(qs)
def get_choice_feedback_questions(conference_id):
"""Get all choice based questions for the conference organized by
schedule item type.
"""
qs = ChoiceFeedbackQuestion.objects.filter(
conference_id=conference_id).select_related('allowed_values')
return _get_question_oragnized_by_type(qs)
def has_submitted(feedback, device_uuid):
    """Check whether the given device has already submitted feedback
    for the schedule item referenced by the validated feedback data.
    """
    device = Device.objects.get(uuid=device_uuid)
    text_feedback = ScheduleItemTextFeedback.objects.filter(
        schedule_item_id=feedback.validated_data['schedule_item_id'],
        device=device)
    if text_feedback.exists():
        return True
    choice_feedback = ScheduleItemChoiceFeedback.objects.filter(
        schedule_item_id=feedback.validated_data['schedule_item_id'],
        device=device)
    return choice_feedback.exists()
def _has_required_ids(master, submitted):
for item in master:
if item not in submitted:
return False
return True
def has_required_fields_data(feedback):
try:
data = feedback.validated_data
sch = ScheduleItem.objects.get(pk=data['schedule_item_id'])
sch_type = ScheduleItemType.objects.get(
title=sch.type)
t_ids = TextFeedbackQuestion.objects.filter(
schedule_item_type=sch_type,
conference=sch.conference, is_required=True).values_list(
'id', flat=True)
if not data.get('text'):
if t_ids:
return False, "Text Feedback is missing"
else:
submitted_t_ids = {d['id'] for d in data.get('text')}
if not _has_required_ids(master=t_ids, submitted=submitted_t_ids):
return False, "Required text questions are missing"
c_ids = ChoiceFeedbackQuestion.objects.filter(
schedule_item_type=sch_type,
conference=sch.conference, is_required=True).values_list(
'id', flat=True)
if not data.get('choices'):
if c_ids:
return False, "Choice feedback is missing"
else:
submitted_c_ids = {d['id'] for d in data.get('choices')}
if not _has_required_ids(master=c_ids, submitted=submitted_c_ids):
return False, "Choice feedback is missing"
return True, ""
    except ObjectDoesNotExist as e:
        print(e)
        return False, str(e)
def create(feedback, device_uuid):
device = Device.objects.get(uuid=device_uuid)
schedule_item_id = feedback.validated_data['schedule_item_id']
try:
with transaction.atomic():
text, choices = [], []
if feedback.validated_data.get('text'):
text = create_text_feedback(
schedule_item_id=schedule_item_id,
feedbacks=feedback.validated_data.get('text'),
device=device)
if feedback.validated_data.get('choices'):
choices = create_choice_feedback(
schedule_item_id=schedule_item_id,
feedbacks=feedback.validated_data.get('choices'),
device=device)
return {'text': text, 'choices': choices}
except (IntegrityError, ObjectDoesNotExist) as e:
print(e) # Replace with log
return False
def create_text_feedback(schedule_item_id, feedbacks, device):
text = []
for feedback in feedbacks:
obj = ScheduleItemTextFeedback.objects.create(
schedule_item_id=schedule_item_id,
question_id=feedback['id'],
text=feedback['text'], device=device)
d = {'id': obj.id, 'text': obj.text,
'question_id': feedback['id'],
'schedule_item_id': schedule_item_id}
text.append(d)
return text
def create_choice_feedback(schedule_item_id, feedbacks, device):
choices = []
for feedback in feedbacks:
value = ChoiceFeedbackQuestionValue.objects.get(
question_id=feedback['id'], id=feedback['value_id'])
obj = ScheduleItemChoiceFeedback.objects.create(
schedule_item_id=schedule_item_id, device=device,
question_id=feedback['id'], value=value.value)
d = {'id': obj.id, 'value_id': value.id,
'question_id': feedback['id'],
'schedule_item_id': schedule_item_id}
choices.append(d)
return choices
def get_feedback(schedule_item):
feedback = {'text': _get_text_feedback(schedule_item=schedule_item),
'choices': _get_choice_feedback(
schedule_item=schedule_item)}
return feedback
def _get_text_feedback(schedule_item):
questions = TextFeedbackQuestion.objects.filter(
schedule_item_type__title=schedule_item.type)
text = [{'question': question,
'values': ScheduleItemTextFeedback.objects.filter(
question=question, schedule_item=schedule_item)}
for question in questions]
return text
def _get_choice_feedback(schedule_item):
questions = ChoiceFeedbackQuestion.objects.filter(
schedule_item_type__title=schedule_item.type).select_related(
'allowed_values')
choices = []
for question in questions:
values = ScheduleItemChoiceFeedback.objects.filter(
schedule_item=schedule_item, question=question).values(
'value').annotate(Count('value'))
d = {'question': question,
'values': _get_choice_value_for_chart(question=question,
values=values)}
choices.append(d)
return choices
def _get_choice_value_for_chart(question, values):
data = []
for index, value in enumerate(values):
d = {'label': str(question.allowed_values.get(
value=value['value']).title)}
d['value'] = value['value__count']
d['color'] = COLORS[index]
data.append(d)
return data
def _get_question_oragnized_by_type(qs):
questions = defaultdict(list)
for question in qs:
questions[question.schedule_item_type.title].append(
question.to_response())
return questions
def _merge_questions(text_questions, choice_questions):
"""Merge the choice and text based questions into schedule type
{'Talk': {'text': [..], 'choice': [...]},}
"""
    types = set(text_questions.keys()) | set(choice_questions.keys())
questions = {}
for item in types:
questions[item] = {'text': text_questions.get(item),
'choice': choice_questions.get(item)}
return questions
```
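`_merge_questions` only reshapes plain dictionaries, so its contract can be illustrated without the Django models; a standalone sketch with made-up question payloads shaped like the `to_response()` dicts:
```python
def merge_questions(text_questions, choice_questions):
    # Standalone mirror of _merge_questions: group by schedule item type.
    types = set(text_questions) | set(choice_questions)
    return {t: {'text': text_questions.get(t), 'choice': choice_questions.get(t)}
            for t in types}

text = {'Talk': [{'id': 1, 'title': 'How was the speaker ?', 'is_required': True}]}
choice = {'Workshop': [{'id': 2, 'title': 'How was the content ?', 'is_required': True,
                        'allowed_values': [{'id': 3, 'title': 'Awesome'}]}]}
print(merge_questions(text, choice))
# {'Talk': {'text': [...], 'choice': None}, 'Workshop': {'text': None, 'choice': [...]}}
```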
#### File: junction/proposals/permissions.py
```python
from django.core.exceptions import PermissionDenied
# Junction Stuff
from junction.conferences.models import ConferenceProposalReviewer
from .models import ProposalSectionReviewer
def is_proposal_author(user, proposal):
return user.is_authenticated() and proposal.author == user
def is_proposal_reviewer(user, conference):
authenticated = user.is_authenticated()
is_reviewer = ConferenceProposalReviewer.objects.filter(
reviewer=user.id, conference=conference, active=True).exists()
return authenticated and is_reviewer
def is_proposal_section_reviewer(user, conference, proposal):
return user.is_authenticated() and ProposalSectionReviewer.objects.filter(
conference_reviewer__reviewer=user,
conference_reviewer__conference=conference,
proposal_section=proposal.proposal_section,
active=True).exists()
def is_proposal_author_or_proposal_reviewer(user, conference, proposal):
reviewer = is_proposal_reviewer(user, conference)
author = is_proposal_author(user, proposal)
return reviewer or author
def is_proposal_author_or_proposal_section_reviewer(user,
conference, proposal):
return is_proposal_author(user, proposal) or \
is_proposal_section_reviewer(user, conference, proposal)
def is_proposal_author_or_permisson_denied(user, proposal):
if is_proposal_author(user, proposal):
return True
raise PermissionDenied
```
#### File: junction/proposals/services.py
```python
from __future__ import absolute_import, unicode_literals
# Standard Library
import logging
# Third Party Stuff
from django.conf import settings
from markdown2 import markdown
# Junction Stuff
from junction.base.emailer import send_email
from .models import ProposalSection, ProposalSectionReviewer
logger = logging.getLogger(__name__)
def markdown_to_html(md):
"""
Convert given markdown to html.
:param md: string
:return: string - converted html
"""
return markdown(md)
def send_mail_for_new_comment(proposal_comment, host):
proposal = proposal_comment.proposal
login_url = '{}?next={}'.format(settings.LOGIN_URL, proposal.get_absolute_url())
send_to = comment_recipients(proposal_comment)
commenter = proposal_comment.commenter
proposal_comment.comment = markdown_to_html(proposal_comment.comment)
for to in send_to:
if to == proposal_comment.commenter:
continue
send_email(to=to,
template_dir='proposals/email/comment',
context={'to': to,
'host': host,
'login_url': login_url,
'proposal': proposal,
'comment': proposal_comment,
'commenter': commenter,
'by_author': commenter == proposal.author})
def comment_recipients(proposal_comment):
proposal = proposal_comment.proposal
if proposal_comment.reviewer:
recipients = _get_proposal_section_reviewers(
proposal=proposal)
elif proposal_comment.private:
recipients = _get_proposal_section_reviewers(
proposal=proposal)
recipients.add(proposal.author)
else:
recipients = {
comment.commenter
for comment in proposal.proposalcomment_set
.all().select_related('commenter')}
recipients.add(proposal.author)
return recipients
def send_mail_for_new_proposal(proposal, host):
proposal_section = ProposalSection.objects.get(
pk=proposal.proposal_section_id)
send_to = [p.conference_reviewer.reviewer for p in
ProposalSectionReviewer.objects.filter(
proposal_section=proposal_section,
active=True)]
proposal_url = proposal.get_absolute_url()
login_url = settings.LOGIN_URL
for to in send_to:
if to == proposal.author:
continue
send_email(to=to,
template_dir='proposals/email/proposal',
context={'to': to,
'proposal': proposal,
'proposal_section': proposal_section,
'host': host,
'proposal_url': proposal_url,
'login_url': login_url})
def _get_proposal_section_reviewers(proposal):
proposal_reviewers = set(ProposalSectionReviewer.objects.filter(
proposal_section=proposal.proposal_section))
recipients = {proposal_reviewer.conference_reviewer.reviewer
for proposal_reviewer in proposal_reviewers}
return recipients
def send_mail_for_proposal_content(conference, proposal, host):
"""
Send mail to proposal author to upload content for proposal.
"""
login_url = '{}?next={}'.format(settings.LOGIN_URL, proposal.get_absolute_url())
author = proposal.author
author_name = author.get_full_name() or author.username
context = {
'host': host,
'login_url': login_url,
'conference': conference,
'proposal': proposal,
'author_name': author_name,
}
return send_email(to=author, template_dir='proposals/email/upload_content', context=context)
``` |
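`markdown_to_html` above is a thin wrapper over `markdown2.markdown`, so the comment-to-HTML conversion used in the notification e-mails can be exercised directly (requires only the `markdown2` package):
```python
from markdown2 import markdown

comment = "**Please** add a short outline:\n\n- audience level\n- prerequisites"
print(markdown(comment))
# Output is roughly:
# <p><strong>Please</strong> add a short outline:</p>
# <ul>
# <li>audience level</li>
# <li>prerequisites</li>
# </ul>
```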
{
"source": "jklmnn/directory2rss",
"score": 3
} |
#### File: jklmnn/directory2rss/d2rss.py
```python
import requests
from bs4 import BeautifulSoup
from flask import Flask, request
import PyRSS2Gen
import datetime
from urllib.parse import unquote
app = Flask(__name__)
def get_entries(url, verify):
content = requests.get(url, verify=verify)
soup = BeautifulSoup(content.text, "lxml")
rows = soup.find_all("tr")
entries = []
if rows:
# Apache
for row in rows[3:-1]:
if row.find_all("a")[0]['href'].endswith("/"):
entries.extend(get_entries(url + ("" if url.endswith("/") else "/") + row.find_all("a")[0]['href'], verify))
else:
entries.append(PyRSS2Gen.RSSItem(
title = unquote(row.find_all("a")[0].text),
link = url + ("" if url.endswith("/") else "/") + row.find_all("a")[0]['href'],
guid = PyRSS2Gen.Guid(row.find_all("a")[0].text),
pubDate = datetime.datetime.strptime(row.find_all("td")[2].text.strip(), "%Y-%m-%d %H:%M")))
else:
# Nginx
rows = soup.find_all("a")
if rows[0]['href'] == "../":
rows = rows[1:]
for row in rows:
if row['href'].endswith("/"):
entries.extend(get_entries(url + ("" if url.endswith("/") else "/") + row['href'], verify))
else:
entries.append(PyRSS2Gen.RSSItem(
title = unquote(row['href']),
link = url + ("" if url.endswith("/") else "/") + row['href'],
guid = PyRSS2Gen.Guid(row.text),
pubDate = datetime.datetime.strptime(" ".join(row.next_sibling.strip(" ").split(" ")[0:2]), "%d-%b-%Y %H:%M")))
return entries
def fetch(url, verify):
return PyRSS2Gen.RSS2(
title = url,
link = url,
lastBuildDate = datetime.datetime.now(),
items = get_entries(url, verify),
description = ""
)
@app.route('/')
def get_data():
url = request.args.get("url")
noverify = request.args.get("noverify")
if url:
return fetch(url, noverify != "yes").to_xml()
return "no url specified"
if __name__ == "__main__":
app.run(debug=True)
``` |
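Because the module exposes a plain Flask `app`, the endpoint can be exercised without running a server via Flask's test client; a sketch assuming the file is importable as `d2rss` and that the target URL serves an Apache or Nginx directory index:
```python
from d2rss import app

client = app.test_client()
resp = client.get("/?url=https://example.org/files/&noverify=yes")
print(resp.status_code)
print(resp.data[:200])  # start of the generated RSS XML
```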
{
"source": "jklopf/tensorbayes",
"score": 3
} |
#### File: tensorbayes/other/first_snips.py
```python
def sample_sigma2_b(betas, NZ, v0B, s0B):
# sample variance of betas
df = v0B+NZ
scale = (tf_squared_norm(betas)+v0B*s0B)/df
sample = rinvchisq(df, scale)
return sample
def sample_sigma2_e(N, epsilon, v0E, s0E):
# sample variance of residuals
df = v0E + N
scale = (tf_squared_norm(epsilon)+v0E*s0E)/df
sample = rinvchisq(df, scale)
return sample
def sample_pi(M, mNZ):
# sample mixture weight
sample = rbeta(mNZ+1, M-mNZ+1)
return sample
# Variables
Ebeta = np.zeros((M,1)) #Numpy array
delta = np.zeros([M,1]) #Numpy array
NZ = tf.Variable(0.)
epsilon = tf.Variable(Y)
Pi = tf.Variable(0.5)
Sigma2_e = tf.Variable(tf_squared_norm(Y) / (N*0.5))
Sigma2_b = tf.Variable(rbeta(1.0,1.0))
# Placeholders
Ebeta_ = tf.placeholder(tf.float32, shape=(M,1))
colx = tf.placeholder(tf.float32, shape=(N,1))
ind_ = tf.placeholder(tf.int32, shape=())
# Constants
# Parameterization of hyperpriors for variances
v0E = tf.constant(4.0)
v0B = tf.constant(4.0)
s0B = Sigma2_b.initialized_value() / 2
s0E = Sigma2_e.initialized_value() / 2
with tf.Session() as sess:
# Initialize variable
sess.run(tf.global_variables_initializer())
# Iterate Gibbs sampling
# scheme 'num_iter' times
for i in range(num_iter):
# Set a new random order of marker
index = np.random.permutation(M)
# Parse and process columns in random order
for marker in index:
sess.run(epsilon.assign_add(colx * Ebeta[marker]),
feed_dict={colx: x[:,marker].reshape(N,1)})
Cj = tf_squared_norm(colx) + Sigma2_e/Sigma2_b
rj = tf.tensordot(tf.transpose(colx), epsilon, 1)[0]
ratio = tf.exp(-(tf.square(rj)/(2*Cj*Sigma2_e)))\
*tf.sqrt((Sigma2_b*Cj)/Sigma2_e)
pij = Pi / (Pi + ratio*(1-Pi))
delta[marker] = sess.run(rbernoulli(pij),\
feed_dict={colx: x[:,marker].reshape(N,1)})
# Beta(j) conditionnal on delta(j)
if (delta[marker]==0): Ebeta[marker]=0
elif (delta[marker]==1):
Ebeta[marker] = sess.run(rnorm(rj/Cj,Sigma2_e/Cj),\
feed_dict={colx: x[:,marker].reshape(N,1)})
# update residuals
sess.run(epsilon.assign_sub(colx * Ebeta[marker]),\
feed_dict={colx: x[:,marker].reshape(N,1)})
# Fullpass over, sample other parameters
sess.run(NZ.assign(np.sum(delta)))
sess.run(Pi.assign(sample_pi(M,NZ)))
sess.run(Sigma2_b.assign(sample_sigma2_b(Ebeta_,NZ,v0B,s0B)),
feed_dict= {Ebeta_: Ebeta})
sess.run(Sigma2_e.assign(sample_sigma2_e(N,epsilon,v0E,s0E)))
```
#### File: tensorbayes/python/tbd.py
```python
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
import time
tfd = tfp.distributions
# Start time measures
start_time = time.clock()
# Reset the graph
tf.reset_default_graph()
# Reproducibility
# Seed setting for reproducible research.
# Set numpy seed
np.random.seed(1234)
# Set graph-level seed
tf.set_random_seed(1234)
# Util functions
def tf_squared_norm(vector):
sum_of_squares = tf.reduce_sum(tf.square(vector))
return sum_of_squares
def np_squared_norm(vector):
sum_of_squares = np.sum(np.square(vector))
return sum_of_squares
# ## Distributions functions
#
# rnorm is defined using the variance (i.e sigma^2)
def rnorm(mean, var):
sd = tf.sqrt(var)
dist = tfd.Normal(loc= mean, scale= sd)
return dist.sample()
def rbeta(alpha, beta):
dist = tfd.Beta(alpha, beta)
return dist.sample()
def rinvchisq(df, scale): # scale factor = tau^2
dist = tfd.Chi2(df)
return (df * scale)/dist.sample()
def rbernoulli(p):
dist = tfd.Bernoulli(probs=p)
return dist.sample()
# Sampling functions
#
# sample mean
def sample_mu(N, Sigma2_e, Y, X, beta):
mean = tf.reduce_sum(tf.subtract(Y, tf.matmul(X, beta)))/N
var = Sigma2_e/N
return rnorm(mean, var)
# sample variance of beta
def sample_sigma2_b( beta, NZ, v0B, s0B):
df = v0B+NZ
scale = (tf_squared_norm(beta)+v0B*s0B) / df
return rinvchisq(df, scale)
# sample error variance of Y
def sample_sigma2_e( N, epsilon, v0E, s0E):
df = v0E + N
scale = (tf_squared_norm(epsilon) + v0E*s0E) / df
return rinvchisq(df, scale)
# sample mixture weight
def sample_w( M, NZ):
w=rbeta(1+NZ,1+M-NZ)
return w
## Simulate data
# Var(g) = 0.7
# Var(Beta_true) = Var(g) / M
# Var(error) = 1 - Var(g)
def build_toy_dataset(N, M, var_g):
sigma_b = np.sqrt(var_g/M)
sigma_e = np.sqrt(1 - var_g)
beta_true = np.random.normal(0, sigma_b , M)
x = sigma_b * np.random.randn(N, M)
y = np.dot(x, beta_true) + np.random.normal(0, sigma_e, N)
return x, y, beta_true
# Simulated data parameters
N = 100 # number of data points
M = 10 # number of features
var_g = 0.7 # M * var(Beta_true)
x, y, beta_true = build_toy_dataset(N, M, var_g)
X = tf.constant(x, shape=[N,M], dtype=tf.float32)
Y = tf.constant(y, shape = [N,1], dtype=tf.float32)
# Could be implemented:
# building datasets using TF API without numpy
# # Alternative simulated data
# beta_true = tf.constant(0.25, shape=[M,1], dtype=tf.float32)
# x = np.random.randn(N,M)
# X = tf.constant(x, dtype = tf.float32)
# Y = tf.matmul(X, beta_true) + (tf.random_normal([N,1]) * 0.375)
# Precomputations
sm = np.zeros(M)
for i in range(M):
sm[i] = np_squared_norm(x[:,i])
# Parameters setup
#
# Distinction between constant and variables
# Variables: values might change between evaluation of the graph
# (if something changes within the graph, it should be a variable)
Emu = tf.Variable(0., dtype=tf.float32)
Ebeta = tf.Variable(tf.zeros([M,1]), dtype=tf.float32)
Ew = tf.Variable(0.)
epsilon = tf.Variable(Y)
NZ = tf.Variable(0.)
Sigma2_e = tf.Variable(tf_squared_norm(Y) / (N*0.5))
Sigma2_b = tf.Variable(rbeta(1.0,1.0))
vEmu = tf.ones([N,1])
colx = tf.placeholder(tf.float32, shape=(N,1))
# Alternatives parameterization of hyperpriors for variances
v0E = tf.constant(0.001)
v0B = tf.constant(0.001)
s0B = Sigma2_b.initialized_value() / 2
s0E = Sigma2_e.initialized_value() / 2
print_dict = {'Emu': Emu, 'Ew': Ew,
'NZ': NZ, 'Sigma2_e': Sigma2_e,
'Sigma2_b': Sigma2_b}
# Tensorboard graph
#writer = tf.summary.FileWriter('.')
#writer.add_graph(tf.get_default_graph())
# updates ops
# Emu_up = Emu.assign(sample_mu(N, Sigma2_e, Y, X, Ebeta_))
#sess.run(Cj.assign(tf.reduce_sum(tf.pow(X[:,marker],2)) + Sigma2_e/Sigma2_b)) #adjusted variance
#sess.run(rj.assign(tf.matmul(tf.reshape(X[:,marker], [1,N]),epsilon)[0][0])) # mean, tensordot instead of matmul ?
#sess.run(ratio.assign(tf.exp(-(tf.pow(rj,2))/(2*Cj*Sigma2_e))*tf.sqrt((Sigma2_b*Cj)/Sigma2_e)))
#sess.run(pij.assign(Ew/(Ew+ratio*(1-Ew))))
def cond_true():
return rnorm(rj/Cj,Sigma2_e/Cj)
def cond_false():
return 0.
# Number of Gibbs sampling iterations
num_iter = 30
with tf.Session() as sess:
# Initialize variable
sess.run(tf.global_variables_initializer())
# Begin Gibbs iterations
for i in range(num_iter):
time_in = time.clock()
# Print progress
print("Gibbs sampling iteration: ", i)
# Assign a random order of marker
index = np.random.permutation(M)
# Sample mu
sess.run(Emu.assign(sample_mu(N, Sigma2_e, Y, X, Ebeta))) # matmul here
# Reset NZ parameter
sess.run(NZ.assign(0.))
# Compute beta for each marker
#print("Current marker:", end=" ")
print("Current marker:")
for marker in index:
print(marker, end=" ", flush=True)
feed = x[:,marker].reshape(N,1)
sess.run(epsilon.assign_add(colx * Ebeta[marker]),
feed_dict={colx: feed})
            # TODO: move the assign op elsewhere and put the ops below under a
            # tf control dependency on that assignment
Cj = tf_squared_norm(colx) + Sigma2_e/Sigma2_b
rj = tf.tensordot(tf.transpose(colx), epsilon, 1)[0]
ratio = tf.exp( - ( tf.square(rj) / ( 2*Cj*Sigma2_e ))) * tf.sqrt((Sigma2_b*Cj)/Sigma2_e)
pij = Ew / (Ew + ratio*(1-Ew))
# TODO: replace with tf.cond
sess.run(Ebeta[marker,0].assign(tf.cond(tf.not_equal(rbernoulli(pij)[0],0),cond_true, cond_false)),
feed_dict={colx: feed})
sess.run(tf.cond(tf.not_equal(Ebeta[marker,0], 0.),lambda: NZ.assign_add(1.), lambda: NZ.assign_add(0.)))
sess.run(epsilon.assign_sub(colx * Ebeta[marker,0]), feed_dict={colx: feed})
#for i in range(len(Ebeta)):
# print(Ebeta[i], "\t", ny[i])
#sess.run(NZ.assign(np.sum(ny)))
sess.run(Ew.assign(sample_w(M,NZ)))
#sess.run(Ebeta_.assign(Ebeta))
sess.run(epsilon.assign(Y-tf.matmul(X,Ebeta)-vEmu*Emu))
sess.run(Sigma2_b.assign(sample_sigma2_b(Ebeta,NZ,v0B,s0B)))
sess.run(Sigma2_e.assign(sample_sigma2_e(N,epsilon,v0E,s0E)))
# Print operations
print("\n")
print(sess.run(print_dict))
print(" ")
time_out = time.clock()
print('Time for the ', i, 'th iteration: ', time_out - time_in, ' seconds')
print(" ")
# ## Print results
print("Ebeta" + '\t'+ ' beta_true')
for i in range(M):
print(Ebeta[i], "\t", beta_true[i])
# ## Print time
print('Time elapsed: ')
print(time.clock() - start_time, "seconds")
```
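`rinvchisq` above draws from a scaled inverse chi-squared distribution by dividing `df * scale` by a chi-squared sample; for `df > 2` the mean is `df * scale / (df - 2)`, which gives a quick NumPy-only sanity check of the same construction:
```python
import numpy as np

rng = np.random.default_rng(1234)

def rinvchisq_np(df, scale, size):
    # Scaled inverse chi-squared via df * scale / Chi2(df), as in rinvchisq above.
    return df * scale / rng.chisquare(df, size=size)

df, scale = 10.0, 0.5
draws = rinvchisq_np(df, scale, size=200_000)
print(draws.mean())           # empirical mean, close to...
print(df * scale / (df - 2))  # ...the theoretical mean 0.625
```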
#### File: tensorbayes/python/TensorBayes_v3.3.py
```python
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
import time
tfd = tfp.distributions
'''
This version is able to retrieve the simulated parameters and
store the history of the sampling, as NumPyBayes_v2.py does.
The next version will implement the tensorflow dataset API instead of
placeholders to feed data, and will be called:
- TensorBayes_v3.3.py
'''
# Start time measures
start_time = time.clock()
# Reset the graph
tf.reset_default_graph()
# Reproducibility
# Seed setting for reproducible research.
#
# Set NumPy seed
np.random.seed(1234)
# Set graph-level seed
tf.set_random_seed(1234)
# Util functions
def tf_squared_norm(vector):
sum_of_squares = tf.reduce_sum(tf.square(vector))
return sum_of_squares
def np_squared_norm(vector):
sum_of_squares = np.sum(np.square(vector))
return sum_of_squares
# ## Distributions functions
#
# rnorm is defined using the variance (i.e sigma^2)
def rnorm(mean, var):
sd = tf.sqrt(var)
dist = tfd.Normal(loc= mean, scale= sd)
sample = dist.sample()
return sample
def rbeta(a, b):
dist = tfd.Beta(a, b)
sample = dist.sample()
return sample
def rinvchisq(df, scale): # scale factor = tau^2
dist = tfd.Chi2(df)
sample = (df * scale)/dist.sample()
return sample
def rbernoulli(p):
dist = tfd.Bernoulli(probs=p)
sample = dist.sample()
return sample
# Sampling functions
#
# sample mean
def sample_mu(N, Sigma2_e, Y, X, betas):
mean = tf.reduce_sum(tf.subtract(Y, tf.matmul(X, betas)))/N
var = Sigma2_e/N
sample = rnorm(mean,var)
return sample
# sample variance of beta
def sample_sigma2_b(betas, NZ, v0B, s0B):
df = v0B+NZ
scale = (tf_squared_norm(betas)+v0B*s0B) / df
sample = rinvchisq(df, scale)
return sample
# sample error variance of Y
def sample_sigma2_e(N, epsilon, v0E, s0E):
df = v0E + N
scale = (tf_squared_norm(epsilon) + v0E*s0E) / df
sample = rinvchisq(df, scale)
return sample
# sample mixture weight
def sample_w(M, NZ):
sample = rbeta(NZ+1, M-NZ+1)
return sample
# sample a beta
def sample_beta(x_j, eps, s2e, s2b, w, beta_old):
eps = eps + (x_j*beta_old)
Cj = tf_squared_norm(x_j) + s2e/s2b
rj = tf.tensordot(tf.transpose(x_j), eps, 1)[0,0]
ratio = tf.exp( - ( tf.square(rj) / ( 2*Cj*s2e ))) * tf.sqrt((s2b*Cj)/s2e)
pij = w / (w + ratio*(1-w))
toss = rbernoulli(pij)
def case_zero():
return 0., 0. # could return a list [beta,ny]
def case_one():
return rnorm(rj/Cj, s2e/Cj), 1. # could return a list [beta,ny]
beta_new, ny_new = tf.cond(tf.equal(toss,1),case_one, case_zero)
#beta_new, incl = tf.case([(tf.equal(toss, 0), case_zero)], default=case_one)
# maybe use tf.cond since we only got 1 pair ?
# do we handle ny/nz here ?
eps = eps - (x_j*beta_new)
return beta_new, ny_new, eps # could return a list [beta,ny]
## Simulate data
def build_toy_dataset(N, M, var_g):
sigma_b = np.sqrt(var_g/M)
sigma_e = np.sqrt(1 - var_g)
beta_true = np.random.normal(0, sigma_b , M)
x = sigma_b * np.random.randn(N, M)
y = np.dot(x, beta_true) + np.random.normal(0, sigma_e, N)
return x, y, beta_true.reshape(M,1)
# Simulated data parameters
N = 5000 # number of data points
M = 10 # number of features
var_g = 0.7 # M * var(Beta_true)
# Var(Beta_true) = Var(g) / M
# Var(error) = 1 - Var(g)
x, y, beta_true = build_toy_dataset(N, M, var_g)
X = tf.constant(x, shape=[N,M], dtype=tf.float32)
Y = tf.constant(y, shape=[N,1], dtype=tf.float32)
# Could be implemented:
# building datasets using TF API without numpy
# # Precomputations - not practicable with huge datasets
# sm = np.zeros(M)
# for i in range(M):
# sm[i] = np_squared_norm(x[:,i])
'''
TODO: Actually implement all the algorithm optimizations of the reference article
which are not implemented here. Depends on later implementations of input pipeline.
'''
# Parameters setup
#
# Distinction between constants and variables
# Variables: values might change between evaluations of the graph
# (if something changes within the graph, it should be a variable)
# Variables:
Emu = tf.Variable(0., dtype=tf.float32)
Ebeta = tf.Variable(tf.zeros([M,1], dtype=tf.float32), dtype=tf.float32)
Ny = tf.Variable(tf.zeros(M, dtype=tf.float32), dtype=tf.float32)
NZ = tf.Variable(0., dtype=tf.float32)
Ew = tf.Variable(0., dtype=tf.float32)
epsilon = tf.Variable(Y, dtype=tf.float32)
Sigma2_e = tf.Variable(tf_squared_norm(Y) / (N*0.5), dtype=tf.float32)
Sigma2_b = tf.Variable(rbeta(1., 1.), dtype=tf.float32)
# Constants:
vEmu = tf.ones([N,1], dtype=tf.float32)
v0E = tf.constant(0.001, dtype=tf.float32)
v0B = tf.constant(0.001, dtype=tf.float32)
s0B = Sigma2_b.initialized_value() / 2
s0E = Sigma2_e.initialized_value() / 2
# Placeholders:
Xj = tf.placeholder(tf.float32, shape=(N,1))
ind = tf.placeholder(tf.int32, shape=())
# Print stuff:
# TODO: construct the op with tf.print() so that values get automatically printed
print_dict = {'Emu': Emu, 'Ew': Ew,
'NZ': NZ, 'Sigma2_e': Sigma2_e,
'Sigma2_b': Sigma2_b}
# Tensorboard graph
# TODO: look up what TensorBoard can do, this can be used in the end to have a graph representation of the algorithm.
# Also, for graph clarity, operations should be named.
#writer = tf.summary.FileWriter('.')
#writer.add_graph(tf.get_default_graph())
# Computations
ta_beta, ta_ny, ta_eps = sample_beta(Xj, epsilon, Sigma2_e, Sigma2_b, Ew, Ebeta[ind,0]) # Ebeta[ind] might be replaced by using dictionaries key/value instead.
ta_epsilon = Y - tf.matmul(X,Ebeta) - vEmu*Emu
ta_s2b = sample_sigma2_b(Ebeta,NZ,v0B,s0B)
ta_s2e = sample_sigma2_e(N,epsilon,v0E,s0E)
ta_nz = tf.reduce_sum(Ny)
# Assignment ops
# As we don't chain assignment operations, an assignment does not need to return the evaluated new value;
# therefore, all read_value flags are set to False. No idea if this changes anything.
emu_up = Emu.assign(sample_mu(N, Sigma2_e, Y, X, Ebeta), read_value=False)
beta_item_assign_op = Ebeta[ind,0].assign(ta_beta) # for item assignment, read_value is an unexpected parameter,
ny_item_assign_op = Ny[ind].assign(ta_ny) # as tensorflow doesn't know whether to return the single item or the whole variable
nz_up = NZ.assign(ta_nz, read_value=False)
eps_up_fl = epsilon.assign(ta_eps, read_value=False)
eps_up = epsilon.assign(ta_epsilon, read_value=False)
ew_up = Ew.assign(sample_w(M,NZ), read_value=False)
s2b_up = Sigma2_b.assign(ta_s2b, read_value=False)
s2e_up = Sigma2_e.assign(ta_s2e, read_value=False)
up_grp = tf.group(beta_item_assign_op, ny_item_assign_op, eps_up)
# Run with `read_value = True`: 63.4s
# Run with `read_value = False`: 62.2s
# Log definition:
param_log = [] # order: Sigma2_e, Sigma2_b
beta_log = [] # as rank 1 vector
# Number of Gibbs sampling iterations
num_iter = 5000
burned_samples_threshold = 2000
with tf.Session() as sess:
# Initialize variable
sess.run(tf.global_variables_initializer())
# Gibbs sampler iterations
print('\n', "Beginning of sampling: each dot = 500 iterations", '\n')
for i in range(num_iter): # TODO: replace with tf.while ?
if(i%500 == 0): print(".",end='', flush=True)
#sess.run(emu_up)
#sess.run(ny_reset)
index = np.random.permutation(M)
for marker in index:
current_col = x[:,[marker]]
feed = {ind: marker, Xj: current_col}
sess.run(up_grp, feed_dict=feed)
sess.run(nz_up)
sess.run(ew_up)
#sess.run(eps_up)
sess.run(s2b_up)
sess.run(s2e_up)
# Print operations
# print(sess.run(print_dict))
# Logs
if(i > burned_samples_threshold):
param_log.append(sess.run([Sigma2_e, Sigma2_b]))
beta_log.append(np.array(sess.run(Ebeta)).reshape(M))
print("\n")
print("End of sampling" + '\n')
# Time elapsed
total_time = np.round(time.clock() - start_time, 5)
print("Time elapsed: " + str(total_time) + "s" + '\n')
# Results
param_log = np.array(param_log)
mean_Sigma2_e = np.round(np.mean(param_log[:,0]),5)
mean_Sigma2_b = np.round(np.mean(param_log[:,1]),5)
mean_betas = np.round(np.mean(beta_log, axis=0),5).reshape([M,1])
# Results printing
print("Parameters: " + '\n')
#print(" ")
print("Mean Sigma2_e:", mean_Sigma2_e,'\t', "Expected Sigma2_e:", 1-var_g)
print("Mean Sigma2_b:", mean_Sigma2_b,'\t', "Expected Sigma2_b:", var_g / M, "\n")
print("Coefficients:" + '\n')
print("Computed" + '\t' + '\t' + "Expected" )
for i in range(M):
print(mean_betas[i,0], '\t', '\t', beta_true[i,0] )
``` |
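The docstring above flags moving from `feed_dict` placeholders to the TensorFlow dataset API as the next step. A possible shape of that input pipeline, sketched here under the assumption of the TF 1.x `tf.data` API (none of this code appears in the original file; the shapes reuse the simulated N=5000, M=10):

```python
import numpy as np
import tensorflow as tf

N, M = 5000, 10
x = np.random.randn(N, M).astype(np.float32)      # stand-in for the simulated genotype matrix

# One dataset element per marker column; shuffling gives a random marker order.
dataset = tf.data.Dataset.from_tensor_slices(x.T).shuffle(buffer_size=M)
iterator = dataset.make_one_shot_iterator()
col = tf.reshape(iterator.get_next(), [N, 1])     # same shape the Xj placeholder had
sq_norm = tf.reduce_sum(tf.square(col))           # e.g. the squared norm used in Cj

with tf.Session() as sess:
    for _ in range(M):
        print(sess.run(sq_norm))                  # each run pulls the next marker column
```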
{
"source": "jkloth/pyxml",
"score": 4
} |
#### File: demo/dom/dom_from_html_file.py
```python
from xml.dom.ext.reader import HtmlLib
from xml.dom import ext
def read_html_from_file(fileName):
#build a DOM tree from the file
reader = HtmlLib.Reader()
dom_object = reader.fromUri(fileName)
#strip any ignorable white-space in preparation for pretty-printing
ext.StripHtml(dom_object)
#pretty-print the node
ext.PrettyPrint(dom_object)
#reclaim the object
reader.releaseNode(dom_object);
if __name__ == '__main__':
import sys
read_html_from_file(sys.argv[1])
```
#### File: demo/dom/xptr.py
```python
import re,string,sys
from xml.dom import Node
from xml.dom import ext
# Spec deviations:
# - html keyword not supported
# - negative instance numbers not supported
# - #cdata node type selector not supported
# - * for attribute values/names not supported
# - preceding keyword not supported
# - span keyword unsupported
# - 'string' location terms not supported
# Spec questions
# - what if locator fails?
# - what to do with "span(...).child(1)"?
# - how to continue from a set of selected nodes?
# - attr: error if does not use element as source?
# - should distinguish between semantic errors and failures?
# - can string terms locate inside attr vals?
# - are the string loc semantics a bit extreme? perhaps restrict to one node?
# - how to represent span and string results in terms of the DOM?
# Global variables
version="0.20"
specver="WD-xptr-19980303"
# Useful regular expressions
reg_sym=re.compile("[a-z]+|\\(|\\)|\\.|[-+]?[1-9][0-9]*|[A-Za-z_:][\-A-Za-z_:.0-9]*|,|#[a-z]+|\\*|\"[^\"]*\"|'[^']*'")
reg_sym_param=re.compile(",|\)|\"|'")
reg_name=re.compile("[A-Za-z_:][\-A-Za-z_:.0-9]*")
# Some exceptions
class XPointerException(Exception):
"Means something went wrong when attempting to follow an XPointer."
pass
class XPointerParseException(XPointerException):
"Means the XPointer was syntactically invalid."
def __init__(self,msg,pos):
self.__msg=msg
self.__pos=pos
def get_pos(self):
return self.__pos
def __str__(self):
return self.__msg % self.__pos
class XPointerFailedException(XPointerException):
"Means the XPointer was logically invalid."
pass
class XPointerUnsupportedException(XPointerException):
"Means the XPointer used unsupported constructs."
pass
# Simple XPointer lexical analyzer
class SymbolGenerator:
"Chops XPointers up into distinct symbols."
def __init__(self,xpointer):
self.__data=xpointer
self.__pos=0
self.__last_was_param=0
self.__next_is=""
def get_pos(self):
"Returns the current position in the string."
return self.__pos
def more_symbols(self):
"True if there are more symbols in the XPointer."
return self.__pos<len(self.__data) or self.__next_is!=""
def next_symbol(self):
"Returns the next XPointer symbol."
if self.__next_is!="":
tmp=self.__next_is
self.__next_is=""
return tmp
if self.__last_was_param:
self.__last_was_param=0
sym=""
count=0
while self.more_symbols():
n=self.next_symbol()
if n=='"' or n=="'":
pos=string.find(self.__data,n,self.__pos)
if pos==-1:
raise XPointerParseException("Unmatched %s at %d" % \
n,self.__pos)
sym=self.__data[self.__pos-1:pos+1]
self.__pos=pos+1
elif n=="(":
count=count+1
elif n==")":
count=count-1
if count<0:
if sym=="":
return ")"
else:
self.__next_is=")"
return sym
elif n=="," and count==0:
self.__last_was_param=1
self.__next_is=","
return sym
sym=sym+n
mo=reg_sym.match(self.__data,self.__pos)
if mo==None:
raise XPointerParseException("Invalid symbol at position %d",
self.__pos)
self.__pos=self.__pos+len(mo.group(0))
self.__last_was_param= mo.group(0)=="("
return mo.group(0)
# Simple XPointer parser
class XPointerParser:
"""Simple XPointer parser that parses XPointers firing events that receive
terms and parameters."""
def __init__(self,xpointer):
self.__sgen=SymbolGenerator(xpointer)
self.__first_term=1
self.__prev=None
def __skip_over(self,symbol):
if self.__sgen.next_symbol()!=symbol:
raise XPointerParseException("Expected '"+symbol+"' at %s",
self.__sgen.get_pos())
def __is_valid(self,symbol,regexp):
mo=regexp.match(symbol)
return mo!=None and len(mo.group(0))==len(symbol)
def __parse_instance_or_all(self,iora):
if iora!="all":
try:
return int(iora)
except ValueError,e:
raise XPointerParseException("Expected number or 'all' at %s",
self.__sgen.get_pos())
else:
return "all"
def parse(self):
"Runs through the entire XPointer, firing events."
sym="."
while sym==".":
name=self.__sgen.next_symbol()
if name=="(":
name="" # Names can be defaulted
else:
self.__skip_over("(")
sym=self.__sgen.next_symbol()
if sym!=")":
params=[sym]
sym=self.__sgen.next_symbol()
else:
params=[]
while sym==",":
params.append(self.__sgen.next_symbol())
sym=self.__sgen.next_symbol()
if sym!=")":
raise XPointerParseException("Expected ')' at %s",
self.__sgen.get_pos())
self.dispatch_term(name,params)
if self.__sgen.more_symbols():
sym=self.__sgen.next_symbol()
else:
return
# If the XPointer ends correctly, we'll return from the if above
raise XPointerParseException("Expected '.' at %s",
self.__sgen.get_pos())
def dispatch_term(self,name,params):
"""Called when a term is encountered to analyze it and fire more
detailed events."""
if self.__first_term:
if name=="root" or name=="origin" or name=="id" or name=="html":
if name=="root" or name=="origin":
if len(params)!=0:
raise XPointerParseException(name+" terms have no "
"parameters (at %s)",
self.__sgen.get_pos())
else:
param=None
elif name=="id" or name=="html":
if len(params)!=1:
raise XPointerParseException(name+" terms require one "
"parameter (at %s)",
self.__sgen.get_pos())
else:
param=params[0]
# XXX Validate parameter
self.__first_term=0
self.handle_abs_term(name,param)
return
else:
self.handle_abs_term("root",None)
else:
if name=="" and self.__prev!=None:
name=self.__prev
if name=="child" or name=="ancestor" or name=="psibling" or \
name=="fsibling" or name=="descendant" or name=="following" or \
name=="preceding":
self.parse_rel_term(name,params)
elif name=="span":
self.parse_span_term(params)
elif name=="attr":
self.parse_attr_term(params)
elif name=="string":
self.parse_string_term(params)
else:
raise XPointerParseException("Illegal term type "+name+\
" at %s",self.__sgen.get_pos())
self.__prev=name
def parse_rel_term(self,name,params):
"Parses the arguments of relative location terms and fires the event."
no=self.__parse_instance_or_all(params[0])
if len(params)>1:
type=params[1]
if not (type=="#element" or type=="#pi" or type=="#comment" or \
type=="#text" or type=="#cdata" or type=="#all" or \
self.__is_valid(type,reg_name)):
raise XPointerParseException("Invalid type at %s",
self.__sgen.get_pos())
else:
type="#element"
attrs=[]
ix=2
while ix+1<len(params):
if not self.__is_valid(params[ix],reg_name):
raise XPointerParseException("Not a valid name at %s",
self.__sgen.get_pos())
attrs.append((params[ix],params[ix+1]))
ix=ix+2
self.handle_rel_term(name,no,type,attrs)
def parse_span_term(self,params):
"Parses the arguments of the span term and fires the event."
raise XPointerUnsupportedException("'span' keyword unsupported.")
def parse_attr_term(self,params):
"Parses the argument of the attr term and fires the event."
if len(params)!=1:
raise XPointerParseException("'attr' location terms must have "
"exactly one parameter (at %s)",
self.__sgen.get_pos())
if not self.__is_valid(params[0],reg_name):
raise XPointerParseException("'%s' is not a valid attribute "
"name at %s" % name,
self.__sgen.get_pos())
self.handle_attr_term(params[0])
def parse_string_term(self,params):
"Parses the argument of the string term and fires the event."
no=self.__parse_instance_or_all(params[0])
if len(params)>1:
skiplit=params[1]
else:
skiplit=None
if len(params)>2:
if params[2]=="end":
pos="end"
else:
try:
pos=int(params[2])
except ValueError,e:
raise XPointerParseException("Expected number at %s",
self.__sgen.get_pos())
if pos==0:
raise XPointerParseException("0 is not an acceptable "
"value at %s",
self.__sgen.get_pos())
else:
pos=None
if len(params)>3:
try:
length=int(params[3])
except ValueError,e:
raise XPointerParseException("Expected number at %s",
self.__sgen.get_pos())
else:
length=0
self.handle_string_term(no,skiplit,pos,length)
# Event methods to be overridden
def handle_abs_term(self,name,param):
"Called to handle absolute location terms."
pass
def handle_rel_term(self,name,no,type,attrs):
"Called to handle relative location terms."
pass
def handle_attr_term(self,attr_name):
"Called to handle 'attr' location terms."
pass
def handle_span_term(self,frm,to):
"Called to handle 'span' location terms."
pass
def handle_string_term(self,no,skiplit,pos,length):
"Called to handle 'string' location terms."
pass
# ----- XPointer implementation that navigates a DOM tree
# Iterator classes
class DescendantIterator:
def __init__(self):
self.stack=[]
def __call__(self,node):
next=node.firstChild
if next==None:
next=node.nextSibling
while next==None:
if self.stack==[]:
raise XPointerFailedException("No matching node")
next=self.stack[-1].nextSibling
del self.stack[-1]
self.stack.append(next)
return next
class FollowingIterator:
def __init__(self):
self.seen_hash={}
self.skip_child=0
def __call__(self,node):
if not self.skip_child:
next=node.firstChild
else:
self.skip_child=0
next=None
if next==None:
            next=node.nextSibling
if next==None:
next=node.parentNode
self.skip_child=1 # Don't go down, we've been there :-)
                if next.nodeName=="#DOCUMENT":
                    raise XPointerFailedException("No matching node")
                if self.seen_hash.has_key(id(next)):
next=node.nextSibling
prev=node
while next==None:
next=prev.parentNode
self.skip_child=1 # Don't go down, we've been there :-)
prev=next
if next.nodeName=="#DOCUMENT":
raise XPointerFailedException("No matching node")
                        if self.seen_hash.has_key(id(next)):
next=prev.nextSibling
if next!=None:
self.skip_child=0
else:
# We're above all the nodes we've looked at. Throw out the
# hashed objects.
self.seen_hash.clear()
        self.seen_hash[id(next)]=1
return next
# The implementation itself
class XDOMLocator(XPointerParser):
def __init__(self, xpointer, document):
XPointerParser.__init__(self, xpointer)
self.__node=document
self.__first=1
self.__prev=None
def __node_matches(self,node,type,attrs):
"Checks whether a DOM node matches a foo(2,SECTION,ID,I5) selector."
if type==node.nodeName or \
(type=="#element" and node.nodeType == Node.ELEMENT_NODE) or \
(type=="#pi" and node.nodeType == Node.PROCESSING_INSTRUCTION_NODE) or \
(type=="#comment" and node.nodeType == Node.COMMENT_NODE) or \
(type=="#text" and node.nodeType == Node.TEXT_NODE) or \
(type=="#cdata" and node.nodeType == Node.CDATA_SECTION_NODE) or \
type=="#all":
if attrs!=None:
for (a,v) in attrs:
try:
if v!=node.getAttribute(a):
return 0
except KeyError,e:
return 0
return 1
else:
return 0
def __get_node(self,no,type,attrs,iterator):
"""General method that iterates through the tree calling the iterator
on the current node for each step to get the next node."""
count=0
current=iterator(self.__node)
while current!=None:
if self.__node_matches(current,type,attrs):
count=count+1
if count==no:
return current
current=iterator(current)
raise XPointerFailedException("No matching node")
def __get_child(self,no,type,attrs):
if type==None:
candidates = self.__node.childNodes
else:
candidates = []
for obj in self.__node.childNodes:
if self.__node_matches(obj,type,attrs):
candidates.append(obj)
try:
return candidates[no-1]
except IndexError,e:
raise XPointerFailedException("No matching node")
def get_node(self):
"Returns the located node."
return self.__node
def handle_abs_term(self,name,param):
"Called to handle absolute location terms."
if name=="root":
if self.__node.nodeType != Node.DOCUMENT_NODE:
raise XPointerFailedException("Expected document node")
self.__node=self.__node.documentElement
elif name=="origin":
pass # Just work from current node
elif name=="id":
self.__node=ext.GetElementById(self.__node, param)
elif name=="html":
raise XPointerUnsupportedException("Term type 'html' unsupported.")
def handle_rel_term(self,name,no,type,attrs):
"Called to handle relative location terms."
if name=="child":
next=self.__get_child(no,type,attrs)
elif name=="ancestor":
            next=self.__get_node(no,type,attrs,lambda node: node.parentNode)
        elif name=="psibling":
            next=self.__get_node(no,type,attrs,lambda node: node.previousSibling)
        elif name=="fsibling":
            next=self.__get_node(no,type,attrs,lambda node: node.nextSibling)
elif name=="descendant":
next=self.__get_node(no,type,attrs,DescendantIterator())
elif name=="following":
next=self.__get_node(no,type,attrs,FollowingIterator())
self.__node=next
self.__prev=name
def handle_attr_term(self, attr_name):
        if self.__node.nodeType != Node.ELEMENT_NODE:
raise XPointerFailedException("'attr' location term used from "
"non-element node")
if not self.__node.attributes.has_key(attr_name):
raise XPointerFailedException("Non-existent attribute '%s' located"
" by 'attr' term" % attr_name)
self.__node=self.__node.attributes.getNamedItem(attr_name)
def handle_string_term(self,no,skiplit,pos,length):
raise XPointerUnsupportedException("'string' location terms not "
"supported")
def LocateNode(node, xpointer):
try:
xp=XDOMLocator(xpointer, node)
xp.parse()
return xp.get_node()
except XPointerParseException,e:
print "ERROR: "+str(e)
```
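A hypothetical usage sketch for the locator above (not part of the original demo): the document, the XPointer string, and the assumption that this file is importable as `xptr` are all made up for illustration; `Sax2.FromXml` is the reader used elsewhere in this package.

```python
from xml.dom.ext.reader import Sax2
import xptr   # assumes demo/dom/xptr.py is on the path

doc = Sax2.FromXml("<doc><sec><p>one</p><p>two</p></sec></doc>")
# root() selects the document element, then the first <sec> child, then its second <p>
node = xptr.LocateNode(doc, "root().child(1,sec).child(2,p)")
print node.nodeName
```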
#### File: demo/quotes/qtfmt.py
```python
__doc__ = """Usage: qtfmt.py [options] file1.xml file2.xml ...
If no filenames are provided, standard input will be read.
Available options:
-f or --fortune Produce output for the fortune(1) program
-h or --html Produce HTML output
-t or --text Produce plain text output
-m N or --max N Suppress quotations longer than N lines;
defaults to 0, which suppresses no quotations at all.
"""
import string, re, cgi, types
import codecs
from xml.sax import saxlib, saxexts
def simplify(t, indent="", width=79):
"""Strip out redundant spaces, and insert newlines to
wrap the text at the given width."""
t = string.strip(t)
t = re.sub('\s+', " ", t)
if t=="": return t
t = indent + t
t2 = ""
while len(t) > width:
index = string.rfind(t, ' ', 0, width)
if index == -1: t2 = t2 + t[:width] ; t = t[width:]
else: t2 = t2 + t[:index] ; t = t[index+1:]
t2 = t2 + '\n'
return t2 + t
class Quotation:
"""Encapsulates a single quotation.
Attributes:
stack -- used during construction and then deleted
text -- A list of Text() instances, or subclasses of Text(),
containing the text of the quotation.
source -- A list of Text() instances, or subclasses of Text(),
containing the source of the quotation. (Optional)
author -- A list of Text() instances, or subclasses of Text(),
containing the author of the quotation. (Optional)
Methods:
as_fortune() -- return the quotation formatted for fortune
as_html() -- return an HTML version of the quotation
as_text() -- return a plain text version of the quotation
"""
def __init__(self):
self.stack = [ Text() ]
self.text = []
def as_text(self):
"Convert instance into a pure text form"
output = ""
def flatten(textobj):
"Flatten a list of subclasses of Text into a list of paragraphs"
if type(textobj) != types.ListType: textlist=[textobj]
else: textlist = textobj
paragraph = "" ; paralist = []
for t in textlist:
if (isinstance(t, PreformattedText) or
isinstance(t, CodeFormattedText) ):
paralist.append(paragraph)
paragraph = ""
paralist.append(t)
elif isinstance(t, Break):
paragraph = paragraph + t.as_text()
paralist.append(paragraph)
paragraph = ""
else:
paragraph = paragraph + t.as_text()
paralist.append(paragraph)
return paralist
# Flatten the list of instances into a list of paragraphs
paralist = flatten(self.text)
if len(paralist) > 1:
indent = 2*" "
else:
indent = ""
for para in paralist:
if isinstance(para, PreformattedText) or isinstance(para, CodeFormattedText):
output = output + para.as_text()
else:
output = output + simplify(para, indent) + '\n'
attr = ""
for i in ['author', 'source']:
if hasattr(self, i):
paralist = flatten(getattr(self, i))
text = string.join(paralist)
if attr:
attr = attr + ', '
text = string.lower(text[:1]) + text[1:]
attr = attr + text
attr=simplify(attr, width = 79 - 4 - 3)
if attr: output = output + ' -- '+re.sub('\n', '\n ', attr)
return output + '\n'
def as_fortune(self):
return self.as_text() + '%'
def as_html(self):
output = "<P>"
def flatten(textobj):
if type(textobj) != types.ListType: textlist = [textobj]
else: textlist = textobj
paragraph = "" ; paralist = []
for t in textlist:
paragraph = paragraph + t.as_html()
if isinstance(t, Break):
paralist.append(paragraph)
paragraph = ""
paralist.append(paragraph)
return paralist
paralist = flatten(self.text)
for para in paralist: output = output + string.strip(para) + '\n'
attr = ""
for i in ['author', 'source']:
if hasattr(self, i):
paralist = flatten(getattr(self, i))
text = string.join(paralist)
attr=attr + ('<P CLASS=%s>' % i) + string.strip(text)
return output + attr
# Text and its subclasses are used to hold chunks of text; instances
# know how to display themselves as plain text or as HTML.
class Text:
"Plain text"
def __init__(self, text=""):
self.text = text
# We need to allow adding a string to Text instances.
def __add__(self, val):
newtext = self.text + str(val)
# __class__ must be used so subclasses create instances of themselves.
return self.__class__(newtext)
def __str__(self): return self.text
def __repr__(self):
s = string.strip(self.text)
if len(s) > 15: s = s[0:15] + '...'
return '<%s: "%s">' % (self.__class__.__name__, s)
def as_text(self): return self.text
def as_html(self): return cgi.escape(self.text)
class PreformattedText(Text):
"Text inside <pre>...</pre>"
def as_text(self):
return str(self.text)
def as_html(self):
return '<pre>' + cgi.escape(str(self.text)) + '</pre>'
class CodeFormattedText(Text):
"Text inside <code>...</code>"
def as_text(self):
return str(self.text)
def as_html(self):
return '<code>' + cgi.escape(str(self.text)) + '</code>'
class CitedText(Text):
"Text inside <cite>...</cite>"
def as_text(self):
return '_' + simplify(str(self.text)) + '_'
def as_html(self):
return '<cite>' + string.strip(cgi.escape(str(self.text))) + '</cite>'
class ForeignText(Text):
"Foreign words, from Latin or French or whatever."
def as_text(self):
return '_' + simplify(str(self.text)) + '_'
def as_html(self):
return '<i>' + string.strip(cgi.escape(str(self.text))) + '</i>'
class EmphasizedText(Text):
"Text inside <em>...</em>"
def as_text(self):
return '*' + simplify(str(self.text)) + '*'
def as_html(self):
return '<em>' + string.strip(cgi.escape(str(self.text))) + '</em>'
class Break(Text):
def as_text(self): return ""
def as_html(self): return "<P>"
# The QuotationDocHandler class is a SAX handler class that will
# convert a marked-up document using the quotations DTD into a list of
# quotation objects.
class QuotationDocHandler(saxlib.HandlerBase):
def __init__(self, process_func):
self.process_func = process_func
self.newqt = None
# Errors should be signaled, so we'll output a message and raise
# the exception to stop processing
def fatalError(self, exception):
sys.stderr.write('ERROR: '+ str(exception)+'\n')
sys.exit(1)
error = fatalError
warning = fatalError
def characters(self, ch, start, length):
if self.newqt != None:
s = ch[start:start+length]
# Undo the UTF-8 encoding, converting to ISO Latin1, which
# is the default character set used for HTML.
latin1_encode = codecs.lookup('iso-8859-1') [0]
unicode_str = s
s, consumed = latin1_encode( unicode_str )
assert consumed == len( unicode_str )
self.newqt.stack[-1] = self.newqt.stack[-1] + s
def startDocument(self):
self.quote_list = []
def startElement(self, name, attrs):
methname = 'start_'+str(name)
if hasattr(self, methname):
method = getattr(self, methname)
method(attrs)
else:
sys.stderr.write('unknown start tag: <' + name + ' ')
for name, value in attrs.items():
sys.stderr.write(name + '=' + '"' + value + '" ')
sys.stderr.write('>\n')
def endElement(self, name):
methname = 'end_'+str(name)
if hasattr(self, methname):
method = getattr(self, methname)
method()
else:
sys.stderr.write('unknown end tag: </' + name + '>\n')
# There's nothing to be done for the <quotations> tag
def start_quotations(self, attrs):
pass
def end_quotations(self):
pass
def start_quotation(self, attrs):
if self.newqt == None: self.newqt = Quotation()
def end_quotation(self):
st = self.newqt.stack
for i in range(len(st)):
if type(st[i]) == types.StringType:
st[i] = Text(st[i])
self.newqt.text=self.newqt.text + st
del self.newqt.stack
if self.process_func: self.process_func(self.newqt)
else:
print "Completed quotation\n ", self.newqt.__dict__
self.newqt=Quotation()
# Attributes of a quotation: <author>...</author> and <source>...</source>
def start_author(self, data):
# Add the current contents of the stack to the text of the quotation
self.newqt.text = self.newqt.text + self.newqt.stack
# Reset the stack
self.newqt.stack = [ Text() ]
def end_author(self):
# Set the author attribute to contents of the stack; you can't
# have more than one <author> tag per quotation.
self.newqt.author = self.newqt.stack
# Reset the stack for more text.
self.newqt.stack = [ Text() ]
# The code for the <source> tag is exactly parallel to that for <author>
def start_source(self, data):
self.newqt.text = self.newqt.text + self.newqt.stack
self.newqt.stack = [ Text() ]
def end_source(self):
self.newqt.source = self.newqt.stack
self.newqt.stack = [ Text() ]
# Text markups: <br/> for breaks, <pre>...</pre> for preformatted
# text, <em>...</em> for emphasis, <cite>...</cite> for citations.
def start_br(self, data):
# Add a Break instance, and a new Text instance.
self.newqt.stack.append(Break())
self.newqt.stack.append( Text() )
def end_br(self): pass
def start_pre(self, data):
self.newqt.stack.append( Text() )
def end_pre(self):
self.newqt.stack[-1] = PreformattedText(self.newqt.stack[-1])
self.newqt.stack.append( Text() )
def start_code(self, data):
self.newqt.stack.append( Text() )
def end_code(self):
self.newqt.stack[-1] = CodeFormattedText(self.newqt.stack[-1])
self.newqt.stack.append( Text() )
def start_em(self, data):
self.newqt.stack.append( Text() )
def end_em(self):
self.newqt.stack[-1] = EmphasizedText(self.newqt.stack[-1])
self.newqt.stack.append( Text() )
def start_cite(self, data):
self.newqt.stack.append( Text() )
def end_cite(self):
self.newqt.stack[-1] = CitedText(self.newqt.stack[-1])
self.newqt.stack.append( Text() )
def start_foreign(self, data):
self.newqt.stack.append( Text() )
def end_foreign(self):
self.newqt.stack[-1] = ForeignText(self.newqt.stack[-1])
self.newqt.stack.append( Text() )
if __name__ == '__main__':
import sys, getopt
# Process the command-line arguments
opts, args = getopt.getopt(sys.argv[1:], 'fthm:r',
['fortune', 'text', 'html', 'max=', 'help',
'randomize'] )
# Set defaults
maxlength = 0 ; method = 'as_fortune'
randomize = 0
# Process arguments
for opt, arg in opts:
if opt in ['-f', '--fortune']:
method='as_fortune'
elif opt in ['-t', '--text']:
method = 'as_text'
elif opt in ['-h', '--html']:
method = 'as_html'
elif opt in ['-m', '--max']:
maxlength = string.atoi(arg)
elif opt in ['-r', '--randomize']:
randomize = 1
elif opt == '--help':
print __doc__ ; sys.exit(0)
# This function will simply output each quotation by calling the
# desired method, as long as it's not suppressed by a setting of
# --max.
qtlist = []
def process_func(qt, qtlist=qtlist, maxlength=maxlength, method=method):
func = getattr(qt, method)
output = func()
length = string.count(output, '\n')
if maxlength!=0 and length > maxlength: return
qtlist.append(output)
# Loop over the input files; use sys.stdin if no files are specified
if len(args) == 0: args = [sys.stdin]
for file in args:
if type(file) == types.StringType: input = open(file, 'r')
else: input = file
# Enforce the use of the Expat parser, because the code needs to be
# sure that the output will be UTF-8 encoded.
p=saxexts.XMLParserFactory.make_parser(["xml.sax.drivers.drv_pyexpat"])
dh = QuotationDocHandler(process_func)
p.setDocumentHandler(dh)
p.setErrorHandler(dh)
p.parseFile(input)
if type(file) == types.StringType: input.close()
p.close()
# Randomize the order of the quotations
if randomize:
import whrandom
q2 = []
for i in range(len(qtlist)):
qt = whrandom.randint(0,len(qtlist)-1 )
q2.append( qtlist[qt] )
qtlist[qt:qt+1] = []
assert len(qtlist) == 0
qtlist = q2
for quote in qtlist:
print quote
# We're done!
```
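As a rough illustration of how the handler above can be driven outside the command line (hypothetical, not part of the original demo): it mirrors the parser setup in the `__main__` block and assumes the file is importable as `qtfmt`; the sample document only uses element names defined by the handler's `start_`/`end_` methods.

```python
import StringIO
from xml.sax import saxexts
import qtfmt   # assumes demo/quotes/qtfmt.py is on the path

sample = """<quotations>
 <quotation>Simple is better than <em>complex</em>.
  <author>Tim Peters</author>
 </quotation>
</quotations>"""

def show(qt):
    print qt.as_text()

p = saxexts.XMLParserFactory.make_parser(["xml.sax.drivers.drv_pyexpat"])
dh = qtfmt.QuotationDocHandler(show)
p.setDocumentHandler(dh)
p.setErrorHandler(dh)
p.parseFile(StringIO.StringIO(sample))
p.close()
```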
#### File: demo/sgmlop/benchxml.py
```python
import time, sys, os
from xml.parsers import xmllib, sgmlop
SIZE = 16384
FILE = "hamlet.xml"
try:
FILE = sys.argv[1]
except IndexError:
pass
print "---", FILE, "---"
bytes = os.stat(FILE)[6]
# --------------------------------------------------------------------
# 1) sgmlop with null parser (no registered callbacks)
def test1():
fp = open(FILE)
parser = sgmlop.XMLParser()
while 1:
data = fp.read(SIZE)
if not data:
break
parser.feed(data)
parser.close()
fp.close()
# --------------------------------------------------------------------
# 2) sgmlop with dummy parser
class sgmlopDummy:
def finish_starttag(self, tag, data):
pass
def finish_endtag(self, tag):
pass
def handle_entityref(self, data):
pass
def handle_data(self, data):
pass
def handle_proc(self, name, data):
pass
def handle_cdata(self, data):
pass
def handle_charref(self, data):
pass
def handle_comment(self, data):
pass
def handle_special(self, data):
pass
def test2():
fp = open(FILE)
out = sgmlopDummy()
parser = sgmlop.XMLParser()
parser.register(out)
while 1:
data = fp.read(SIZE)
if not data:
break
parser.feed(data)
parser.close()
fp.close()
# --------------------------------------------------------------------
# 3) accelerated xmllib
class FastXMLParser(xmllib.FastXMLParser):
def unknown_starttag(self, tag, data):
pass
def unknown_endtag(self, tag):
pass
def handle_entityref(self, data):
pass
def handle_data(self, data):
pass
def handle_cdata(self, data):
pass
def test3():
fp = open(FILE)
parser = FastXMLParser()
while 1:
data = fp.read(SIZE)
if not data:
break
parser.feed(data)
parser.close()
fp.close()
# --------------------------------------------------------------------
# 4) old xmllib
class SlowXMLParser(xmllib.SlowXMLParser):
def unknown_starttag(self, tag, data):
pass
def unknown_endtag(self, tag):
pass
def handle_entityref(self, data):
pass
def handle_data(self, data):
pass
def handle_cdata(self, data):
pass
def test4():
fp = open(FILE)
parser = SlowXMLParser()
while 1:
data = fp.read(SIZE)
if not data:
break
parser.feed(data)
parser.close()
fp.close()
# --------------------------------------------------------------------
# 5) xmltok parser
try:
import xmltok
except (ImportError, SystemError):
xmltok = None
class xmltokDummy:
def do_tag(self, tag, data):
pass
def do_endtag(self, tag):
pass
def do_entity(self, tag, data):
pass
def do_data(self, data):
pass
def test5():
fp = open(FILE)
out = xmltokDummy()
parser = xmltok.ParserCreate()
parser.StartElementHandler = out.do_tag
parser.EndElementHandler = out.do_endtag
parser.CharacterDataHandler = out.do_data
parser.ProcessingInstructionHandler = out.do_entity
while 1:
data = fp.read(SIZE)
if not data:
break
parser.Parse(data)
parser.Parse("", 1)
fp.close()
# ====================================================================
# main
test = test1
t = time.clock()
test(); test(); test(); test(); test();
t = (time.clock() - t) / 5
print "sgmlop/null parser:", round(t, 3), "seconds;",
print int(bytes / t), "bytes per second"
time1 = t
test = test2
t = time.clock()
test(); test(); test(); test(); test();
t = (time.clock() - t) / 5
print "sgmlop/dummy parser:", round(t, 3), "seconds;",
print int(bytes / t), "bytes per second"
time2 = t
test = test3
t = time.clock()
test(); test();
t = (time.clock() - t) / 2
print "xmllib/fast parser:", round(t, 3), "seconds;",
print int(bytes / t), "bytes per second"
time3 = t
test = test4
t = time.clock()
test();
t = (time.clock() - t) / 1
print "xmllib/slow parser:", round(t, 3), "seconds;",
print int(bytes / t), "bytes per second"
time4 = t
print
print "normalized timings:"
print "slow xmllib ", 1.0
print "fast xmllib ", round(time3/time4, 3), "(%sx)" % round(time4/time3, 1)
print "sgmlop dummy", round(time2/time4, 3), "(%sx)" % round(time4/time2, 1)
print "sgmlop null ", round(time1/time4, 3), "(%sx)" % round(time4/time1, 1)
print
```
#### File: demo/xbel/lynx_parse.py
```python
import bookmark
import re
def parse_lynx_file(bms, input):
"""Convert a Lynx 2.8 bookmark file to XBEL, reading from the
input file object, and write to the output file object."""
# Read the whole file into memory
data = input.read()
# Get the title
m = re.search("<title>(.*?)</title>", data, re.IGNORECASE)
if m is None: title = "Untitled"
else: title = m.group(1)
bms.add_folder( title )
hrefpat = re.compile( r"""^ \s* <li> \s*
<a \s+ href \s* = \s* "(?P<url> [^"]* )" \s*>
(?P<name> .*? ) </a>""",
re.IGNORECASE| re.DOTALL | re.VERBOSE | re.MULTILINE)
pos = 0
while 1:
m = hrefpat.search(data, pos)
if m is None: break
pos = m.end()
url, name = m.group(1,2)
bms.add_bookmark( name, href = url)
bms.leave_folder()
if __name__ == '__main__':
import sys, glob
if len(sys.argv)<2 or len(sys.argv)>3:
print
print "A simple utility to convert Lynx bookmarks to XBEL."
print
print "Usage: "
print " lynx_parse.py <lynx-directory> [<xbel-file>]"
sys.exit(1)
bms = bookmark.Bookmarks()
# Determine the owner on Unix platforms
import os, pwd
uid = os.getuid()
t = pwd.getpwuid( uid )
bms.owner = t[4]
glob_pattern = os.path.join(sys.argv[1], '*.html')
file_list = glob.glob( glob_pattern )
for file in file_list:
input = open(file)
parse_lynx_file(bms, input)
if len(sys.argv)==3:
out=open(sys.argv[2],"w")
bms.dump_xbel(out)
out.close()
else:
bms.dump_xbel()
# Done
```
#### File: demo/xbel/xbel2html.py
```python
import sys
from xml.sax import make_parser,saxlib,saxutils
# --- HTML templates
top=\
"""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<HTML>
<HEAD>
<TITLE>%s</TITLE>
<META NAME="Generator" CONTENT="xbel2html">
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=%s">
</HEAD>
<BODY>
<H1>%s</H1>
"""
bottom=\
"""
<HR>
<ADDRESS>
Converted from XBEL by xbel2html.
</ADDRESS>
</BODY>
</HTML>
"""
# --- DocumentHandler
class XBELHandler(saxlib.ContentHandler):
def __init__(self,writer=sys.stdout,encoding='utf-8'):
self.stack=[]
self.writer=writer
self.last_url=None
self.inside_ul=0
self.level=0
self.encoding=encoding
def startElement(self,name,attrs):
self.stack.append(name)
self.data = ''
if name=="bookmark":
self.last_url=attrs["href"].encode(self.encoding)
def characters(self,data):
self.data += data.encode(self.encoding)
def endElement(self,name):
data = self.data
if self.stack[-1]=="title" and self.stack[-2]=="xbel":
self.writer.write(top % (data,self.encoding,data))
self.state=None
if self.stack[-1]=="desc" and self.stack[-2]=="xbel":
self.writer.write("<P>%s</P>\n" % data)
if self.stack[-1]=="title" and self.stack[-2]=="bookmark":
if not self.inside_ul:
self.inside_ul=1
self.writer.write("<UL>\n")
self.writer.write('<LI><A HREF="%s">%s</A>. \n' %
(self.last_url,data))
if self.stack[-1]=="desc" and self.stack[-2]=="bookmark":
self.writer.write(data+"\n\n")
if self.stack[-1]=="title" and self.stack[-2]=="folder":
self.writer.write("<LI><B>%s</B>\n" % data)
self.writer.write("<UL>\n")
self.inside_ul=1
del self.stack[-1]
if name=="folder":
self.writer.write("</UL>\n")
def endDocument(self):
self.writer.write("</UL>\n")
self.writer.write(bottom)
# --- Main program
if __name__ == '__main__':
p=make_parser()
from xml.sax.handler import feature_external_ges
p.setFeature(feature_external_ges, 0)
p.setContentHandler(XBELHandler())
p.setErrorHandler(saxutils.ErrorPrinter())
p.parse(sys.argv[1])
```
#### File: jkloth/pyxml/setup.py
```python
import sys, os, string
from distutils.core import setup, Extension
from setupext import Data_Files, install_Data_Files, wininst_request_delete
from distutils.sysconfig import get_config_var, get_config_vars
from distutils.sysconfig import parse_config_h, get_config_h_filename
# I want to override the default build directory so the extension
# modules are compiled and placed in the build/xml directory
# tree. This is a bit clumsy, but I don't see a better way to do
# this at the moment.
ext_modules = []
# Rename xml to _xmlplus for Python 2.x
if sys.hexversion < 0x2000000:
def xml(s):
return "xml"+s
else:
def xml(s):
return "_xmlplus"+s
# special command-line arguments
LIBEXPAT = None
LDFLAGS = []
args = sys.argv[:]
extra_packages = []
with_xpath = 1
with_xslt = 0
for arg in args:
if string.find(arg, '--with-libexpat=') == 0:
LIBEXPAT = string.split(arg, '=')[1]
sys.argv.remove(arg)
elif string.find(arg, '--ldflags=') == 0:
LDFLAGS = string.split(string.split(arg, '=')[1])
sys.argv.remove(arg)
elif arg == '--with-xpath':
with_xpath = 1
sys.argv.remove(arg)
elif arg == '--with-xslt':
with_xslt = 1
sys.argv.remove(arg)
elif arg == '--without-xpath':
with_xpath = 0
sys.argv.remove(arg)
elif arg == '--without-xslt':
with_xslt = 0
sys.argv.remove(arg)
if sys.platform.startswith("darwin"):
# Fixup various Mac OS X issues
if get_config_var("LDSHARED").find("-flat_namespace") == -1:
LDFLAGS.append('-flat_namespace')
if get_config_var("LDFLAGS").find("-arch i386") != -1:
# Remove the erroneous duplicate -arch flag globally
new_flags = get_config_var('LDFLAGS').replace('-arch i386', '')
get_config_vars()['LDFLAGS'] = new_flags
if with_xpath:
extra_packages.append(xml('.xpath'))
if with_xslt:
extra_packages.append(xml('.xslt'))
def get_expat_prefix():
if LIBEXPAT:
return LIBEXPAT
# XXX temporarily disable usage of installed expat
# until we figure out a way to determine its version
return
for p in ("/usr", "/usr/local"):
incs = os.path.join(p, "include")
libs = os.path.join(p, "lib")
if os.path.isfile(os.path.join(incs, "expat.h")) \
and (os.path.isfile(os.path.join(libs, "libexpat.so"))
or os.path.isfile(os.path.join(libs, "libexpat.a"))):
return p
expat_prefix = get_expat_prefix()
sources = ['extensions/pyexpat.c']
if expat_prefix:
define_macros = [('HAVE_EXPAT_H', None)]
include_dirs = [os.path.join(expat_prefix, "include")]
libraries = ['expat']
library_dirs = [os.path.join(expat_prefix, "lib")]
else:
# To build expat 1.95.x, we need to find out the byteorder
if sys.byteorder == "little":
xmlbo = "1234"
else:
xmlbo = "4321"
define_macros = [
('XML_NS', '1'),
('XML_DTD', '1'),
('BYTEORDER', xmlbo),
('XML_CONTEXT_BYTES','1024'),
]
if sys.platform == "win32":
# HAVE_MEMMOVE is not in PC/pyconfig.h
define_macros.extend([
('HAVE_MEMMOVE', '1'),
('XML_STATIC', ''),
])
include_dirs = ['extensions/expat/lib']
sources.extend([
'extensions/expat/lib/xmlparse.c',
'extensions/expat/lib/xmlrole.c',
'extensions/expat/lib/xmltok.c',
])
libraries = []
library_dirs = []
config_h = get_config_h_filename()
config_h_vars = parse_config_h(open(config_h))
for feature_macro in ['HAVE_MEMMOVE', 'HAVE_BCOPY']:
if config_h_vars.has_key(feature_macro):
define_macros.append((feature_macro, '1'))
ext_modules.append(
Extension(xml('.parsers.pyexpat'),
define_macros=define_macros,
include_dirs=include_dirs,
library_dirs=library_dirs,
libraries=libraries,
extra_link_args=LDFLAGS,
sources=sources
))
# Build sgmlop
ext_modules.append(
Extension(xml('.parsers.sgmlop'),
extra_link_args=LDFLAGS,
sources=['extensions/sgmlop.c'],
))
# Build boolean
ext_modules.append(
Extension(xml('.utils.boolean'),
extra_link_args=LDFLAGS,
sources=['extensions/boolean.c'],
))
# On Windows, install the documentation into a directory xmldoc, along
# with xml/_xmlplus. For RPMs, docs are installed into the RPM doc
# directory via setup.cfg (usually /usr/doc). On all other systems, the
# documentation is not installed.
doc2xmldoc = 0
if sys.platform == 'win32':
doc2xmldoc = 1
# This is a fragment from MANIFEST.in which should contain all
# files which are considered documentation (doc, demo, test, plus some
# toplevel files)
# distutils 1.0 has a bug where
# recursive-include test/output test_*
# is translated into a pattern ^test\\output\.*test\_[^/]*$
# on windows, which results in files not being included. Work around
# this bug by using graft where possible.
docfiles="""
recursive-include doc *.html *.tex *.txt *.gif *.css *.api *.web
recursive-include demo README *.py *.xml *.dtd *.html *.htm
include demo/genxml/data.txt
include demo/dom/html2html
include demo/xbel/doc/xbel.bib
include demo/xbel/doc/xbel.tex
include demo/xmlproc/catalog.soc
recursive-include test *.py *.xml *.html *.dtd
include test/test.xml.out
graft test/output
include ANNOUNCE CREDITS LICENCE README* TODO
global-exclude */CVS/*
"""
if doc2xmldoc:
xmldocfiles = [
Data_Files(copy_to = 'xmldoc',
template = string.split(docfiles,"\n"),
preserve_path = 1)
]
else:
xmldocfiles = []
setup (name = "PyXML",
version = "0.8.5", # Needs to match xml/__init__.version_info
description = "Python/XML package",
author = "XML-SIG",
author_email = "<EMAIL>",
url = "http://www.python.org/sigs/xml-sig/",
long_description =
"""XML Parsers and API for Python
This version of PyXML was tested with Python 2.x.
""",
# Override certain command classes with our own ones
cmdclass = {'install_data':install_Data_Files,
'bdist_wininst':wininst_request_delete
},
package_dir = {xml(''):'xml'},
data_files = [Data_Files(base_dir='install_lib',
copy_to=xml('/dom/de/LC_MESSAGES'),
files=['xml/dom/de/LC_MESSAGES/4Suite.mo']),
Data_Files(base_dir='install_lib',
copy_to=xml('/dom/en_US/LC_MESSAGES'),
files=['xml/dom/en_US/LC_MESSAGES/4Suite.mo']),
Data_Files(base_dir='install_lib',
copy_to=xml('/dom/fr/LC_MESSAGES'),
files=['xml/dom/fr/LC_MESSAGES/4Suite.mo']),
] + xmldocfiles,
packages = [xml(''),
xml('.dom'), xml('.dom.html'), xml('.dom.ext'),
xml('.dom.ext.reader'),
xml('.marshal'), xml('.unicode'),
xml('.parsers'), xml('.parsers.xmlproc'),
xml('.sax'), xml('.sax.drivers'),
xml('.sax.drivers2'), xml('.utils'), xml('.schema'),
#xml('.xpath'), xml('.xslt')
] + extra_packages,
ext_modules = ext_modules,
scripts = ['scripts/xmlproc_parse', 'scripts/xmlproc_val']
)
```
#### File: dom/ext/test_memory.py
```python
import Cyclops,sys
from xml.dom.ext.reader import Sax2
from xml.dom import ext
def test():
data = sys.stdin.read()
doc = Sax2.FromXml(data)
b1 = doc.createElementNS("http://foo.com","foo:branch")
c1 = doc.createElementNS("http://foo.com","foo:child1")
c2 = doc.createElementNS("http://foo.com","foo:child2")
b1.setAttributeNS("http://foo.com","foo:a1","value-1")
a1 = b1.getAttributeNodeNS("http://foo.com","a1")
a1.value = "This shouldn't leak"
b1.appendChild(c1)
b1.appendChild(c2)
doc.documentElement.appendChild(b1)
r1 = doc.createElementNS("http://foo.com","foo:replace")
doc.documentElement.replaceChild(r1,b1)
b1.removeChild(c2)
import cStringIO
s = cStringIO.StringIO()
import xml.dom.ext
xml.dom.ext.Print(doc, stream = s)
ext.ReleaseNode(doc)
ext.ReleaseNode(b1)
doc = Sax2.FromXml(data)
ext.ReleaseNode(doc)
if __name__ == '__main__':
cy = Cyclops.CycleFinder()
cy.run(test)
cy.find_cycles()
cy.show_cycles()
```
#### File: dom/ext/test_single_elements.py
```python
def test(inFile):
from xml.dom.ext.reader import HtmlLib
from xml.dom import ext
from xml.dom import Node
from xml.dom.html import HTMLDocument
doc = HTMLDocument.HTMLDocument()
HtmlLib.FromHtmlStream(inFile,doc)
print doc
ext.PrettyPrint(doc)
if __name__ == '__main__':
import sys
inFile = sys.stdin
if len(sys.argv) == 2:
inFile = open(sys.argv[1],'r')
test(inFile)
```
#### File: dom/html/test_basefont.py
```python
from util import testAttribute
from util import testIntAttribute
def test():
print 'testing source code syntax'
from xml.dom.html import HTMLBaseFontElement
from xml.dom import implementation
doc = implementation.createHTMLDocument('Title')
b = doc.createElement('BaseFont')
print 'testing get/set'
testAttribute(b,'color');
testAttribute(b,'face');
testAttribute(b,'size');
print 'get/set works'
if __name__ == '__main__':
test();
```
#### File: dom/html/test_col.py
```python
from util import testAttribute, error
from util import testIntAttribute
def test():
print 'testing source code syntax'
from xml.dom.html import HTMLTableColElement
from xml.dom import implementation
doc = implementation.createHTMLDocument('Title')
c = doc.createElement('COL');
print 'testing get/set'
testAttribute(c,'ch');
testAttribute(c,'chOff');
testIntAttribute(c,'span');
testAttribute(c,'width');
c._set_align('left')
rt = c._get_align()
if rt != 'Left':
error('get/set align failed')
c._set_vAlign('top')
rt = c._get_vAlign()
if rt != 'Top':
error('get/set vAlign failed')
print 'get/set works'
if __name__ == '__main__':
test();
```
#### File: dom/html/test_dl.py
```python
from util import testAttribute
from util import testIntAttribute
def test():
print 'testing source code syntax'
from xml.dom.html import HTMLDListElement
from xml.dom import implementation
doc = implementation.createHTMLDocument('Title')
d = doc.createElement('DL')
print 'testing get and set'
testIntAttribute(d,'compact')
print 'get/set works'
if __name__ == '__main__':
test();
```
#### File: dom/html/test_h.py
```python
from util import testAttribute, error
from util import testIntAttribute
def test():
print 'testing source code syntax'
from xml.dom.html import HTMLHeadingElement
from xml.dom import implementation
doc = implementation.createHTMLDocument('Title')
h = doc.createElement('H1')
print 'testing get/set'
h._set_align('left')
rt = h._get_align()
if rt != 'Left':
error('get/set align failed')
print 'get/set works'
if __name__ == '__main__':
test()
```
#### File: dom/html/test_isindex.py
```python
from util import error
from util import testAttribute
from util import testIntAttribute
def test():
print 'testing source code syntax'
from xml.dom.html import HTMLIsIndexElement
from xml.dom import implementation
doc = implementation.createHTMLDocument('Title')
i = doc.createElement('IsIndex')
f = doc.createElement('Form')
print 'testing get/set of Prompt'
testAttribute(i,'prompt');
print 'get/set Prompt works'
print 'testing getForm'
f.appendChild(i)
if i._get_form().nodeName != f.nodeName:
error('getForm failed')
print 'getForm works'
if __name__ == '__main__':
test()
```
#### File: dom/html/test_label.py
```python
from util import testAttribute
from util import testIntAttribute
def test():
print 'testing source code syntax'
from xml.dom.html import HTMLLabelElement
from xml.dom import implementation
doc = implementation.createHTMLDocument('Title')
l = doc.createElement('LABEL')
print 'testing get/set attributes'
testAttribute(l,'accessKey')
testAttribute(l,'htmlFor')
print 'get/set works'
if __name__ == '__main__':
test()
```
#### File: dom/html/test.py
```python
fileList = ['Collection',
'Element',
'HTML',
'HEAD',
'LINK',
'TITLE',
'META',
'BASE',
'ISINDEX',
'STYLE',
'BODY',
'FORM',
'SELECT',
'OPTGROUP',
'OPTION',
'INPUT',
'TEXTAREA',
'BUTTON',
'LABEL',
'FIELDSET',
'LEGEND',
'UL',
'OL',
'DL',
'DIR',
'MENU',
'LI',
'BLOCKQUOTE',
'DIV',
'P',
'H',
'Q',
'PRE',
'BR',
'BASEFONT',
'FONT',
'HR',
'MOD',
'A',
'IMG',
'OBJECT',
'PARAM',
'APPLET',
'MAP',
'AREA',
'SCRIPT',
'CAPTION',
'COL',
'TD',
'TR',
'SECTION',
'TABLE',
'FRAMESET',
'FRAME',
'IFRAME',
'DOCUMENT',
'HTML_DOM_IMPLEMENTATION',
]
import string
def test(files):
print 'Testing HTML Level 1'
for file in files:
print '**********Testing HTML %s**********' % file
exec 'import test_%s;_mod = test_%s' % (string.lower(file),string.lower(file));
_mod.test();
if __name__ == '__main__':
import sys
if len(sys.argv) <2:
test(fileList)
else:
test(sys.argv[1:]);
```
#### File: dom/html/test_section.py
```python
from util import testAttribute
from util import error
def test():
print 'testing source code syntax'
from xml.dom.html import HTMLTableSectionElement
from xml.dom import implementation
doc = implementation.createHTMLDocument('Title')
s = doc.createElement('TFOOT')
#Row index and section row index tested in section
print 'testing get/set'
testAttribute(s,'ch')
testAttribute(s,'chOff')
s._set_align('left')
rt = s._get_align()
if rt != 'Left':
error('get/set align failed')
s._set_vAlign('Top')
rt = s._get_vAlign()
if rt != 'Top':
error('get/set align failed')
print 'get/set works'
    print 'testing insertRow, deleteRow, getRows, and TR.getSectionRowIndex'
try:
r1 = s.insertRow(-1)
error('insertRow(-1) does not raise exception');
except:
pass
r1 = s.insertRow(0)
if r1 == None:
error('insertRow(0) failed');
r2 = s.insertRow(1)
if r2 == None:
error('insertRow(1) failed');
if r2._get_sectionRowIndex() != 1:
error('getSectionRowIndex Failed');
rows = s._get_rows()
if rows._get_length() != 2:
error('getRows failed')
if rows.item(0).nodeName != r1.nodeName:
error('getRows failed')
if rows.item(1).nodeName != r2.nodeName:
error('getRows failed')
try:
s.deleteRow(-1)
error('deleteRow(-1) does not raise exception')
except:
pass
s.deleteRow(1)
if r2._get_rowIndex() != -1:
error('deleted row still in tree')
if s._get_rows()._get_length() != 1:
error('deleteRow failed');
s.deleteRow(0)
if s._get_rows()._get_length() != 0:
error('deleteRow(0) failed')
    print 'insertRow, deleteRow, getRows, and TR.getSectionRowIndex works'
if __name__ == '__main__':
test()
```
#### File: dom/html/test_textarea.py
```python
from util import error
from util import testAttribute
from util import testIntAttribute
def test():
print 'testing source code syntax'
from xml.dom.html import HTMLTextAreaElement
from xml.dom import implementation
doc = implementation.createHTMLDocument('Title')
t = doc.createElement('TEXTAREA')
print 'testing get/set of attributes'
testAttribute(t,'defaultValue');
testAttribute(t,'accessKey');
testIntAttribute(t,'cols');
testIntAttribute(t,'disabled');
testAttribute(t,'name');
testIntAttribute(t,'readonly');
testIntAttribute(t,'rows');
testIntAttribute(t,'tabIndex');
print 'get/set work'
print 'testing clone node'
t2 = t.cloneNode(1)
if t2._get_defaultValue() != t._get_defaultValue():
error('cloneNode did not set the default value');
print 'cloneNode works'
if __name__ == '__main__':
test()
```
#### File: test/dom/test_nodeiterator.py
```python
from TestSuite import EMPTY_NAMESPACE
def test(tester):
tester.startGroup('NodeIterator')
tester.startTest('Checking syntax')
try:
from xml.dom import NodeIterator
from xml.dom.NodeIterator import NodeIterator
except:
tester.error('Error in syntax', 1)
tester.testDone()
tester.startTest('Creating test environment')
from xml.dom import implementation
doc = implementation.createDocument(EMPTY_NAMESPACE,None,None);
#xml_string = '<a><b><c/><d/></b><e><f/><g/></e></a>'
try:
a = doc.createElement('a')
b = doc.createElement('b')
c = doc.createElement('c')
d = doc.createElement('d')
e = doc.createElement('e')
f = doc.createElement('f')
g = doc.createElement('g')
except:
tester.error('Couldn\'t create elements')
try:
b.appendChild(c)
b.appendChild(d)
a.appendChild(b)
e.appendChild(f)
e.appendChild(g)
a.appendChild(e)
doc.appendChild(a)
except:
        tester.error('Couldn\'t append to DOM tree')
from xml.dom.NodeFilter import NodeFilter
nit = doc.createNodeIterator(doc, NodeFilter.SHOW_ELEMENT, None,1)
tester.testDone()
tester.startTest('Iterating forward')
curr_node = nit.nextNode()
while curr_node:
curr_node = nit.nextNode()
tester.testDone()
tester.startTest('Iterating in reverse')
curr_node = nit.previousNode()
while curr_node:
curr_node = nit.previousNode()
tester.testDone()
tester.startTest('Iterating forward again')
curr_node = nit.nextNode()
while curr_node:
curr_node = nit.nextNode()
tester.testDone()
return tester.groupDone()
if __name__ == '__main__':
import sys
import TestSuite
tester = TestSuite.TestSuite()
retVal = test(tester)
sys.exit(retVal)
```
#### File: test/dom/test_nodelist.py
```python
from TestSuite import EMPTY_NAMESPACE
from xml.dom import DOMException
from xml.dom import NO_MODIFICATION_ALLOWED_ERR
def get_exception_name(code):
import types
from xml import dom
for (name,value) in vars(dom).items():
if (type(value) == types.IntType
and value == code):
return name
def test(tester):
tester.startGroup('NodeList')
tester.startTest('Checking syntax')
try:
from xml.dom import NodeList
from xml.dom.NodeList import NodeList
except:
tester.error('Error in syntax',1)
tester.testDone()
tester.startTest('Creating the test environment')
try:
from xml.dom import implementation
dt = implementation.createDocumentType('','','')
doc = implementation.createDocument(EMPTY_NAMESPACE,'ROOT',dt)
except:
tester.error('Error creating document')
nodes = []
try:
for ctr in range(3):
nodes.append(doc.createElement('Node%d' %ctr))
except:
tester.error("Error creating nodes")
e = doc.createElement('PARENT')
try:
for n in nodes:
e.appendChild(n)
except:
tester.error('Error appending nodes')
nl = e.childNodes
tester.testDone()
tester.startTest("Testing attributes")
if nl.length != 3:
tester.error('length reports wrong amount')
try:
nl.length = 5
except DOMException, x:
if x.code != NO_MODIFICATION_ALLOWED_ERR:
name = get_exception_name(x.code)
tester.error("Wrong exception '%s', expected NOMODIFICATION_ALLOWED_ERR" % name)
else:
tester.error('length not read-only')
tester.testDone()
tester.startTest("Testing item()")
if nl.item(0).nodeName != nodes[0].nodeName:
tester.error("Item returns wrong item")
if nl.item(3) != None:
tester.error("Item returns something on invalid index")
tester.testDone()
return tester.groupDone()
if __name__ == '__main__':
import sys
import TestSuite
tester = TestSuite.TestSuite()
retVal = test(tester)
sys.exit(retVal)
```
#### File: test/dom/test.py
```python
import string, time
import TestSuite
### Methods ###
def runTests(tests, testSuite):
banner = 'Performing a test of DOM Core/Traversal/HTML'
markers = '#'*((testSuite.columns - len(banner)) / 2 - 1)
print markers, banner, markers
total = 0.0
for test in tests:
module = __import__('test_' + string.lower(test))
start = time.time()
module.test(testSuite)
total = total + time.time() - start
return total
### Application ###
if __name__ == '__main__':
logLevel = 1
logFile = None
haltOnError = 1
test_list = ['Node',
'NodeList',
'NamedNodeMap',
'NodeIterator',
'TreeWalker',
'Attr',
'Element',
'DocumentFragment',
'Document',
'DOMImplementation',
'CharacterData',
'Comment',
'Text',
'CDATASection',
'DocumentType',
'Entity',
'EntityReference',
'Notation',
'ProcessingInstruction',
'Range',
'Struct',
'HTML',
#'Demo',
#'Pythonic'
]
import sys, os, getopt
prog_name = os.path.split(sys.argv[0])[1]
short_opts = 'hl:nqtv:'
long_opts = ['help',
'log=',
'no-error',
'tests',
'quiet',
'verbose='
]
usage = '''Usage: %s [options] [[all] [test]...]
Options:
-h, --help Print this message and exit
-l, --log <file> Write output to a log file (default=%s)
-n, --no-error Continue testing if error condition
-q, --quiet Display as little as possible
-t, --tests Show a list of tests that can be run
-v, --verbose <level> Set the output level (default=%s)
0 - display nothing
1 - errors only (same as --quiet)
2 - warnings and errors
3 - information, warnings and errors
4 - display everything
''' %(prog_name, logFile, logLevel)
command_line_error = 0
bad_options = []
finished = 0
args = sys.argv[1:]
while not finished:
try:
optlist, args = getopt.getopt(args, short_opts, long_opts)
except getopt.error, data:
bad_options.append(string.split(data)[1])
args.remove(bad_options[-1])
command_line_error = 1
else:
finished = 1
display_usage = 0
display_tests = 0
for op in optlist:
if op[0] == '-h' or op[0] == '--help':
display_usage = 1
elif op[0] == "-l" or op[0] == '--log':
logFile = op[1]
elif op[0] == '-n' or op[0] == '--no-error':
haltOnError = 0
elif op[0] == '-t' or op[0] == '--tests':
display_tests = 1
elif op[0] == '-q' or op[0] == '--quiet':
logLevel = 1
elif op[0] == '-v' or op[0] == '--verbose':
logLevel = int(op[1])
all_tests = 0
if args:
lower_test = []
for test in test_list:
lower_test.append(string.lower(test))
for test in args:
if string.lower(test) == 'all':
all_tests = 1
break
if string.lower(test) not in lower_test:
print "%s: Test not found '%s'" %(prog_name, test)
args.remove(test)
display_tests = 1
if len(args) and not all_tests:
tests = args
elif not display_tests:
tests = test_list
if command_line_error or display_usage or display_tests:
for op in bad_options:
print "%s: Unrecognized option '%s'" %(prog_name,op)
if display_usage:
print usage
if display_tests:
print 'Available tests are:'
for t in test_list:
print ' %s' % t
sys.exit(command_line_error)
testSuite = TestSuite.TestSuite(haltOnError)
total = runTests(tests, testSuite)
print "Test Time - %.3f secs" % total
```
#### File: pyxml/test/test_dom.py
```python
import StringIO, sys
from xml.dom import Node # MUST be first
from xml.dom import implementation, DOMException
from xml.dom import HIERARCHY_REQUEST_ERR, NOT_FOUND_ERR
from xml.dom import INDEX_SIZE_ERR, INVALID_CHARACTER_ERR, SYNTAX_ERR
from xml.dom.ext.reader.Sax2 import FromXml
from xml.dom.ext import PrettyPrint
# Internal test function: traverse a DOM tree, then verify that all
# the parent pointers are correct. Do NOT take this function as an
# example of using the Python DOM interface; it knows about the hidden
# details of the DOM implementation in order to check them.
def _check_dom_tree(t):
"Verify that all the parent pointers in a DOM tree are correct"
parent = {} # Dict mapping _nodeData instances to their parent
nodes = [] # Cumulative list of all the _nodeDatas encountered
Queue = [t] # Queue for breadth-first traversal of tree
# Do a breadth-first traversal of the DOM tree t
while Queue:
node = Queue[0]
children = node.childNodes
for c in children:
# Store this node as the parent of each child
parent[c] = node
# Add each child to the cumulative list
nodes.append(c)
# Append each child to the queue
Queue.append(c)
# Remove the node we've just processed
Queue = Queue[1:]
# OK, now walk over all the children, checking that .parentNode
# is correct.
count = 0
for n in nodes:
p = n.parentNode
if p is None:
assert not parent.has_key(n)
else:
assert p == parent[n]
count = count + 1
test_text = """<?xml version="1.0"?>
<doc>
<title>This is a test</title>
<h1>Don't panic</h1>
<p>Maybe it will work.</p>
<h2>We can handle it</h2>
<h3>Yes we can</h3>
<h3>Or maybe not</h3>
End of test.
</doc>
"""
doc = FromXml(test_text)
_check_dom_tree(doc)
print 'Simple document'
PrettyPrint(doc, sys.stdout)
print
# Example from the docstring at the top of xml.dom.core.py
doc = implementation.createDocument(None,None,None)
html = doc.createElement('html')
html.setAttribute('attr', 'value')
head = doc.createElement('head')
title = doc.createElement('title')
text = doc.createTextNode("Title goes here")
title.appendChild(text)
head.appendChild(title)
html.appendChild(head)
doc.appendChild (html)
_check_dom_tree(doc)
print '\nOutput of docstring example'
PrettyPrint(doc, sys.stdout)
print
# Detailed test suite for the DOM
from xml.dom import Document
print '\nRunning detailed test suite'
def check(cond, explanation, expected=0):
truth = eval(cond)
if not truth:
if expected:
print "XFAIL:",
else:
print ' *** Failed:',
print explanation, '\n\t', cond
doc = implementation.createDocument(None,None,None)
check('isinstance(doc, Document.Document)', 'createDocument returns a Document')
check('doc.parentNode == None', 'Documents have no parent')
# Check that documents can only have one child
n1 = doc.createElement('n1') ; n2 = doc.createElement('n2')
pi = doc.createProcessingInstruction("Processing", "Instruction")
doc.appendChild(pi)
doc.appendChild(n1)
try:
doc.appendChild(n1) # n1 should be removed, and then added again
except DOMException:
print "XFAIL: 4DOM does not support multiple insertion of same node"
try:
doc.appendChild(n2)
except DOMException,e:
assert e.code==HIERARCHY_REQUEST_ERR
else:
print " *** Failed: Document.insertBefore didn't raise HierarchyRequestException"
doc.replaceChild(n2, n1) # Should work
try: doc.replaceChild(n1, pi)
except DOMException,e:
assert e.code==HIERARCHY_REQUEST_ERR
else:
print " *** Failed: Document.replaceChild didn't raise HierarchyRequestException"
doc.replaceChild(n2, pi) # Should also work
check('pi.parentNode == None',
'Document.replaceChild: PI should have no parent')
try:
doc.removeChild(n2)
except DOMException:
print "XFAIL"
check('n2.parentNode == None',
'Document.removeChild: n2 should have no parent')
# Check adding and deletion with DocumentFragments
fragment = doc.createDocumentFragment() ; fragment.appendChild( n1 )
doc.appendChild( fragment )
check('fragment.parentNode == None',
'Doc.appendChild: fragment has no parent')
check('n1.parentNode.nodeType == Node.DOCUMENT_NODE',
'Doc.appendChild: n1 now has document as parent')
fragment = doc.createDocumentFragment() ; fragment.appendChild( n1 )
n2 = doc.createElement('n2') ; fragment.appendChild( n2 )
try: doc.appendChild( fragment )
except DOMException,e:
assert e.code == HIERARCHY_REQUEST_ERR
else:
print " *** Failed: Document.fragment.appendChild didn't raise HierarchyRequestException"
fragment = doc.createDocumentFragment() ; fragment.appendChild( n1 )
n2 = doc.createElement('n2') ; fragment.appendChild( n2 )
doc.appendChild( pi )
try: doc.replaceChild(fragment, pi)
except DOMException,e:
assert e.code==HIERARCHY_REQUEST_ERR
else:
print " *** Failed: Document.fragment.replaceChild didn't raise HierarchyRequestException"
#FIXME - fragment.removeChild(n2)
fragment.appendChild(pi)
doc.appendChild( fragment)
check('n1.parentNode == doc',
"Document.fragment.replaceChild parent node is correct")
_check_dom_tree(doc)
# Check adding and deleting children for ordinary nodes
n1 = doc.createElement('n1') ; n2 = doc.createElement('n2')
check( 'n1.parentNode == None', 'newly created Element has no parent')
e1 = doc.createTextNode('e1') ; e2 = doc.createTextNode('e2')
e3 = doc.createTextNode('e3')
n1.appendChild( e1 ) ; n1.appendChild( e2 ) ; n2.appendChild(e3)
# Test .insertBefore with refChild set to a node
n2.insertBefore(e1, e3)
check('len(n1.childNodes) == 1', "insertBefore: node1 has 1 child")
check('len(n2.childNodes) == 2', "insertBefore: node2 has 2 children")
check('n1.firstChild.data=="e2"', "insertBefore: node1's child is e2")
check('n2.firstChild.data=="e1"', "insertBefore: node2's first child is e1")
check('n2.lastChild.data=="e3"', "insertBefore: node2's last child is e3")
check('e1.parentNode.tagName == "n2"', "insertBefore: e1's parent is n2")
check('e2.parentNode.tagName == "n1"', "insertBefore: e2's parent is n1")
check('e3.parentNode.tagName == "n2"', "insertBefore: e3's parent is n2")
try: n2.insertBefore(e1, e2)
except DOMException,e:
assert e.code==NOT_FOUND_ERR
else:
print " *** Failed: insertBefore didn't raise NotFoundException"
# Test .insertBefore with refChild==None
n2.insertBefore(e1, None)
check('len(n2.childNodes) == 2', "None insertBefore: node1 has 2 children")
check('n2.firstChild.data=="e3"', "None insertBefore: node2's first child is e3")
check('n2.lastChild.data=="e1"', "None insertBefore: node2's last child is e1")
# Test replaceChild
ret = n1.replaceChild(e1, e2)
check('e2.parentNode == None', "replaceChild: e2 has no parent")
check('len(n1.childNodes) == 1', "replaceChild: node1 has 1 child")
check('n1.firstChild.data=="e1"', "replaceChild: node1's only child is e1")
check('ret.data == "e2"', "replaceChild: returned value node1's only child is e1")
try: n1.replaceChild(e2, e2)
except DOMException,e:
assert e.code==NOT_FOUND_ERR
else:
print " *** Failed: replaceChild didn't raise NotFoundException"
# Test removeChild
ret = n1.removeChild( e1 )
check('e1.parentNode == None', "removeChild: e1 has no parent")
check('ret.data == "e1"', "removeChild: e1 is the returned value")
try: n1.removeChild(e2)
except DOMException,e:
assert e.code==NOT_FOUND_ERR
else:
print " *** Failed: removeChild didn't raise NotFoundException"
# XXX two more cases for adding stuff: normal, Document, DocumentFragment
# Test the functions in the CharacterData interface
text = doc.createTextNode('Hello world')
#FIXME - check('text[0:5].value == "Hello"', 'text: slicing a node')
try: text.substringData(-5, 5)
except DOMException,e:
assert e.code==INDEX_SIZE_ERR
else:
print " *** Failed: substringData didn't raise IndexSizeException (negative)"
try: text.substringData(200, 5)
except DOMException,e:
assert e.code==INDEX_SIZE_ERR
else:
print " *** Failed: substringData didn't raise IndexSizeException (larger)"
try: text.substringData(5, -5)
except DOMException,e:
assert e.code==INDEX_SIZE_ERR
else:
print " *** Failed: substringData didn't raise IndexSizeException (negcount)"
text.appendData('!')
check('text.data == "Hello world!"', 'text: appendData')
try: text.insertData(-5, 'string')
except DOMException,e:
assert e.code==INDEX_SIZE_ERR
else:
print " *** Failed: insertData didn't raise IndexSizeException (negative)"
try: text.insertData(200, 'string')
except DOMException,e:
assert e.code==INDEX_SIZE_ERR
else:
print " *** Failed: insertData didn't raise IndexSizeException (larger)"
text.insertData(5, ',')
check('text.data == "Hello, world!"', 'text: insertData of ","')
try: text.deleteData(-5, 5)
except DOMException,e:
assert e.code==INDEX_SIZE_ERR
else:
print " *** Failed: deleteData didn't raise IndexSizeException (negative)"
try: text.deleteData(200, 5)
except DOMException,e:
assert e.code==INDEX_SIZE_ERR
else:
print " *** Failed: deleteData didn't raise IndexSizeException (larger)"
text.deleteData(0, 5)
check('text.data == ", world!"', 'text: deleteData of first 5 chars')
try: text.replaceData(-5, 5, 'Top of the')
except DOMException,e:
assert e.code==INDEX_SIZE_ERR
else:
print " *** Failed: replaceData didn't raise IndexSizeException (negative)"
try: text.replaceData(200, 5, 'Top of the')
except DOMException,e:
assert e.code==INDEX_SIZE_ERR
else:
print " *** Failed: replaceData didn't raise IndexSizeException (larger)"
text.replaceData(0, 1, 'Top of the')
check('text.data == "Top of the world!"', 'text: replaceData of first char')
# Test the Element class
e = doc.createElement('elem')
attr = doc.createAttribute('attr2')
attr.value = "v2"
#check('e.toxml() == "<elem />"', 'Element: empty element')
check('e.tagName == "elem"', 'Element: tag name')
check('len(e.attributes) == 0', 'Element: empty get_attributes')
check('e.getAttribute("dummy") == ""', 'Element: empty getAttribute')
check('e.getAttributeNode("dummy") == None', 'Element: empty getAttributeNode')
try: e.setAttribute('dummy', attr)
except DOMException,x:
assert x.code == SYNTAX_ERR
# Spec says invalid character for name not value
# assert x.code==INVALID_CHARACTER_ERR
else:
print " *** Failed: setAttribute didn't raise InvalidCharacterException"
e.setAttribute('dummy', 'value')
#check('e.toxml() == "<elem dummy=\'value\' />"', 'Element with 1 attribute')
check('e.getAttribute("dummy") == "value"', 'Element: getAttribute w/ value')
check('e.getAttributeNode("dummy").value == "value"', 'Element: getAttributeNode w/ value')
a2 = e.getAttributeNode( 'dummy' )
check('a2.parentNode == None', 'Attribute: should have no parent')
check('a2.value == "value"', 'Attribute: value is correct')
e.removeAttribute('dummy')
check('len(e.attributes) == 0', 'Element: attribute removed')
e.setAttributeNode(attr)
check('e.attributes[0].value == "v2"', 'Element: attribute node added')
a2 = doc.createAttribute('attr2')
a2.value = 'v3'
ret = e.setAttributeNode(a2)
check('e.attributes[0].value == "v3"', 'Element: attribute node replaced')
check('ret.value == "v2"', 'Element: deleted attribute node returned')
e.removeAttributeNode(a2)
check('len(e.attributes) == 0', 'Element: attribute node removed')
# Check handling of namespace prefixes
#FIXME (start)
#e.setAttribute('xmlns', 'http://defaulturi')
#e.setAttribute('xmlns:html', 'http://htmluri')
#check('e.ns_prefix[""] == "http://defaulturi"',
# 'Default namespace with setAttribute')
#check('e.ns_prefix["html"] == "http://htmluri"',
# 'Prefixed namespace with setAttribute')
#e.removeAttribute('xmlns:html')
#check('not e.ns_prefix.has_key("html")',
# 'Prefixed namespace with removeAttribute')
#e.removeAttribute('xmlns')
#check('len(e.ns_prefix) == 0', 'Default namespace with removeAttribute')
#default = doc.createAttribute('xmlns') ; default.value = "http://defaulturi"
#html = doc.createAttribute('xmlns:html') ; html.value = "http://htmluri"
#e.setAttributeNode(default) ; e.setAttributeNode(html)
#check('e.ns_prefix[""] == "http://defaulturi"',
# 'Default namespace with setAttributeNode')
#check('e.ns_prefix["html"] == "http://htmluri"',
# 'Prefixed namespace with setAttributeNode')
#e.removeAttributeNode(html)
#check('not e.ns_prefix.has_key("html")',
# 'Prefixed namespace with removeAttribute')
#e.removeAttributeNode(default)
#FIXME (end)
#
# Check getElementsByTagName
#
check('len(e.getElementsByTagName("elem")) == 0',
"getElementsByTagName doesn't return element")
check('len(e.getElementsByTagName("*")) == 0',
"getElementsByTagName doesn't return element")
# Check CharacterData interfaces using Text nodes
t1 = doc.createTextNode('first') ; e.appendChild( t1 )
t2 = doc.createTextNode('second') ; e.appendChild( t2 )
t3 = doc.createTextNode('third') ; e.appendChild( t3 )
#check('e.toxml() == "<elem>firstsecondthird</elem>"',
# "Element: content of three Text nodes as children")
check('len(e.childNodes) == 3', 'Element: three Text nodes as children')
e.normalize()
check('e.firstChild.data == "firstsecondthird"',
"Element: normalized Text nodes")
check('len(e.childNodes) == 1', 'Element: should be one normalized Text node')
check('t2.parentNode == None', 'Element: normalized t2 should have no parent')
check('t3.parentNode == None', 'Element: normalized t3 should have no parent')
# Text node
t1.splitText(5)
check('e.firstChild.data == "first"',
"Element: newly split Text nodes")
check('len(e.childNodes) == 2', 'Text: should be two split Text nodes')
check('e.lastChild.data == "secondthird"',
"Element: newly split Text nodes")
# Check comparisons; e1 and e2 are different proxies for the same underlying
# node
n1 = doc.createElement('n1') ; n2 = doc.createElement('n2')
n1.appendChild(n2)
e1 = n1 ; e2 = n2.parentNode
check('e1 is e2', 'Two proxies are different according to "is" operator')
check('e1 == e2', 'Two proxies are different according to "==" operator')
# Done at last!
print 'Test suite completed'
```
#### File: pyxml/test/test_domreg.py
```python
import unittest
import test_support
from xml.dom import domreg
def parse_feature_string(s):
# helper to make sure the results are always plain lists
return list(domreg._parse_feature_string(s))
class DomregTestCase(unittest.TestCase):
def setUp(self):
domreg.registerDOMImplementation("its-a-fake",
self.getDOMImplementation)
def getDOMImplementation(self):
self.fake = FakeDOM(self.my_features)
return self.fake
def test_simple(self):
self.assertEqual(parse_feature_string("simple"),
[("simple", None)])
self.assertEqual(parse_feature_string("simple 1.0"),
[("simple", "1.0")])
self.assertEqual(parse_feature_string("simple complex"),
[("simple", None), ("complex", None)])
self.assertEqual(parse_feature_string("simple 2 complex 3.1.4.2"),
[("simple", "2"), ("complex", "3.1.4.2")])
def test_extra_version(self):
self.assertRaises(ValueError,
domreg._parse_feature_string, "1.0")
self.assertRaises(ValueError,
domreg._parse_feature_string, "1 simple")
self.assertRaises(ValueError,
domreg._parse_feature_string, "simple 1 2")
def test_find_myself(self):
self.my_features = [("splat", "1"), ("splat", "2"), ("splat", None)]
self.failUnless(domreg.getDOMImplementation(features="splat")
is self.fake)
self.failUnless(domreg.getDOMImplementation(features="splat 1")
is self.fake)
self.failUnless(domreg.getDOMImplementation(features="splat 2")
is self.fake)
self.failUnless(domreg.getDOMImplementation(features="splat 1 splat 2")
is self.fake)
self.failUnless(domreg.getDOMImplementation(features="splat 2 splat 1")
is self.fake)
def _test_cant_find(self):
# This test is disabled since we need to determine what the
# right thing to do is. ;-( The DOM Level 3 draft says
# getDOMImplementation() should return null when there isn't a
# match, but the existing Python API raises ImportError.
self.my_features = []
self.failUnless(domreg.getDOMImplementation(features="splat")
is None)
self.failUnless(domreg.getDOMImplementation(features="splat 1")
is None)
class FakeDOM:
def __init__(self, features):
self.__features = features
def hasFeature(self, feature, version):
return (feature, version) in self.__features
def test_suite():
return unittest.makeSuite(DomregTestCase)
def test_main():
test_support.run_suite(test_suite())
if __name__ == "__main__":
test_support.verbose = 1
test_main()
```
#### File: pyxml/test/test_filter.py
```python
import pprint
import sys
from xml.dom import xmlbuilder, expatbuilder, Node
from xml.dom.NodeFilter import NodeFilter
class Filter(xmlbuilder.DOMBuilderFilter):
whatToShow = NodeFilter.SHOW_ELEMENT
def startContainer(self, node):
assert node.nodeType == Node.ELEMENT_NODE
if node.tagName == "skipthis":
return self.FILTER_SKIP
elif node.tagName == "rejectbefore":
return self.FILTER_REJECT
elif node.tagName == "stopbefore":
return self.FILTER_INTERRUPT
else:
return self.FILTER_ACCEPT
def acceptNode(self, node):
assert node.nodeType == Node.ELEMENT_NODE
if node.tagName == "skipafter":
return self.FILTER_SKIP
elif node.tagName == "rejectafter":
return self.FILTER_REJECT
elif node.tagName == "stopafter":
return self.FILTER_INTERRUPT
else:
return self.FILTER_ACCEPT
class RecordingFilter:
# Inheriting from xml.dom.xmlbuilder.DOMBuilderFilter is not
# required, so we won't inherit from it this time to make sure it
# isn't a problem. We have to implement the entire interface
# directly.
whatToShow = NodeFilter.SHOW_ALL
def __init__(self):
self.events = []
def startContainer(self, node):
self.events.append(("start", node.nodeType, str(node.nodeName)))
return xmlbuilder.DOMBuilderFilter.FILTER_ACCEPT
def acceptNode(self, node):
self.events.append(("accept", node.nodeType, str(node.nodeName)))
return xmlbuilder.DOMBuilderFilter.FILTER_ACCEPT
simple_options = xmlbuilder.Options()
simple_options.filter = Filter()
simple_options.namespaces = 0
record_options = xmlbuilder.Options()
record_options.namespaces = 0
def checkResult(src):
print
dom = expatbuilder.makeBuilder(simple_options).parseString(src)
print dom.toxml()
dom.unlink()
def checkFilterEvents(src, record, what=NodeFilter.SHOW_ALL):
record_options.filter = RecordingFilter()
record_options.filter.whatToShow = what
dom = expatbuilder.makeBuilder(record_options).parseString(src)
if record != record_options.filter.events:
print
print "Received filter events:"
pprint.pprint(record_options.filter.events)
print
print "Expected filter events:"
pprint.pprint(record)
dom.unlink()
# a simple case of skipping an element
checkResult("<doc><e><skipthis>text<e/>more</skipthis>abc</e>xyz</doc>")
# skip an element nested indirectly within another skipped element
checkResult('''\
<doc>Text.
<skipthis>Nested text.
<skipthis>Nested text in skipthis element.</skipthis>
More nested text.
</skipthis>Outer text.</doc>
''')
# skip an element nested indirectly within another skipped element
checkResult('''\
<doc>Text.
<skipthis>Nested text.
<nested-element>
<skipthis>Nested text in skipthis element.</skipthis>
More nested text.
</nested-element>
More text.
</skipthis>Outer text.</doc>
''')
checkResult("<doc><rejectbefore/></doc>")
checkResult("<doc><rejectafter/></doc>")
checkResult('''\
<doc><rejectbefore>
Text.
<?my processing instruction?>
<more stuff="foo"/>
<!-- a comment -->
</rejectbefore></doc>
''')
checkResult('''\
<doc><rejectafter>
Text.
<?my processing instruction?>
<more stuff="foo"/>
<!-- a comment -->
</rejectafter></doc>
''')
# Make sure the document element is not passed to the filter:
checkResult("<rejectbefore/>")
checkResult("<rejectafter/>")
checkResult("<stopbefore/>")
checkResult("<doc>text<stopbefore> and </stopbefore>more</doc>")
checkResult("<doc>text<stopafter> and </stopafter>more</doc>")
checkResult("<doc><a/><skipafter>text</skipafter><a/></doc>")
checkFilterEvents("<doc/>", [])
checkFilterEvents("<doc attr='value'/>", [])
checkFilterEvents("<doc><e/></doc>", [
("start", Node.ELEMENT_NODE, "e"),
("accept", Node.ELEMENT_NODE, "e"),
])
src = """\
<!DOCTYPE doc [
<!ENTITY e 'foo'>
<!NOTATION n SYSTEM 'http://xml.python.org/notation/n'>
]>
<!-- comment -->
<?sample pi?>
<doc><e attr='value'><?pi data?><!--comment--></e></doc>
"""
checkFilterEvents(src, [
("accept", Node.DOCUMENT_TYPE_NODE, "doc"),
("accept", Node.ENTITY_NODE, "e"),
("accept", Node.NOTATION_NODE, "n"),
("accept", Node.COMMENT_NODE, "#comment"),
("accept", Node.PROCESSING_INSTRUCTION_NODE, "sample"),
("start", Node.ELEMENT_NODE, "e"),
("accept", Node.PROCESSING_INSTRUCTION_NODE, "pi"),
("accept", Node.COMMENT_NODE, "#comment"),
("accept", Node.ELEMENT_NODE, "e"),
])
# Show everything except a couple of things to the filter, to check
# that whatToShow is implemented. This isn't sufficient to be a
# black-box test, but will get us started.
checkFilterEvents(src, [
("accept", Node.DOCUMENT_TYPE_NODE, "doc"),
("accept", Node.ENTITY_NODE, "e"),
("accept", Node.NOTATION_NODE, "n"),
("accept", Node.PROCESSING_INSTRUCTION_NODE, "sample"),
("start", Node.ELEMENT_NODE, "e"),
("accept", Node.PROCESSING_INSTRUCTION_NODE, "pi"),
("accept", Node.ELEMENT_NODE, "e"),
], what=NodeFilter.SHOW_ALL & ~NodeFilter.SHOW_COMMENT)
checkFilterEvents(src, [
("accept", Node.DOCUMENT_TYPE_NODE, "doc"),
("accept", Node.ENTITY_NODE, "e"),
("accept", Node.NOTATION_NODE, "n"),
("accept", Node.COMMENT_NODE, "#comment"),
("start", Node.ELEMENT_NODE, "e"),
("accept", Node.COMMENT_NODE, "#comment"),
("accept", Node.ELEMENT_NODE, "e"),
], what=NodeFilter.SHOW_ALL & ~NodeFilter.SHOW_PROCESSING_INSTRUCTION)
```
#### File: pyxml/test/test_pyexpat.py
```python
try:
import xml.parsers.expat
except ImportError:
import pyexpat
from xml.parsers import expat
class Outputter:
def StartElementHandler(self, name, attrs):
print 'Start element:\n\t', repr(name), "{",
# attrs may contain characters >127, which are printed hex in Python
# 2.1, but octal in earlier versions
keys = attrs.keys()
keys.sort()
for k in keys:
v = attrs[k]
value = ""
for c in v:
if ord(c)>=256:
value = "%s\\u%.4x" % (value, ord(c))
elif ord(c)>=128:
value = "%s\\x%.2x" % (value, ord(c))
else:
value = value + c
print "%s: %s," % (repr(k),repr(value)),
print "}"
def EndElementHandler(self, name):
print 'End element:\n\t', repr(name)
def CharacterDataHandler(self, data):
data = data.strip()
if data:
print 'Character data:'
print '\t', repr(data)
def ProcessingInstructionHandler(self, target, data):
print 'PI:\n\t', repr(target), repr(data)
def StartNamespaceDeclHandler(self, prefix, uri):
print 'NS decl:\n\t', repr(prefix), repr(uri)
def EndNamespaceDeclHandler(self, prefix):
print 'End of NS decl:\n\t', repr(prefix)
def StartCdataSectionHandler(self):
print 'Start of CDATA section'
def EndCdataSectionHandler(self):
print 'End of CDATA section'
def CommentHandler(self, text):
print 'Comment:\n\t', repr(text)
def NotationDeclHandler(self, *args):
name, base, sysid, pubid = args
print 'Notation declared:', args
def UnparsedEntityDeclHandler(self, *args):
entityName, base, systemId, publicId, notationName = args
print 'Unparsed entity decl:\n\t', args
def NotStandaloneHandler(self, userData):
print 'Not standalone'
return 1
def ExternalEntityRefHandler(self, *args):
context, base, sysId, pubId = args
print 'External entity ref:', args[1:]
return 1
def SkippedEntityHandler(self, *args):
print 'Skipped entity ref:', args
def DefaultHandler(self, userData):
pass
def DefaultHandlerExpand(self, userData):
pass
def confirm(ok):
if ok:
print "OK."
else:
print "Not OK."
out = Outputter()
parser = expat.ParserCreate(namespace_separator='!')
# Test getting/setting returns_unicode
parser.returns_unicode = 0; confirm(parser.returns_unicode == 0)
parser.returns_unicode = 1; confirm(parser.returns_unicode == 1)
parser.returns_unicode = 2; confirm(parser.returns_unicode == 1)
parser.returns_unicode = 0; confirm(parser.returns_unicode == 0)
# Test getting/setting ordered_attributes
parser.ordered_attributes = 0; confirm(parser.ordered_attributes == 0)
parser.ordered_attributes = 1; confirm(parser.ordered_attributes == 1)
parser.ordered_attributes = 2; confirm(parser.ordered_attributes == 1)
parser.ordered_attributes = 0; confirm(parser.ordered_attributes == 0)
# Test getting/setting specified_attributes
parser.specified_attributes = 0; confirm(parser.specified_attributes == 0)
parser.specified_attributes = 1; confirm(parser.specified_attributes == 1)
parser.specified_attributes = 2; confirm(parser.specified_attributes == 1)
parser.specified_attributes = 0; confirm(parser.specified_attributes == 0)
HANDLER_NAMES = [
'StartElementHandler', 'EndElementHandler',
'CharacterDataHandler', 'ProcessingInstructionHandler',
'UnparsedEntityDeclHandler', 'NotationDeclHandler',
'StartNamespaceDeclHandler', 'EndNamespaceDeclHandler',
'CommentHandler', 'StartCdataSectionHandler',
'EndCdataSectionHandler',
'DefaultHandler', 'DefaultHandlerExpand',
#'NotStandaloneHandler',
'ExternalEntityRefHandler', 'SkippedEntityHandler',
]
for name in HANDLER_NAMES:
setattr(parser, name, getattr(out, name))
data = '''\
<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
<?xml-stylesheet href="stylesheet.css"?>
<!-- comment data -->
<!DOCTYPE quotations SYSTEM "quotations.dtd" [
<!ELEMENT root ANY>
<!NOTATION notation SYSTEM "notation.jpeg">
<!ENTITY acirc "&#226;">
<!ENTITY external_entity SYSTEM "entity.file">
<!ENTITY unparsed_entity SYSTEM "entity.file" NDATA notation>
%unparsed_entity;
]>
<root attr1="value1" attr2="value2&#8000;">
<myns:subelement xmlns:myns="http://www.python.org/namespace">
Contents of subelements
</myns:subelement>
<sub2><![CDATA[contents of CDATA section]]></sub2>
&external_entity;
</root>
'''
# Produce UTF-8 output
parser.returns_unicode = 0
try:
parser.Parse(data, 1)
except expat.error:
print '** Error', parser.ErrorCode, expat.ErrorString(parser.ErrorCode)
print '** Line', parser.ErrorLineNumber
print '** Column', parser.ErrorColumnNumber
print '** Byte', parser.ErrorByteIndex
# Try the parse again, this time producing Unicode output
parser = expat.ParserCreate(namespace_separator='!')
parser.returns_unicode = 1
for name in HANDLER_NAMES:
setattr(parser, name, getattr(out, name))
try:
parser.Parse(data, 1)
except expat.error:
print '** Error', parser.ErrorCode, expat.ErrorString(parser.ErrorCode)
print '** Line', parser.ErrorLineNumber
print '** Column', parser.ErrorColumnNumber
print '** Byte', parser.ErrorByteIndex
# Try parsing a file
parser = expat.ParserCreate(namespace_separator='!')
parser.returns_unicode = 1
for name in HANDLER_NAMES:
setattr(parser, name, getattr(out, name))
import StringIO
file = StringIO.StringIO(data)
try:
parser.ParseFile(file)
except expat.error:
print '** Error', parser.ErrorCode, expat.ErrorString(parser.ErrorCode)
print '** Line', parser.ErrorLineNumber
print '** Column', parser.ErrorColumnNumber
print '** Byte', parser.ErrorByteIndex
# Tests that make sure we get errors when the namespace_separator value
# is illegal, and that we don't for good values:
print
print "Testing constructor for proper handling of namespace_separator values:"
expat.ParserCreate()
expat.ParserCreate(namespace_separator=None)
expat.ParserCreate(namespace_separator=' ')
print "Legal values tested o.k."
try:
expat.ParserCreate(namespace_separator=42)
except TypeError, e:
print "Caught expected TypeError."
else:
print "Failed to catch expected TypeError."
try:
expat.ParserCreate(namespace_separator='too long')
except ValueError, e:
print "Caught expected ValueError."
else:
print "Failed to catch expected ValueError."
# ParserCreate() needs to accept a namespace_separator of zero length
# to satisfy the requirements of RDF applications that are required
# to simply glue together the namespace URI and the localname. Though
# considered a wart of the RDF specifications, it needs to be supported.
#
# See XML-SIG mailing list thread starting with
# http://mail.python.org/pipermail/xml-sig/2001-April/005202.html
#
expat.ParserCreate(namespace_separator='') # zero-length separator must be accepted
# Test the interning machinery.
p = expat.ParserCreate()
L = []
def collector(name, *args):
L.append(name)
p.StartElementHandler = collector
p.EndElementHandler = collector
p.Parse("<e> <e/> <e></e> </e>", 1)
tag = L[0]
if len(L) != 6:
print "L should only contain 6 entries; found", len(L)
for entry in L:
if tag is not entry:
print "expected L to contain many references to the same string",
print "(it didn't)"
print "L =", `L`
break
# Weird public ID bug reported by <NAME>; he was only able to
# tickle this under Zope with ParsedXML and PyXML 0.7 installed.
text = '''\
<?xml version="1.0" ?>
<!DOCTYPE foo SYSTEM "foo">
<doc>Test</doc>
'''
def start_doctype_decl_handler(doctypeName, systemId, publicId,
has_internal_subset):
if publicId is not None:
print "Unexpected publicId: " + `publicId`
if systemId != "foo":
print "Unexpected systemId: " + `systemId`
p = expat.ParserCreate()
p.StartDoctypeDeclHandler = start_doctype_decl_handler
p.Parse(text, 1)
# Tests of the buffer_text attribute.
import sys
class TextCollector:
def __init__(self, parser):
self.stuff = []
def check(self, expected, label):
require(self.stuff == expected,
"%s\nstuff = %s\nexpected = %s"
% (label, `self.stuff`, `map(unicode, expected)`))
def CharacterDataHandler(self, text):
self.stuff.append(text)
def StartElementHandler(self, name, attrs):
self.stuff.append("<%s>" % name)
bt = attrs.get("buffer-text")
if bt == "yes":
parser.buffer_text = 1
elif bt == "no":
parser.buffer_text = 0
def EndElementHandler(self, name):
self.stuff.append("</%s>" % name)
def CommentHandler(self, data):
self.stuff.append("<!--%s-->" % data)
def require(cond, label):
# similar to confirm(), but no extraneous output
if not cond:
raise TestFailed(label)
def setup(handlers=[]):
parser = expat.ParserCreate()
require(not parser.buffer_text,
"buffer_text not disabled by default")
parser.buffer_text = 1
handler = TextCollector(parser)
parser.CharacterDataHandler = handler.CharacterDataHandler
for name in handlers:
setattr(parser, name, getattr(handler, name))
return parser, handler
parser, handler = setup()
require(parser.buffer_text,
"text buffering either not acknowledged or not enabled")
parser.Parse("<a>1<b/>2<c/>3</a>", 1)
handler.check(["123"],
"buffered text not properly collapsed")
# XXX This test exposes more detail of Expat's text chunking than we
# XXX like, but it tests what we need to concisely.
parser, handler = setup(["StartElementHandler"])
parser.Parse("<a>1<b buffer-text='no'/>2\n3<c buffer-text='yes'/>4\n5</a>", 1)
handler.check(["<a>", "1", "<b>", "2", "\n", "3", "<c>", "4\n5"],
"buffering control not reacting as expected")
parser, handler = setup()
parser.Parse("<a>1<b/>&lt;2&gt;<c/> \n 3</a>", 1)
handler.check(["1<2> \n 3"],
"buffered text not properly collapsed")
parser, handler = setup(["StartElementHandler"])
parser.Parse("<a>1<b/>2<c/>3</a>", 1)
handler.check(["<a>", "1", "<b>", "2", "<c>", "3"],
"buffered text not properly split")
parser, handler = setup(["StartElementHandler", "EndElementHandler"])
parser.CharacterDataHandler = None
parser.Parse("<a>1<b/>2<c/>3</a>", 1)
handler.check(["<a>", "<b>", "</b>", "<c>", "</c>", "</a>"],
"huh?")
parser, handler = setup(["StartElementHandler", "EndElementHandler"])
parser.Parse("<a>1<b></b>2<c/>3</a>", 1)
handler.check(["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "3", "</a>"],
"huh?")
parser, handler = setup(["CommentHandler", "EndElementHandler",
"StartElementHandler"])
parser.Parse("<a>1<b/>2<c></c>345</a> ", 1)
handler.check(["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "345", "</a>"],
"buffered text not properly split")
parser, handler = setup(["CommentHandler", "EndElementHandler",
"StartElementHandler"])
parser.Parse("<a>1<b/>2<c></c>3<!--abc-->4<!--def-->5</a> ", 1)
handler.check(["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "3",
"<!--abc-->", "4", "<!--def-->", "5", "</a>"],
"buffered text not properly split")
# Tests of namespace_triplets support.
text = '''\
<doc xmlns:foo="http://xml.python.org/x"
xmlns:bar="http://xml.python.org/x">
<foo:e foo:a1="a1" bar:a2="a2"/>
<bar:e foo:a1="a1" bar:a2="a2"/>
<e a2="a2" xmlns="http://xml.python.org/e"/>
</doc>
'''
expected_info = [
("doc", {}),
("http://xml.python.org/x e foo",
{"http://xml.python.org/x a1 foo": "a1",
"http://xml.python.org/x a2 bar": "a2"}),
"http://xml.python.org/x e foo",
("http://xml.python.org/x e bar",
{"http://xml.python.org/x a1 foo": "a1",
"http://xml.python.org/x a2 bar": "a2"}),
"http://xml.python.org/x e bar",
("http://xml.python.org/e e", {"a2": "a2"}),
"http://xml.python.org/e e",
"doc"
]
class Handler:
def __init__(self, parser):
self.info = []
parser.StartElementHandler = self.StartElementHandler
parser.EndElementHandler = self.EndElementHandler
def StartElementHandler(self, name, attrs):
self.info.append((name, attrs))
def EndElementHandler(self, name):
self.info.append(name)
p = expat.ParserCreate(namespace_separator=" ")
p.namespace_prefixes = 1
h = Handler(p)
p.Parse(text, 1)
if h.info != expected_info:
raise ValueError, ("got bad element information:\n "
+ `h.info`)
```
#### File: pyxml/test/test_saxdrivers.py
```python
from xml.sax.saxutils import XMLGenerator, ContentGenerator
from xml.sax import handler, SAXReaderNotAvailable
import xml.sax.saxexts
import xml.sax.sax2exts
from cStringIO import StringIO
from test.test_support import verbose, TestFailed, findfile
try:
import warnings
except ImportError:
pass
else:
warnings.filterwarnings("ignore", ".* xmllib .* obsolete.*",
DeprecationWarning, 'xmllib$')
tests=0
fails=0
xml_test = open(findfile("test.xml.out")).read()
xml_test_out = open(findfile("test.xml.out")).read()
expected_failures=[
"xml.sax.drivers.drv_sgmlop", # does not handle " entity reference
"xml.sax.drivers.drv_xmllib", # reports S before first tag,
# does not report xmlns: attribute
]
def summarize(p,result):
global tests,fails
tests=tests+1
if result == xml_test_out:
if p in expected_failures:
print p,"XPASS"
else:
print p,"PASS"
elif p in expected_failures:
print p,"XFAIL"
else:
print p,"FAIL"
fails=fails+1
if verbose:
print result
#open("test.xml."+p,"w").write(result.getvalue())
def test_sax1():
factory=xml.sax.saxexts.XMLParserFactory
for p in factory.get_parser_list():
try:
parser = factory._create_parser(p)
except ImportError:
print p,"NOT SUPPORTED"
continue
except SAXReaderNotAvailable:
print p,"NOT SUPPORTED"
continue
result = StringIO()
xmlgen = ContentGenerator(result)
parser.setDocumentHandler(xmlgen)
# We should not pass file names to parse; we don't have
# any URLs, either
parser.parseFile(open(findfile("test.xml")))
summarize(p,result.getvalue())
def test_sax2():
factory = xml.sax.sax2exts.XMLParserFactory
for p in factory.get_parser_list():
try:
parser = factory._create_parser(p)
except ImportError:
print p,"NOT SUPPORTED"
continue
except SAXReaderNotAvailable:
print p,"NOT SUPPORTED"
continue
# Don't try to test namespace support, yet
parser.setFeature(handler.feature_namespaces,0)
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
# In SAX2, we can pass an open file object to parse()
parser.parse(open(findfile("test.xml")))
summarize(p,result.getvalue())
def test_incremental():
global tests, fails
factory = xml.sax.sax2exts.XMLParserFactory
for p in factory.get_parser_list():
try:
parser = factory._create_parser(p)
except ImportError:
print p,"NOT SUPPORTED"
continue
except SAXReaderNotAvailable:
print p,"NOT SUPPORTED"
continue
if not hasattr(parser, "feed"):
continue
# Don't try to test namespace support, yet
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.feed("<doc>")
parser.feed("</doc>")
parser.close()
tests = tests + 1
if result.getvalue() == '<?xml version="1.0" encoding="iso-8859-1"?>\n<doc></doc>':
print p, "PASS"
else:
print p, "FAIL"
fails = fails + 1
items = locals().items()
items.sort()
for (name, value) in items:
if name[ : 5] == "test_":
value()
print "%d tests, %d failures" % (tests, fails)
if fails != 0:
raise TestFailed, "%d of %d tests failed" % (fails, tests)
```
#### File: pyxml/test/test_saxutils.py
```python
import unittest
from os.path import dirname, abspath, join
from xml.sax.saxutils import escape, absolute_system_id
class EscapeTC(unittest.TestCase):
def test(self):
v1, v2 = escape('&<>'), '&<>'
self.assertEquals(v1, v2)
v1, v2 = escape('foo&bar'), 'foo&amp;bar'
self.assertEquals(v1, v2)
v1, v2 = escape('< test > &', {'test': '&myentity;'}), '&lt; &myentity; &gt; &amp;'
self.assertEquals(v1, v2)
v1, v2 = escape('&\'"<>', {'"': '&quot;', "'": '&apos;'}), '&amp;&apos;&quot;&lt;&gt;'
self.assertEquals(v1, v2)
TEST_DIR = abspath(dirname(__file__)) + '/'
class AbsoluteSystemIdTC(unittest.TestCase):
def test_base(self):
res = absolute_system_id('http://www.xml.com')
self.assertEquals(res, 'http://www.xml.com')
res = absolute_system_id('http://www.xml.com', 'http://whatever')
self.assertEquals(res, 'http://www.xml.com')
res = absolute_system_id('quotes.xml')
self.assertEquals(res, 'file://%s' % join(TEST_DIR, 'quotes.xml'))
def test_relative(self):
# FIXME: empty authority // added by MakeUrlLibSafe (actually by
# urlunsplit), which is probably acceptable since the sysid is designed
# to be used by urlopen
res = absolute_system_id('quotes.xml', 'file:%s' % TEST_DIR)
self.assertEquals(res, 'file://%squotes.xml' % TEST_DIR)
res = absolute_system_id('relative.xml', 'file:/base')
self.assertEquals(res, 'file:///relative.xml')
res = absolute_system_id('relative.xml', 'file:/base/')
self.assertEquals(res, 'file:///base/relative.xml')
res = absolute_system_id('file:relative.xml', 'file:/base')
self.assertEquals(res, 'file:///relative.xml')
def test_no_base_scheme(self):
# FIXME: warning ?
self.assertRaises(ValueError, absolute_system_id, 'file:relative.xml', '/base')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JklqmYao1994/TRICAL",
"score": 2
} |
#### File: python/TRICAL/__init__.py
```python
import os
import sys
from ctypes import *
"""
This is a simple Python interface wrapper around TRICAL, designed more as a
convenient interface for tests than anything else.
If run directly (i.e. `python -m TRICAL`), we expect comma-separated
readings on stdin (3 values per line, ending with \\n), and write calibrated
values on stdout in the same format.
"""
_TRICAL = None
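# A rough sketch of the stand-alone behaviour described in the docstring above
# (assumed, not implemented in this file): each stdin line would be parsed
# along the lines of
#   x, y, z = (float(v) for v in line.strip().split(","))
# and the calibrated values written back as a comma-separated line on stdout.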
class _Instance(Structure):
def __repr__(self):
fields = {
"field_norm": self.field_norm,
"measurement_noise": self.measurement_noise,
"state": tuple(self.state),
"state_covariance": tuple(self.state_covariance),
"measurement_count": self.measurement_count
}
return str(fields)
def _init():
"""
Loads the TRICAL library and sets up the ctypes interface.
Called automatically the first time a Python instance is created.
"""
global _TRICAL
# TODO: search properly
lib = os.path.join(os.path.dirname(__file__), "libTRICAL.dylib")
_TRICAL = cdll.LoadLibrary(lib)
# Set up the _Instance structure based on the definition in TRICAL.h
_Instance._fields_ = [
("field_norm", c_float),
("measurement_noise", c_float),
("state", c_float * 9),
("state_covariance", c_float * 9 * 9),
("measurement_count", c_uint)
]
_TRICAL.TRICAL_init.argtypes = [POINTER(_Instance)]
_TRICAL.TRICAL_init.restype = None
_TRICAL.TRICAL_norm_set.argtypes = [POINTER(_Instance), c_float]
_TRICAL.TRICAL_norm_set.restype = None
_TRICAL.TRICAL_norm_get.argtypes = [POINTER(_Instance)]
_TRICAL.TRICAL_norm_get.restype = c_float
_TRICAL.TRICAL_noise_set.argtypes = [POINTER(_Instance), c_float]
_TRICAL.TRICAL_noise_set.restype = None
_TRICAL.TRICAL_noise_get.argtypes = [POINTER(_Instance)]
_TRICAL.TRICAL_noise_get.restype = c_float
_TRICAL.TRICAL_measurement_count_get.argtypes = [POINTER(_Instance)]
_TRICAL.TRICAL_measurement_count_get.restype = c_uint
_TRICAL.TRICAL_estimate_update.argtypes = [POINTER(_Instance),
POINTER(c_float * 3)]
_TRICAL.TRICAL_estimate_update.restype = None
_TRICAL.TRICAL_estimate_get.argtypes = [POINTER(_Instance),
POINTER(c_float * 3),
POINTER(c_float * 9)]
_TRICAL.TRICAL_estimate_get.restype = None
_TRICAL.TRICAL_estimate_get_ext.argtypes = [POINTER(_Instance),
POINTER(c_float * 3),
POINTER(c_float * 9),
POINTER(c_float * 3),
POINTER(c_float * 9)]
_TRICAL.TRICAL_estimate_get_ext.restype = None
_TRICAL.TRICAL_measurement_calibrate.argtypes = [POINTER(_Instance),
POINTER(c_float * 3),
POINTER(c_float * 3)]
_TRICAL.TRICAL_measurement_calibrate.restype = None
class Instance(object):
def __init__(self, field_norm=1.0, measurement_noise=1e-6):
"""
Create a new TRICAL instance with the supplied field norm (magnitude)
and measurement noise.
The field norm should be the expected magnitude of the field at the
sensor location, and the measurement noise should be the standard
deviation of the error in sensor readings (the error being presumed
to be white Gaussian noise).
"""
if not _TRICAL:
_init()
# Sanity-check the input parameters
if field_norm <= 0.0:
raise ValueError("Field norm must be > 0.0 (got %f)" % field_norm)
if measurement_noise <= 0.0:
raise ValueError("Measurement noise must be > 0.0 (got %f)" %
measurement_noise)
# Initialize the internal (C) instance
self._instance = _Instance()
_TRICAL.TRICAL_init(self._instance)
_TRICAL.TRICAL_norm_set(self._instance, field_norm)
_TRICAL.TRICAL_noise_set(self._instance, measurement_noise)
# Initialize the Python-accessible calibration estimate
self.bias = (0.0, 0.0, 0.0)
self.scale = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
self.measurement_count = 0
def update(self, measurement):
"""
Update the calibration estimate based on a new measurement.
"""
if not measurement or len(measurement) != 3:
raise ValueError("Measurement must be a sequence with 3 items")
_TRICAL.TRICAL_estimate_update(self._instance,
(c_float * 3)(*measurement))
bias = (c_float * 3)()
scale = (c_float * 9)()
_TRICAL.TRICAL_estimate_get(self._instance, bias, scale)
self.bias = tuple(bias[0:3])
self.scale = tuple(scale[0:9])
self.measurement_count = \
_TRICAL.TRICAL_measurement_count_get(self._instance)
def calibrate(self, measurement):
"""
Given a measurement, return a calibrated measurement based on the
current calibration estimate.
"""
if not measurement or len(measurement) != 3:
raise ValueError("Measurement must be a sequence with 3 items")
calibrated_measurement = (c_float * 3)()
_TRICAL.TRICAL_measurement_calibrate(self._instance,
(c_float * 3)(*measurement),
calibrated_measurement)
return tuple(calibrated_measurement[0:3])
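# Minimal usage sketch (the field norm, noise and `readings` below are
# illustrative assumptions, not values defined by this module):
#   cal = Instance(field_norm=48.0, measurement_noise=1.2)
#   for raw in readings:              # readings: sequence of (x, y, z) tuples
#       cal.update(raw)
#   corrected = [cal.calibrate(raw) for raw in readings]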
def _squared_norm(v):
return v[0] * v[0] + v[1] * v[1] + v[2] * v[2]
def generate_html_viz(instance, samples):
"""
Generate a WebGL visualisation of the raw and iteratively calibrated samples.
"""
import math
import pkg_resources
raw_data = samples
iteratively_calibrated_data = []
squared_magnitude = 0.0
# Iterate over the samples and generate the iteratively calibrated data
for measurement in samples:
instance.update(measurement)
calibrated_measurement = instance.calibrate(measurement)
iteratively_calibrated_data.append(calibrated_measurement)
# Update the maximum magnitude seen
squared_magnitude = max(squared_magnitude, _squared_norm(measurement))
squared_magnitude = max(squared_magnitude,
_squared_norm(calibrated_measurement))
# And now insert them into the appropriate part of the HTML. Try the
# pkg_resources way, but if the package hasn't actually been installed
# then look for it in the same directory as this file
try:
html_path = pkg_resources.resource_filename("TRICAL", "viz.html")
html = open(html_path, "rb").read()
except IOError:
html_path = os.path.join(os.path.dirname(__file__), "viz.html")
html = open(html_path, "rb").read()
# Insert the visualisation data -- output measurements as a single array
# of points
raw_points = ",\n".join(",".join("%.4f" % v for v in measurement)
for measurement in raw_data)
calibrated_points = ",\n".join(",".join(
"%.4f" % v for v in measurement) for measurement in iteratively_calibrated_data)
html = html.replace("{{raw}}", "[" + raw_points + "]")
html = html.replace("{{calibrated}}", "[" + calibrated_points + "]")
# TODO: maybe insert the calibration estimate as well?
html = html.replace("{{magnitude}}",
"%.3f" % math.sqrt(squared_magnitude))
html = html.replace("{{fieldNorm}}",
"%.3f" % instance._instance.field_norm)
return html
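# The returned HTML could then be written to disk, e.g. (hypothetical file name):
#   open("trical_viz.html", "w").write(generate_html_viz(cal, readings))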
``` |
{
"source": "jkluter/MLG",
"score": 2
} |
#### File: jkluter/MLG/image.py
```python
import matplotlib.pyplot as plt
import numpy as np
from MLG import imagepath, paperpath, path
from imageio import imread
import matplotlib.cbook as cbook
from MLG.utils import color_own
from matplotlib import rc
__all__ = ['dark','Image_Window','Image_precision','Image_Illustration','Image_Illustration_Multi','Image_compare_micro','Image_astroshift', 'create_all_Image']
def dark(onof = 0):
if onof == 'on': plt.style.use('dark_background')
elif onof == 'off': plt.style.use('default')
elif onof == True: plt.style.use('dark_background')
else: plt.style.use('default')
def Image_Window(string = 'resolve_Window', pres = False):
'''------------------------------------------------------------
Description: illustrates the Gaia readout windows: plots the along-scan /
across-scan pixel grids of two overlapping windows together with a bright
source and fainter neighbouring stars.
---------------------------------------------------------------
Input: string: file-name stem for the saved image
pres: if True, also save intermediate frames for presentations
---------------------------------------------------------------
Output: PNG image(s) written to imagepath (and paperpath, if set)
------------------------------------------------------------'''
dark= 'black' in plt.rcParams['savefig.facecolor']
if dark:
string = 'dark_'+string
black = color_own([0.7,0.7,0.7,1])
else: black = color_own([0.,0.,0.,1])
c1 = color_own([0,1,1,1])
c2 = color_own([1,0,0,1])
c3 = color_own([1,1,0.2,1])
c4 = color_own([0.4,0.4,0.4,1])
c_star = [c1,c2,c3,c4]
c_grid1 = color_own([0,int(dark),1,1])
c_grid2 = color_own([0.5,1,0,1])
star= np.array([[0, 0],[0.1,0.9],[-1,-1.1],[-0.5,0.1]])
fig = plt.figure(figsize = [12,12])
x_width = 0.059
y_width = 0.177
#------------------------------------------------------------
# axis
plt.xticks( fontsize = 25)
plt.yticks( fontsize = 25)
plt.grid(True)
plt.axis('equal')
plt.axis([-1.5,1.5,-1.4,1.6])
plt.ylabel('Across-scan direction (AC) [arcsec]', fontsize = 30)
plt.xlabel('Along-scan direction (AL) [arcsec]', fontsize = 30)
#------------------------------------------------------------
#------------------------------------------------------------
#Grid Major Star
for i in range(-6,7):
plt.plot([-6*x_width,6*x_width], [i*y_width,i*y_width], c = c_grid1,linewidth = 3)
plt.plot([i*x_width,i*x_width], [-6*y_width,6*y_width], c = c_grid1, linewidth = 3)
plt.text(0,1.4,"Along-scan direction\n $12\,\mathrm{pix} \\times 0.059 \mathrm{''/pix} = 0.708\mathrm{''}$",fontsize = 25, verticalalignment = 'center', horizontalalignment = 'center', rotation = 0)
plt.text(0.7,0,"Across-scan direction\n $12\,\mathrm{pix} \\times 0.177 \mathrm{''/pix} = 2.124\mathrm{''}$",fontsize = 25, verticalalignment = 'center', horizontalalignment = 'center', rotation = 90)
plt.arrow(0,6*y_width+2*x_width, -6*x_width+0.02,0,color= black, head_width=0.1,\
overhang = 0.5, length_includes_head=True ,zorder = 10, linewidth = 3)
plt.arrow(0,6*y_width+2*x_width, 6*x_width-0.02,0,color= black, head_width=0.1,\
overhang = 0.5, length_includes_head=True ,zorder = 10, linewidth = 3)
plt.arrow(8*x_width,0,0, -6*y_width+0.02,color= black, head_width=0.1,\
overhang = 0.5, length_includes_head=True ,zorder = 10, linewidth = 3)
plt.arrow(8*x_width,0,0, 6*y_width-0.02,color= black, head_width=0.1,\
overhang = 0.5, length_includes_head=True ,zorder = 10, linewidth = 3)
plt.scatter(star[:1,0], star[:1,1], marker=(5, 1),c = c_star[:1], s = [3000], zorder = 1000)
if pres: fig.savefig(imagepath + string + '_1.png', format = 'png')
#------------------------------------------------------------
#------------------------------------------------------------
#Grid Minor Star
plt.scatter(star[1:3,0], star[1:3,1], marker=(5, 1),c = c_star[1:3], s = [2000,2000], zorder = 1000)
if pres: fig.savefig(imagepath + string + '_2.png', format = 'png')
for i in range(-5,8):
plt.plot([-15*x_width,-6*x_width], [i*y_width,i*y_width], c = c_grid2,linewidth = 3, zorder = -1)
for i in range(-15,-5):
plt.plot([i*x_width,i*x_width], [-5*y_width,7*y_width], c = c_grid2, linewidth = 3, zorder = -1)
plt.scatter(star[3:,0], star[3:,1], marker=(5, 1),c = c_star[3:], s = [2000], zorder = 1000)
if pres: fig.savefig(imagepath + string + '_3.png', format = 'png')
#------------------------------------------------------------
fig.savefig(imagepath + string + '.png', format = 'png')
print('Create Image: '+ imagepath+ string + '.png')
if paperpath is not None: fig.savefig(paperpath + string + '.png', format = 'png')
plt.close(fig)
def Image_precision(string = 'Sig_vs_Gmag', Gaia_precision = path+'InputTable/resolution_Gaia.png', pres = False):
'''------------------------------------------------------------
Description: plots the along-scan standard deviation as a function of
G magnitude, overlaying the precision model used here (per CCD and for
9 CCD observations) on the Gaia DR2 precision figure.
---------------------------------------------------------------
Input: string: file-name stem for the saved image
Gaia_precision: path to the background image with the Gaia DR2 precision
pres: if True, also save intermediate frames for presentations
---------------------------------------------------------------
Output: PNG image(s) written to imagepath (and paperpath, if set)
------------------------------------------------------------'''
dark= 'black' in plt.rcParams['savefig.facecolor']
if dark:
string = 'dark_'+string
black = color_own([0.7,0.7,0.7,1])
color1 = color_own([0.85,0,0,1])
color2 = color_own([0,0,1,1])
color3 = color_own([0,1,1,1])
color4 = color_own([0.5,1,0,1])
color5 = color_own([1,1,0,1])
else:
black = color_own([0.,0.,0.,1])
color1 = color_own([0.85,0,0,1])
color2 = color_own([0,0,1,1])
color3 = color_own([0,1,1,1])
color4 = color_own([0,1,0,1])
color5 = color_own([1,1,0,1])
fig = plt.figure(figsize = [12,10])
Gmag = np.arange(4,22,0.01)
datafile = cbook.get_sample_data(Gaia_precision)
img = imread(datafile)
z = 10 ** (0.4 * (np.maximum(Gmag, 14) - 15)) #(14-np.minimum(Gmag, 14))
z2 = 10 ** (0.4 * (np.maximum(Gmag, 12) - 15))
sig_pi = (-1.631 + 680.766 * z2 + 32.732 * z2**2)**0.5/1000
sig_fov2 =(-1.631 + 680.766 * z + 32.732 * z**2)**0.5/1000 *7.75 +0.1
sig_fov3 = sig_fov2 / np.sqrt(9)
plt.plot([0,1],[-5,-5], c = color1, linewidth = 3, label = 'formal precision from Gaia DR2 (per CCD)' )
plt.plot([0,1],[-5,-5], c = color2, linewidth = 3, label = 'actual precision from Gaia DR2 (per CCD)' )
plt.yticks([np.log10(i) for i in [20,10, 5,2,1, 0.5,0.2,0.1, 0.05,0.02, 0.01]],[20,10, 5,2,1, 0.5,0.2,0.1, 0.05,0.02,0.01], fontsize = 25)
plt.xticks( fontsize = 25)
plt.ylabel('Standard deviation of AL field angle [mas]', fontsize = 30)
plt.xlabel('G magnitude', fontsize = 30)
plt.imshow(img, zorder=0, extent=[5, 21.04, np.log10(0.0195),np.log10(10)])
plt.axis('auto')
plt.xlim([4,22])
plt.ylim([np.log10(0.005),np.log10(40)])
if pres:
plt.legend(loc = 'upper left',fontsize = 20)
fig.savefig(imagepath + string + '_1.png', format = 'png')
plt.plot(Gmag,np.log10(sig_pi), '--',c = color3, dashes =(5,5), linewidth = 3, label= 'predicted end-of-mission parallax error')
if pres:
plt.legend(loc = 'upper left',fontsize = 20)
fig.savefig(imagepath + string + '_2.png', format = 'png')
plt.plot(Gmag,np.log10(sig_fov2), ':' , c = color4, linewidth = 5, label= 'used Standard deviation (per CCD)' )
if pres:
plt.legend(loc = 'upper left',fontsize = 20)
fig.savefig(imagepath + string + '_3.png', format = 'png')
plt.plot(Gmag,np.log10(sig_fov3) ,c = color5,linewidth = 7, label= 'used Standard deviation for 9 CCD observations' )
plt.plot([5, 21.04, 21.04,5,5], [np.log10(0.0195),np.log10(0.0195),np.log10(10),np.log10(10),np.log10(0.0195)], linewidth = 2, color = [0.5,0.5,0.5,1], zorder = 0.1)
plt.axis('auto')
plt.xlim([4,22])
plt.ylim([np.log10(0.005),np.log10(40)])
plt.legend(loc = 'upper left',fontsize = 20)
fig.savefig(imagepath + string + '.png', format = 'png')
print('Create Image: '+ imagepath+ string + '.png')
if paperpath is not None: fig.savefig(paperpath + string + '.png', format = 'png')
plt.close(fig)
def Image_Illustration(string = 'Illustration'):
'''------------------------------------------------------------
Description: illustrates an astrometric microlensing event: the path of
the lens, the unlensed and lensed path of the background star, and the
one-dimensional measurements along different Gaia scan directions.
---------------------------------------------------------------
Input: string: file-name stem for the saved image
---------------------------------------------------------------
Output: PNG image written to imagepath (and paperpath, if set)
------------------------------------------------------------'''
dark= 'black' in plt.rcParams['savefig.facecolor']
if dark:
string = 'dark_'+string
black = color_own([0.7,0.7,0.7,1])
color1 = color_own([0,1,1,1])
color2 = color_own([1,0.5,0,1])
color3 = color_own([0.5,1,0,1])
color4 = color_own([1,0,1,1])
color5 = color_own([0,1,1,1])
color6 = color_own([0,1,1,1])
else:
black = color_own([0.,0.,0.,1])
color1 = color_own([0,0,1,1])
color2 = color_own([1,0.5,0,1])
color3 = color_own([0,1,0,1])
color4 = color_own([1,0,1,1])
color5 = color_own([0,1,1,1])
color6 = color_own([0,1,1,1])
t = np.array([12, 35, 41, 61, 73, 89])
scandir = np.array([0.1, 0.7, 0.4, 0.8 , 0.2, 0.1])*np.pi
x1 = np.linspace(1,13,100) + 0.3 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y1 = np.linspace(1,3,100) + 0.3* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
x2 = np.linspace(3,7,100)# + 0.03 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y2 = np.linspace(7,4.5,100)# + 0.03* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
d = np.sqrt((x1-x2)**2 + (y1-y2)**2)
TE = 1.5
X2 = x2 + (x2-x1) * TE/(d**2 +2*TE)
Y2 = y2 + (y2-y1) * TE/(d**2 +2*TE)
dX2 = x1-X2
dY2 = y1-Y2
dx2 = x1-x2
dy2 = y1-y2
fig = plt.figure(figsize= (12,8))
ax = plt.subplot(111)
ax.axis('equal')
ax.axis('off')
for i in range(len(t)):
xm1 =np.array([-1,1]) * np.cos(scandir[i]) + x1[t[i]]
ym1 =np.array([-1,1]) * np.sin(scandir[i]) + y1[t[i]]
xm2 =np.array([-1,1]) * np.cos(scandir[i]) + X2[t[i]]
ym2 =np.array([-1,1]) * np.sin(scandir[i]) + Y2[t[i]]
dsc = ((dx2[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
+ (dy2[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
dSC = ((dX2[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
+ (dY2[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
ttX2 = np.array([0,-dSC[1]/2,dSC[1]/2,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],X2[t[i]],X2[t[i]]])
ttY2 = np.array([0,-dSC[1]/2,dSC[1]/2,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],Y2[t[i]],Y2[t[i]]])
ttx2 = np.array([0,-dsc[1]/2-0.2,dsc[1]/2-0.2,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],x2[t[i]],x2[t[i]]])
tty2 = np.array([0,-dsc[1]/2-0.2,dsc[1]/2-0.2,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],y2[t[i]],y2[t[i]]])
if i % 2 == 0:
plt.arrow(ttx2[2],tty2[2], 0.0001*(ttx2[2]-ttx2[1]),0.0001*(tty2[2]-tty2[1]),color= color1, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttx2[1],tty2[1], 0.0001*(ttx2[1]-ttx2[2]),0.0001*(tty2[1]-tty2[2]),color= color1, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttX2[2],ttY2[2], 0.0001*(ttX2[2]-ttX2[1]),0.0001*(ttY2[2]-ttY2[1]),color= color2, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttX2[1],ttY2[1], 0.0001*(ttX2[1]-ttX2[2]),0.0001*(ttY2[1]-ttY2[2]),color= color2, head_width=0.2,\
overhang = 0.5, length_includes_head=True, zorder = 10)
plt.plot(ttx2[0:2],tty2[0:2],color = black, linestyle= ':')
plt.plot(ttX2[0:2],ttY2[0:2],color = black, linestyle= ':')
plt.plot(ttx2[1:3],tty2[1:3],color = color1,linewidth = 3 , linestyle= '--',dashes=(10, 10))
plt.plot(ttX2[1:3],ttY2[1:3],color = color2, linewidth = 3,linestyle= '-')
plt.plot(ttx2[2:],tty2[2:],color = black, linestyle= ':')
plt.plot(ttX2[2:],ttY2[2:],color = black, linestyle= ':')
if i% 2 == 0:
plt.plot(xm2,ym2, color = black, linewidth = 3,zorder = 1)
plt.plot(xm1,ym1, color = black, linewidth = 3,zorder = 1)
else:
plt.plot(xm2,ym2, color = 'grey', linewidth = 2, zorder = -1)
plt.plot(xm1,ym1, color = 'grey', linewidth = 2, zorder = -1)
#if i ==0 :
plt.plot(x1,y1, color = color3, linewidth = 3)
plt.plot(x2,y2, color = color1, linestyle= '--',dashes=(10, 5), linewidth = 3, zorder = -1)
plt.plot(X2,Y2, color = color2, linewidth = 3)
plt.xlim([-0.5,14])
xr = 12
yr = 7
plt.text(xr-0.8,0,'RA $\cdot$ cos(Dec)',verticalalignment = 'center',fontsize = 25)
plt.text(0,yr + 0.25,'Dec',fontsize = 25, horizontalalignment = 'center', rotation = 90)
plt.arrow(-0.025,0,xr-1,0,width = 0.05,overhang = 0.5,head_width = 0.5, head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
plt.arrow(0,-0.025,0,yr-0.5,width = 0.05,overhang = 0.5,head_width = 0.5,head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
plt.text(2,1.5,'Lens',color = color3, fontsize = 25, horizontalalignment = 'center', rotation = 0, weight = 'bold')
plt.text(4,7.5,'Star 1',color = color2,fontsize = 25, horizontalalignment = 'center', rotation = 0,weight = 'bold')
fig.savefig(imagepath + string + '.png', format = 'png')
print('Create Image: '+ imagepath+ string + '.png')
if paperpath is not None: fig.savefig(paperpath + string + '.png', format = 'png')
plt.close(fig)
def Image_Illustration2 (string = 'Illustration'):
    '''------------------------------------------------------------
    Description:
        Step-by-step version of Image_Illustration: builds the same
        lens/source geometry and saves the intermediate stages as
        separate images.
    ---------------------------------------------------------------
    Input:
        string: base name of the output images
    ---------------------------------------------------------------
    Output:
        saves imagepath + string + '_1.png' ... '_5.png'
    ------------------------------------------------------------'''
dark= 'black' in plt.rcParams['savefig.facecolor']
if dark:
string = 'dark_'+string
black = color_own([0.,0.,0.,1])
grey = color_own([.5,.5,0.5,1])
cyan = color_own([0,1,1,1])
blue = color_own([0,0,1,1])
lime = color_own([0.6,1.2,0,1])
green = color_own([0,1,0,1])
red = color_own([1,0,0,1])
orange = color_own([1,1,0,1])
else:
black = color_own([0.,0.,0.,1])
grey = color_own([.5,.5,0.5,1])
cyan = color_own([0,1,1,1])
blue = color_own([0,0,1,1])
lime = color_own([0.6,1.2,0,1])
green = color_own([0,1,0,1])
red = color_own([1,0,0,1])
orange = color_own([1,1,0,1])
t = np.array([12, 35, 41, 61, 73, 89])
scandir = np.array([0.1, 0.7, 0.4, 0.8 , 0.2, 0.1])*np.pi
#Position_lens
x1 = np.linspace(1,13,100) + 0.3 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y1 = np.linspace(1,3,100) + 0.3* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
#unlensed Position_source
x2 = np.linspace(5,9,100)# + 0.03 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y2 = np.linspace(7,4.5,100)# + 0.03* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
d = np.sqrt((x1-x2)**2 + (y1-y2)**2)
TE = 2
X2 = x2 + (x2-x1) * TE/(d**2 +2*TE)
Y2 = y2 + (y2-y1) * TE/(d**2 +2*TE)
dX2 = x1-X2
dY2 = y1-Y2
dx2 = x1-x2
dy2 = y1-y2
fig = plt.figure(figsize= (12,8))
ax = plt.subplot(111)
ax.axis('equal')
ax.axis('off')
#---------------------------------------------------------------
#axis
plt.xlim([-0.5,14])
xr = 12
yr = 7
plt.text(xr-0.8,0,'RA $\cdot$ cos(Dec)',verticalalignment = 'center',fontsize = 25)
plt.text(0,yr + 0.25,'Dec',fontsize = 25, horizontalalignment = 'center', rotation = 90)
plt.arrow(-0.025,0,xr-1,0,width = 0.05,overhang = 0.5,head_width = 0.5, head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
plt.arrow(0,-0.025,0,yr-0.5,width = 0.05,overhang = 0.5,head_width = 0.5,head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
plt.text(2,1.5,'Lens',color = grey, fontsize = 25, horizontalalignment = 'center', rotation = 0, weight = 'bold')
#---------------------------------------------------------------
# Motion source
plt.plot(x1,y1, color = grey, linewidth = 7)
fig.savefig(imagepath + string + '_1.png', format = 'png')
plt.text(4,7.5,'Source',color = blue,fontsize = 25, horizontalalignment = 'center', rotation = 0,weight = 'bold')
plt.plot(x2,y2, color = cyan, linestyle= '--',dashes=(10, 5), linewidth = 3, zorder = -1)
fig.savefig(imagepath + string + '_2.png', format = 'png')
plt.plot(X2,Y2, color = blue, linewidth = 3)
for i in range(len(t)):
plt.plot([x2[t[i]],X2[t[i]]],[y2[t[i]],Y2[t[i]]],':',color = black)
fig.savefig(imagepath + string + '_3.png', format = 'png')
delta = 0.05
for i in range(len(t)):
xm1 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + x1[t[i]]
ym1 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + y1[t[i]]
xm2 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + X2[t[i]]
ym2 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + Y2[t[i]]
plt.plot(xm2,ym2, color = black, linewidth = 1,zorder = 1)
plt.plot(xm1,ym1, color = black, linewidth = 1,zorder = 1)
fig.savefig(imagepath + string + '_4.png', format = 'png')
for i in range(len(t)):
xm1 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + x1[t[i]]
ym1 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + y1[t[i]]
xm2 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + X2[t[i]]
ym2 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + Y2[t[i]]
dsc = ((dx2[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
+ (dy2[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
dSC = ((dX2[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
+ (dY2[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
ttX2 = np.array([0,-dSC[1]/2,dSC[1]/2,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],X2[t[i]],X2[t[i]]])
ttY2 = np.array([0,-dSC[1]/2,dSC[1]/2,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],Y2[t[i]],Y2[t[i]]])
ttx2 = np.array([0,-dsc[1]/2-0.2,dsc[1]/2-0.2,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],x2[t[i]],x2[t[i]]])
tty2 = np.array([0,-dsc[1]/2-0.2,dsc[1]/2-0.2,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],y2[t[i]],y2[t[i]]])
if i % 2 == 0:
plt.arrow(ttx2[2],tty2[2], 0.0001*(ttx2[2]-ttx2[1]),0.0001*(tty2[2]-tty2[1]),color= red, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttx2[1],tty2[1], 0.0001*(ttx2[1]-ttx2[2]),0.0001*(tty2[1]-tty2[2]),color= red, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttX2[2],ttY2[2], 0.0001*(ttX2[2]-ttX2[1]),0.0001*(ttY2[2]-ttY2[1]),color= orange, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttX2[1],ttY2[1], 0.0001*(ttX2[1]-ttX2[2]),0.0001*(ttY2[1]-ttY2[2]),color= orange, head_width=0.2,\
overhang = 0.5, length_includes_head=True, zorder = 10)
plt.plot(ttx2[0:2],tty2[0:2],color = black, linestyle= ':')
plt.plot(ttX2[0:2],ttY2[0:2],color = black, linestyle= ':')
plt.plot(ttx2[1:3],tty2[1:3],color = red,linewidth = 3 , linestyle= '--',dashes=(10, 10))
plt.plot(ttX2[1:3],ttY2[1:3],color = orange, linewidth = 3,linestyle= '-')
plt.plot(ttx2[2:],tty2[2:],color = black, linestyle= ':')
plt.plot(ttX2[2:],ttY2[2:],color = black, linestyle= ':')
#if i ==0 :
fig.savefig(imagepath + string + '_5.png', format = 'png')
print('Create Image: '+ imagepath+ string + '.png')
if paperpath is not None: fig.savefig(paperpath + string + '.png', format = 'png')
plt.close(fig)
def Image_Illustration_Multi(string = 'Illustration_Multi'):
    '''------------------------------------------------------------
    Description:
        Illustrates an astrometric microlensing event with two
        background stars ('Star 1' and 'Star 2') lensed by the same
        foreground lens, including the along-scan separations for a
        few transits with different scan directions.
    ---------------------------------------------------------------
    Input:
        string: base name of the output image
    ---------------------------------------------------------------
    Output:
        saves imagepath + string + '.png' (and a copy in paperpath
        if paperpath is not None)
    ------------------------------------------------------------'''
dark= 'black' in plt.rcParams['savefig.facecolor']
if dark:
string = 'dark_'+string
black = color_own([0.7,0.7,0.7,1])
grey = color_own([.5,.5,0.5,1])
cyan = color_own([0,1,1,1])
blue = color_own([0,0,1,1])
lime = color_own([0.6,1.2,0,1])
green = color_own([0,1,0,1])
red = color_own([1,0,0,1])
orange = color_own([1,.5,0,1])
else:
black = color_own([0.,0.,0.,1])
grey = color_own([.5,.5,0.5,1])
cyan = color_own([0,1,1,1])
blue = color_own([0,0,1,1])
lime = color_own([0.6,1.2,0,1])
green = color_own([0,1,0,1])
red = color_own([1,0,0,1])
orange = color_own([1,1,0,1])
t = np.array([12, 35, 41, 61, 73, 89])
scandir = np.array([0.1, 0.7, 0.4, 0.8 , 0.2, 0.1])*np.pi
x1 = np.linspace(1,13,100) + 0.3 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y1 = np.linspace(1,3,100) + 0.3* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
x2 = np.linspace(3,7,100)# + 0.03 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y2 = np.linspace(7,4.5,100)# + 0.03* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
x3 = np.linspace(12,10,100)# + 0.03 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y3 = np.linspace(8,6,100)# + 0.03* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
d2 = np.sqrt((x1-x2)**2 + (y1-y2)**2)
d3 = np.sqrt((x1-x3)**2 + (y1-y3)**2)
TE = 1.5
X2 = x2 + (x2-x1) * TE/(d2**2 +2*TE)
Y2 = y2 + (y2-y1) * TE/(d2**2 +2*TE)
X3 = x3 + (x3-x1) * TE/(d3**2 +2*TE)
Y3 = y3 + (y3-y1) * TE/(d3**2 +2*TE)
dX2 = x1-X2
dY2 = y1-Y2
dx2 = x1-x2
dy2 = y1-y2
dX3 = x1-X3
dY3 = y1-Y3
dx3 = x1-x3
dy3 = y1-y3
fig = plt.figure(figsize= (12,8.8))
plt.subplots_adjust(
top=0.95,
bottom=0.05,
left=0.05,
right=0.95,
hspace=0.0,
wspace=0.2)
ax = plt.subplot(111)
ax.axis('equal')
ax.axis('off')
for i in range(len(t)):
delta = 0.05
xm1 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + x1[t[i]]
ym1 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + y1[t[i]]
xm2 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + X2[t[i]]
ym2 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + Y2[t[i]]
xm3 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + X3[t[i]]
ym3 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + Y3[t[i]]
dSC3 = ((dX3[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
+ (dY3[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
dSC2 = ((dX2[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
+ (dY2[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
dsc3 = ((dx3[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
+ (dy3[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
dsc2 = ((dx2[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
+ (dy2[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
ttX2 = np.array([0,-dSC2[1]/2,dSC2[1]/2,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],X2[t[i]],X2[t[i]]])
ttY2 = np.array([0,-dSC2[1]/2,dSC2[1]/2,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],Y2[t[i]],Y2[t[i]]])
ttX3 = np.array([0,-dSC3[1]/2,dSC3[1]/2,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],X3[t[i]],X3[t[i]]])
ttY3 = np.array([0,-dSC3[1]/2,dSC3[1]/2,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],Y3[t[i]],Y3[t[i]]])
ttx2 = np.array([0,-dsc2[1]/2-0.3,dsc2[1]/2-0.3,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],x2[t[i]],x2[t[i]]])
tty2 = np.array([0,-dsc2[1]/2-0.3,dsc2[1]/2-0.3,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],y2[t[i]],y2[t[i]]])
if i == 3: off = [-0.4,-0.2]
else: off = [0,-0.2]
ttx3 = np.array([0,-dsc3[1]/2+off[1],dsc3[1]/2+off[1],0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],x3[t[i]],x3[t[i]]])
tty3 = np.array([0,-dsc3[1]/2+off[1],dsc3[1]/2+off[1],0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],y3[t[i]],y3[t[i]]])
ttX3 = np.array([0,-dSC3[1]/2+off[0],dSC3[1]/2+off[0],0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],X3[t[i]],X3[t[i]]])
ttY3 = np.array([0,-dSC3[1]/2+off[0],dSC3[1]/2+off[0],0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],Y3[t[i]],Y3[t[i]]])
'''
if i % 2 == 0:
plt.arrow(ttX2[2],ttY2[2], 0.0001*(ttX2[2]-ttX2[1]),0.0001*(ttY2[2]-ttY2[1]),color = color_own([0,0,1,1]),head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttX2[1],ttY2[1], 0.0001*(ttX2[1]-ttX2[2]),0.0001*(ttY2[1]-ttY2[2]),color = color_own([0,0,1,1]),head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.plot(ttX2[0:2],ttY2[0:2],color = black, linestyle= ':')
plt.plot(ttX2[1:3],ttY2[1:3],color = [158/200,1/200,66/200, 1], linewidth = 3,linestyle= '-')
plt.plot(ttX2[2:],ttY2[2:],color = black, linestyle= ':')
'''
if i% 2 == 0:
plt.plot(xm2,ym2, color = black, linewidth = 1,zorder = 1)
plt.plot(xm1,ym1, color = black, linewidth = 1,zorder = 1)
plt.arrow(ttX2[2],ttY2[2], 0.0001*(ttX2[2]-ttX2[1]),0.0001*(ttY2[2]-ttY2[1]),color=red ,head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttX2[1],ttY2[1], 0.0001*(ttX2[1]-ttX2[2]),0.0001*(ttY2[1]-ttY2[2]),color=red ,head_width=0.2,\
overhang = 0.5, length_includes_head=True, zorder = 10)
plt.plot(ttX2[0:2],ttY2[0:2],color = black, linestyle= ':')
plt.plot(ttX2[1:3],ttY2[1:3],color = red, linewidth = 3)
plt.plot(ttX2[2:],ttY2[2:],color = black, linestyle= ':')
plt.plot(ttx2[0:2],tty2[0:2],color = black, linestyle= ':')
plt.plot(ttx2[1:3],tty2[1:3],color = orange, linewidth = 3,linestyle= '-', dashes=(10, 2))
plt.plot(ttx2[2:],tty2[2:],color = black, linestyle= ':')
plt.arrow(ttx2[2],tty2[2], 0.0001*(ttx2[2]-ttx2[1]),0.0001*(tty2[2]-tty2[1]),color = orange, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttx2[1],tty2[1], 0.0001*(ttx2[1]-ttx2[2]),0.0001*(tty2[1]-tty2[2]),color = orange, head_width=0.2,\
overhang = 0.5, length_includes_head=True, zorder = 10)
if i >=3:
plt.plot(ttX3[0:2],ttY3[0:2],color = black, linestyle= ':')
plt.plot(ttX3[1:3],ttY3[1:3],color = red, linewidth = 3)
plt.plot(ttX3[2:],ttY3[2:],color = black, linestyle= ':')
plt.arrow(ttX3[2],ttY3[2], 0.0001*(ttX3[2]-ttX3[1]),0.0001*(ttY3[2]-ttY3[1]),color=red, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttX3[1],ttY3[1], 0.0001*(ttX3[1]-ttX3[2]),0.0001*(ttY3[1]-ttY3[2]),color= red, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.plot(xm3,ym3, color = black, linewidth = 1,zorder = 1)
plt.plot(ttx3[0:2],tty3[0:2],color = black, linestyle= ':')
plt.plot(ttx3[1:3],tty3[1:3],color = orange, linewidth = 3,linestyle= '-', dashes=(10, 2))
plt.plot(ttx3[2:],tty3[2:],color = black, linestyle= ':')
plt.arrow(ttx3[2],tty3[2], 0.0001*(ttx3[2]-ttx3[1]),0.0001*(tty3[2]-tty3[1]),color = orange, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttx3[1],tty3[1], 0.0001*(ttx3[1]-ttx3[2]),0.0001*(tty3[1]-tty3[2]),color = orange, head_width=0.2,\
overhang = 0.5, length_includes_head=True, zorder = 10)
'''
else:
plt.plot(xm3,ym3, color = 'grey', linewidth = 2, zorder = -1)
'''
elif i >=3:
plt.plot(xm3,ym3, color = black, linewidth = 1,zorder = 1)
plt.plot(xm1,ym1, color = black, linewidth = 1,zorder = 1)
#plt.plot(xm2,ym2, color = 'grey', linewidth = 2, zorder = -1)
plt.plot(ttX3[0:2],ttY3[0:2],color = black, linestyle= ':')
plt.plot(ttX3[1:3],ttY3[1:3],color = red, linewidth = 3 )
plt.plot(ttX3[2:],ttY3[2:],color = black, linestyle= ':')
plt.arrow(ttX3[2],ttY3[2], 0.0001*(ttX3[2]-ttX3[1]),0.0001*(ttY3[2]-ttY3[1]),color= red, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttX3[1],ttY3[1], 0.0001*(ttX3[1]-ttX3[2]),0.0001*(ttY3[1]-ttY3[2]),color= red, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.plot(ttx3[0:2],tty3[0:2],color = black, linestyle= ':')
plt.plot(ttx3[1:3],tty3[1:3],color = orange, linewidth = 3,linestyle= '-', dashes=(10, 2))
plt.plot(ttx3[2:],tty3[2:],color = black, linestyle= ':')
plt.arrow(ttx3[2],tty3[2], 0.0001*(ttx3[2]-ttx3[1]),0.0001*(tty3[2]-tty3[1]),color= orange, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttx3[1],tty3[1], 0.0001*(ttx3[1]-ttx3[2]),0.0001*(tty3[1]-tty3[2]),color= orange, head_width=0.2,\
overhang = 0.5, length_includes_head=True, zorder = 10)
'''
else:
plt.plot(xm3,ym3, color = 'grey', linewidth = 2, zorder = -1)
plt.plot(xm2,ym2, color = 'grey', linewidth = 2, zorder = -1)
plt.plot(xm1,ym1, color = 'grey', linewidth = 2, zorder = -1)
'''
#if i ==0 :
plt.plot(x1,y1, color = grey, linewidth = 7)
plt.plot(x2,y2, color = cyan, linestyle= '--',dashes=(6, 2), linewidth = 3, zorder = -1)
plt.plot(X2,Y2, color = blue, linewidth = 3)
plt.plot(x3,y3, color = lime, linestyle= '--',dashes=(6, 2), linewidth = 3, zorder = -1)
plt.plot(X3,Y3, color = green, linewidth = 3)
plt.xlim([-0.2,13.5])
xr = 12
yr = 7
plt.text(xr-0.8,0,'RA $\cdot$ cos(Dec)',verticalalignment = 'center',fontsize = 25)
plt.text(0,yr + 0.25,'Dec',fontsize = 25, horizontalalignment = 'center', rotation = 90)
plt.text(2,1.5,'Lens',color = grey,fontsize = 25, horizontalalignment = 'center', rotation = 0, weight = 'bold')
plt.text(4,7.5,'Star 1',color = blue, fontsize = 25, horizontalalignment = 'center', rotation = 0,weight = 'bold')
plt.text(11,8,'Star 2',color = green, fontsize = 25, horizontalalignment = 'center', rotation = 0,weight = 'bold')
plt.arrow(-0.025,0,xr-1,0,width = 0.05,overhang = 0.5,head_width = 0.5, head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
plt.arrow(0,-0.025,0,yr-0.5,width = 0.05,overhang = 0.5,head_width = 0.5,head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
fig.savefig(imagepath + string + '.png', format = 'png')
print('Create Image: '+ imagepath+ string + '.png')
if paperpath is not None: fig.savefig(paperpath + string + '.png', format = 'png')
plt.close(fig)
def Image_compare_micro(string = 'aml_vs_pml.png'):
    '''------------------------------------------------------------
    Description:
        Compares astrometric and photometric microlensing: plots the
        astrometric shift (top panel) and the magnification in
        magnitudes (bottom panel) as a function of time for three
        illustrative lens-source configurations, together with
        reference lines at 0.5 mas and 1 mmag.
    ---------------------------------------------------------------
    Input:
        string: name of the output image
    ---------------------------------------------------------------
    Output:
        saves imagepath + string
    ------------------------------------------------------------'''
dark= 'black' in plt.rcParams['savefig.facecolor']
if dark:
string = 'dark_'+string
c1 =color_own([1,1,0,1])
c2 =color_own([1,0,1,1])
c3 =color_own([0.6,1.2,0,1])
c4 =color_own([0,1,1,1])
else:
c1 =color_own([0,0,1,1])
c2 =color_own([1,0,0,1])
c3 =color_own([0.5,1,0,1])
c4 =color_own([1,0,1,1])
rc('xtick', labelsize=24)
rc('ytick', labelsize=24)
Separation_E1 = 20
Separation_E2 = 2
Separation_E3 = 20
xx1 = np.array(range(-1000,1000,1))
xx2 = xx1
xx3 = xx1
yy1 = xx1*0+10
yy2 = xx1*0+1
yy3 = xx1*0+200
uu1 = np.sqrt(xx1*xx1+yy1*yy1)/Separation_E1
uu2 = np.sqrt(xx2*xx2+yy2*yy2)/Separation_E2
uu3 = np.sqrt(xx3*xx3+yy3*yy3)/Separation_E3
dSeparation1 = uu1/(uu1*uu1 + 2)*Separation_E1
dSeparation2 = uu2/(uu2*uu2 + 2)*Separation_E2
dSeparation3 = uu3/(uu3*uu3 + 2)*Separation_E3
A1 = (uu1*uu1+2)/(uu1*np.sqrt(uu1*uu1+4))
A2 = (uu2*uu2+2)/(uu2*np.sqrt(uu2*uu2+4))
A3 = (uu3*uu3+2)/(uu3*np.sqrt(uu3*uu3+4))
dm1 = 2.5*np.log10(A1)
dm2 = 2.5*np.log10(A2)
dm3 = 2.5*np.log10(A3)
xx1 = xx1/250
xx2 = xx2/250
xx3 = xx3/250
figure = plt.figure(figsize = (12,12))
plt.subplots_adjust(hspace=0.1)
ax1 = plt.subplot2grid((2,1), (0, 0), rowspan=1)
plt.plot(xx1,dSeparation1, color = c1,linewidth = 4)
line1, = plt.plot(xx2,dSeparation2,color = c3 ,linewidth = 4)
plt.plot(xx3,dSeparation3,linestyle = '--',color = c4,linewidth = 4)
line1.set_dashes([10, 2, 10, 2])
plt.yticks([1,2,3,4,5,6,7,8],['1.0 ','2.0 ','3.0 ','4.0 ','5.0 ','6.0 ','7.0 ','8.0 '])
plt.ylim([0,8])
plt.ylabel('Shift [mas]',fontsize = 30)
plt.plot(xx1,xx1*0+0.5, color=c2,linewidth = 3)
ax1.tick_params(length=6, width=2)
ax1.tick_params(which='minor', length=4,width=2)
xticklabels1 = ax1.get_xticklabels()
plt.setp(xticklabels1, visible=False)
ax2 = plt.subplot2grid((2,1), (1, 0), rowspan=1,sharex=ax1)
plt.semilogy(xx1,dm1,color = c1,linewidth = 4)
line1, = plt.semilogy(xx2,dm2,color = c3,linewidth = 4)
plt.semilogy(xx3,dm3,linestyle = '--',color = c4,linewidth = 4)
line1.set_dashes([10, 2, 10, 2])
plt.semilogy(xx1,xx1*0+0.001, color= c2 ,linewidth = 3)
plt.ylim([0.0001,1])
plt.ylabel('Magnification [mag]',fontsize = 30)
ax2.tick_params(length=6, width=2)
ax2.tick_params(which='minor', length=4,width=2)
plt.xlabel('$\Delta$ T [yr]',fontsize = 30)
figure.savefig(imagepath+ string,format = 'png')
print('Create Image: '+ imagepath+ string + '.png')
plt.close(figure)
def Image_astroshift(string = 'astroshift'):
    '''------------------------------------------------------------
    Description:
        Illustrates, for thetaE = 12.75 mas and u_min = 0.75, the
        unlensed source trajectory (red), the trajectories of the two
        images (blue, labelled '+' and '-'), the corresponding shift
        of the centre of light (green), and a circle of one Einstein
        radius for scale.
    ---------------------------------------------------------------
    Input:
        string: base name of the output image
    ---------------------------------------------------------------
    Output:
        saves imagepath + string + '.png' (and a copy in paperpath
        if paperpath is not None)
    ------------------------------------------------------------'''
fig,ax = plt.subplots(figsize = [12,12] )
x = np.array([-100+0.1*t for t in range(2000)])
ThetaE = 12.75
umin = 0.75
ThetaE = 12.75
fls = 10
siz = 10
y = np.array([ThetaE*umin for i in x])
ax.set_xlim(-30,30)
ax.set_ylim(-30,30)
ux =np.array([x[i]/ThetaE for i in range(len(x))])
uy =np.array([y[i]/ThetaE for i in range(len(x))])
u = np.array([np.sqrt(np.power(ux[i],2)+np.power(uy[i],2)) for i in range(len(x))])
theta_px = - (np.sqrt(u*u+4) - u)/(2 * u) * x
theta_py = - (np.sqrt(u*u+4) - u)/(2 * u) * y
theta_mx = - (-np.sqrt(u*u+4) - u)/(2 * u) * x
theta_my = - (-np.sqrt(u*u+4) - u)/(2 * u) * y
delta_thetax = -x/(np.power(u,2)+2)
delta_thetay = -y/(np.power(u,2)+2)
plt.plot(x,y,'r',linewidth = 2)
plt.plot(theta_px,theta_py,'b--',linewidth = 2)
plt.plot(theta_mx,theta_my,'b--',linewidth = 2)
plt.plot(delta_thetax,delta_thetay,color='green',linewidth = 2)
xx = [ThetaE * np.sin(2*np.pi*i/1000)+6 for i in range(1000)]
yy = [ThetaE * np.cos(2*np.pi*i/1000)+ThetaE*umin for i in range(1000)]
step = 30
x = x[10::step]
y = y[10::step]
theta_px = theta_px[10::step]
theta_py = theta_py[10::step]
theta_mx = theta_mx[10::step]
theta_my = theta_my[10::step]
delta_thetax = delta_thetax[10::step]
delta_thetay = delta_thetay[10::step]
plt.plot(x,y,'rx', markersize = siz, markeredgewidth = 3)
plt.plot(theta_px,theta_py,'bs', markersize = siz)
plt.plot(theta_mx,theta_my,'bs', markersize = siz)
plt.plot(delta_thetax,delta_thetay,'o', color='green')
n = np.where(np.array(x) == 6.0)[0][0]
plt.plot(x[n],y[n],'rx',markersize = siz * 1.5,markeredgewidth = 4)
plt.plot(theta_px[n],theta_py[n],'bs',markersize = siz * 1.5)
plt.plot(theta_mx[n],theta_my[n],'bs',markersize = siz * 1.5)
plt.plot(delta_thetax[n],delta_thetay[n],'o', color='green',markersize = siz * 1.5)
plt.plot(xx,yy,color='black')
plt.plot([-60,60],[-10*ThetaE*umin ,10 * ThetaE*umin ], '--', color = 'black', dashes=[10,5])
plt.xlabel(r'$\delta\theta_{x} [\mathrm{mas}]$', fontsize = 30)
plt.ylabel(r'$\delta\theta_{y} [\mathrm{mas}]$', fontsize = 30)
plt.arrow(13.5,21,2,-1,width = 0.1,head_width = 0.70,head_length = 1,color= 'black', zorder = 100)
plt.arrow(6.75,10.75,2,0,width = 0.1,head_width = 0.70,head_length = 1,color= 'red', zorder = 100)
plt.arrow(11,18,2,-0.6,width = 0.1,head_width = 0.70,head_length = 1,color= 'blue', zorder = 100)
plt.arrow(-1,-5,-2,0.7,width = 0.1,head_width = 0.7,head_length = 1,color= 'green', zorder = 100)
plt.arrow(-4,-9,-2,1,width = 0.1,head_width = 0.7,head_length = 1,color= 'blue', zorder = 100)
#plt.scatter([0],[0],s = 100, c='k', zorder = 1000)
plt.plot([0],[0],'k*',markersize = siz * 2)
plt.text(-10,20, '$-$',color = 'blue', fontsize = 50)
plt.text(7,-10, '$+$',color = 'blue',fontsize = 50)
plt.xticks( fontsize = 25)
plt.yticks( fontsize = 25)
fig.savefig(imagepath + string + '.png')
if paperpath is not None: fig.savefig(paperpath+ string + '.png')
print('Create Image: '+ imagepath+ string + '.png')
def create_all_Image():
    '''------------------------------------------------------------
    Description:
        Creates all images of this module by calling every function
        in __all__ whose name contains 'Image_', once in dark mode
        and once in the default mode.
    ---------------------------------------------------------------
    Input:
        None
    ---------------------------------------------------------------
    Output:
        image files written by the individual Image_* functions
    ------------------------------------------------------------'''
    # create all images in both dark and default mode at once
for j in range(1,-1,-1):
dark(j)
for i in __all__: #loop over all functions in this code
if 'Image_' in i:
exec(i+'()')
if __name__ == '__main__': create_all_Image()
```
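
The illustration functions above all draw on the same point-lens relations: the two image positions θ± = ½(u ± √(u²+4))·θE, the shift of the combined centre of light δθ = u/(u²+2)·θE, and the magnification A = (u²+2)/(u·√(u²+4)). These appear in the code as factors such as `TE/(d**2 + 2*TE)` and `(np.sqrt(u*u+4) - u)/(2*u)`. The following standalone sketch is an editor's addition, not part of the repository; the numerical inputs are illustrative only.

```python
import numpy as np

def point_lens_quantities(u, thetaE):
    """Point-lens relations used by the plotting code above.

    u:      unlensed lens-source separation in units of the Einstein radius
    thetaE: Einstein radius in mas
    Returns the two image positions, the centroid shift and the magnification.
    """
    theta_plus = 0.5 * (u + np.sqrt(u**2 + 4)) * thetaE    # brighter (major) image [mas]
    theta_minus = 0.5 * (u - np.sqrt(u**2 + 4)) * thetaE   # fainter (minor) image [mas]
    shift = u / (u**2 + 2) * thetaE                        # shift of the combined centre of light [mas]
    A = (u**2 + 2) / (u * np.sqrt(u**2 + 4))               # total magnification
    return theta_plus, theta_minus, shift, A

# Illustrative values, matching the scales used in Image_astroshift:
u = np.linspace(0.75, 10, 5)
print(point_lens_quantities(u, thetaE=12.75))
```

`Image_compare_micro` plots exactly the last two of these quantities, the shift and 2.5·log10(A), as a function of time.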
#### File: jkluter/MLG/name.py
```python
import numpy as np
__all__ = ['name']
def name(event_id, no_name = True, silent = False):
    '''------------------------------------------------------------
    Description:
        Looks up a common name for a given Gaia DR2 source_id in the
        namelist below.
    ---------------------------------------------------------------
    Input:
        event_id: Gaia DR2 source_id
        no_name:  if True, return 'DR2: <source_id>' when no name is
                  listed; if False, return an empty string
        silent:   suppress warnings for unknown or duplicate ids
    ---------------------------------------------------------------
    Output:
        name of the star as a string
    ------------------------------------------------------------'''
event_id = np.int64(event_id)
where = np.where(namelist['ID'] == event_id)[0]
if len(where)== 0:
if not silent:print('Not in list:', event_id)
if no_name:
return 'DR2: '+ str(event_id)
else: return ''
if len(where)==1:
if namelist[where[0]]['Name'] == '' :
if no_name:
return 'DR2: '+ str(event_id)
else:
return ''
else:
return namelist[where[0]]['Name']
if len(where)>1:
        if not silent: print('duplicate:', event_id)
return namelist[where[0]]['Name']
def name_good(event_id, silent = False):
    '''------------------------------------------------------------
    Description:
        Returns the 'Good' flag of a given Gaia DR2 source_id from
        the namelist below (True if the id is not listed).
    ---------------------------------------------------------------
    Input:
        event_id: Gaia DR2 source_id
        silent:   suppress warnings for duplicate ids
    ---------------------------------------------------------------
    Output:
        boolean 'Good' flag
    ------------------------------------------------------------'''
event_id = np.int64(event_id)
where = np.where(namelist['ID'] == event_id)[0]
if len(where)== 0:
return True
elif len(where)==1:
return bool(namelist[where[0]]['Good'])
elif len(where)>1:
        if not silent: print('duplicate:', event_id)
return namelist[where[0]]['Good']
return True
namelist = np.array([
    (5853498713160606720, 'Proxima Centauri', True),
    (1872046574983497216, '61 Cyg B', True),
(5332606522595645952, 'LAWD 37', True),
(1872046574983507456, '61 Cyg A', True),
(5254061535097566848, 'L 143-23', True),
(4516199240734836608, 'Ross 733', True),
(4472832130942575872, "Barnard's star" , True),
(5951824121022278144, 'GJ 674', True),
(5339892367683264384, "Innes' star", True),
(470826482635701376, '<NAME> B', True),
(6710522912622915712, 'UCAC3 86-396928', True),
(1543076475514008064, 'G 123-61B', True),
(5696640563228494464, 'WT 1550', True),
(1964791583368232832, 'tau Cyg A', True),
(4687445500635789184, 'OGLE SMC115.5 319', True),
(1978296747258230912, 'LSPM J2129+4720', True),
(2315857227976341504, 'HD 2404', True),
(429297924157113856, 'G 217-32', True),
(1157210463244589568, 'G 15-11', True),
(2390377345808152832, 'HD 222506', True),
(1935209944575937024, 'UCAC4 677-123934', True),
(4793869292650732160, 'UPM J0537-4924', True),
(6677000246203170944, 'HD 197484', True),
(4687511776265158400, 'L 51-47', True),
(5492622847798742272, 'L 240-16', True),
(1543076475509704192, 'G 123-61A', True),
(4544176352376541312, 'LP 506-50 A', True),
(6522256697697129600, 'LEHPM 6274', True),
(4529285391522200320, '109 Her', True),
(6128259487711387008, 'L 328-36', True),
(6585158207436506368, 'HD 207450', True),
(5309386791195469824, 'HD 85228', True),
(1961141200470502400, 'LSPM J2154+4344', True),
(3347284175186268160, 'UCAC4 527-018168', True),
(5396870945686245504, 'HD 97073', True),
(5902780301768699392, 'HD 136466A', True),
(3398414352092062720, 'G 100-35B', True),
(4924486979061864832, 'CD-53 40', True),
(4657982643495556608, 'OGLE LMC162.5 41235', True),
(577478114092768384, 'G 114-33', True),
(6368299918479525632, 'UCAC3 27-74415', True),
(6036143915373978880, 'HD 144179A', True),
(5225957639881367552, 'L 66-82', True),
(5921433963191695872, 'LAWD 68', True),
(4623882630333283328, 'L 31-84', True),
(5559827605531610240, 'CD-43 3055', True),
(2534338503471881728, 'G 70-55A', True),
(2846620421603149312, 'G 129-51', True),
(5302618648583292800, 'PM J08503-5848', True),
(4451575895403432064, 'G 16-29', True),
(5930568598406530048, 'HD 149192', True),
(3763681215875388288, 'G 163-21', True),
(4116840541184279296, 'L 702-43', True),
(2759428629829608064, 'G 29-73', True),
(5736464668224470400, 'L 820-19', True),
(2912585037597833856, 'LP 838-28 A', True),
(5235714289453482240, 'L 103-12', True),
(1382796061323905024, 'TYC 3064-1282-1', True),
(546488928621555328, 'G 245-47A', True),
(6213824650812054528, 'LP 859-51', True),
(5134154157033028352, 'LP 828-36', True),
(2790883634570755968, 'LP 350-66', True),
(5600272625752039296, 'L 601-78', True),
(6874112547575038336, 'LP 814-25', True),
(6549824370485770112, 'UCAC4 260-200149', True),
(5608893346278863616, 'LP 896-12 A', True),
(4056233810947522304, 'HD 316899', True),
(654826970401335296, 'HD 66553', True),
(6054954738289909632, 'HD 108500A', True),
(4149572555606973568, 'LP 748-33', True),
(2826159850241446784, 'HD 222474A', True),
(2358524597030794112, 'V* YZ Cet', True),
(1875076004382831616, 'BD+21 4747A', True),
(6415630939116638464, 'EC 19249-7343', True),
(1444367711752056960, 'LP 379-98', True),
(1625058605098521600, 'HD 146868', True),
(239070631455336064, 'BD+39 710A', True),
(568432427635050240, 'G 245-68', True),
(4937000898855759104, 'GJ 86', False),
(5032229910174557056, 'HD 5425A', True),
(2870877194301321088, 'TYC 2752-1121-1', True),
(6916644779774493696, 'LP 636-60', True),
(5359745012164917632, 'L 250-10', True),
(2646280705713202816, 'BD+00 5017', True),
(5243594081269535872, 'L 100-115', True),
(5429919108417092096, 'LP 462-88', True),
(5979367986779538432, 'CD-32 12693', True),
(4041006743145526656, 'LP 486-32', True),
(835871554301614848, 'LSPM J1047+5016', True),
(5895473600324039680, 'L 260-120', True),
(3585636855608873984, 'HD 102392A', True),
(4228023259961831296, 'HD 197623', True),
(101283987497967360, 'HD 13482A', True),
(5393446658454453632, 'DENIS J1048.0-3956', True),
(3842095911266162432, 'HD 78663', True),
(4198685678421509376, 'HD 177758', True),
(643819484616249984, 'mu. Leo', True),
(4786715182805934592, 'HD 30361A', True),
(4269932382606282112, 'eta Ser', True),
(5941940988948890112, 'HD 330763A', True),
(6031475835680030720, 'L 555-14', True),
(1568219729458240128, 'HD 110833', True),
(1015799283499485440, 'HD 77006', True),
(3202470247468181632, 'BD-06 855', True),
(488099359330834432, 'HD 22399', True),
(1251328585567327744, 'HD 120065', True),
(962141157557751552, 'G 101-35', True),
(416158489625704320, 'G 172-11', True),
(1962597885872344704, 'BD+43 4138', True),
(4361365433110271232, 'HD 156826', True),
(1817982759302341376, 'HD 347427', True),
(2937651222655480832, 'HD 44573', True),
(5657306462454704640, 'HD 85725', True),
(4780100658292046592, 'L 230-188', True),
(5941478335062267392, 'L 338-152', True),
(6254033894120917760, 'L 768-119', True),
(6209995700348976896, 'LP 915-41', True),
(1136512191212093440, 'G 251-35', True),
(5650153825784353280, 'HD 78643', True),
(5413531987125139712, 'PM J10195-4622', True),
(2028065934962125184, 'Ross 165A', True),
(6787883382523590400, 'HD 201989', True),
(3934198156329809152, 'HD 110315', True),
(4657193606465368704, 'HD 39194', True),
(598180646733241216, 'G 46-2', True),
(5801950515627094400, 'HD 155918', True),
(829777377964973952, 'HD 92855', True),
(5856411869205581568, 'HD 109200', True),
(466294295706341760, 'HD 18757', True),
(533621859441169920, 'Ross 318', True),
(4535144998232287232, 'HD 171314', True),
(5534742080239458048, 'HD 66020', True),
(5849427049801267200, 'HD 124584', True),
(4293318823182081408, 'HD 180617', True),
(1193030490492925824, 'gam Ser', True),
(689004018040211072, '75 Cnc', True)
], dtype=[('ID','i8'),('Name','U25'), ('Good',bool)])
```
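
A minimal usage sketch for this lookup module (an editor's addition; the import path `MLG.name` is inferred from the file location and may differ):

```python
# The import path is inferred from the file location (jkluter/MLG/name.py).
from MLG.name import name, name_good

print(name(4472832130942575872))             # -> "Barnard's star"
print(name(1, silent=True))                  # unknown id -> "DR2: 1"
print(name(1, no_name=False, silent=True))   # unknown id -> ""
print(name_good(4937000898855759104))        # -> False (GJ 86 is flagged as not 'Good')
```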
#### File: MLG/Simulation/MonteCarlo.py
```python
from MLG import path,default_table, message_folder, Version,Subversion
from MLG.Simulation import RawData
from MLG.Simulation import RealData
from MLG.Modeling import fitting_micro , fitting_motion
from MLG.Math import percentile
from joblib import Parallel, delayed
import numpy as np
import time
import pickle
import datetime
import glob
import os
__all__ = ['StartMC','loadPkl', 'calcParallel']
def StartMC(EventList, namelist = None, keywords = {}, **kwargs):
'''------------------------------------------------------------
Description:
        This function distributes the calculation for the Monte Carlo
        simulation over multiple CPU cores and stores the results in a PKL file.
        Uses the joblib package for parallel computing.
---------------------------------------------------------------
Input:
        EventList:  String of the input table, or
                    a list of lens-source pairs for each event, or
                    an MC_result dictionary (to also vary the mass)
        namelist:   List of names for each event (for the plotting routine; if None, the index is used)
---------------------------------------------------------------
Output:
        MC_result: Dictionary of the results. Contains:
            'Header': List of all control inputs and the code version
'Results': List of the fit parameters
'Input_parameter': List of the input parameters
'Results_ext_obs': List of the fit parameters with external_obs if ext_obs > 0
'Chi2': List of the reduced CHI2
'Eventlist': (see Input)
'Names': (see Input)
------------------------------------------------------------'''
#--------------------------------------------------------------
#Update controle keywords
keywords.update(kwargs)
num_core = keywords.get('num_core', 6) # Number of cores
instring = keywords.get('instring', '') # string for the save files
    message = keywords.get('message', False)            # write the process of the computation to a file
DR3 = keywords.get('DR3', False) # Use of the Data of DR3 only
extended = keywords.get('extended', False) # Use of the 10 years data of the extended mission
vary_eta = keywords.get('vary_eta', False) # Use an new scaninglaw for 2020.5-2024.5
ext_obs = keywords.get('ext_obs', False) # include external observations
    n_error_picks = keywords.get('n_error_picks', 500)  # Number of picked observations from the error ellipse
    n_par_picks = keywords.get('n_par_picks', 1)        # Number of picked input parameters from the error ellipse
namelist = keywords.get('namelist', namelist) # Check if namelist is given in the keyword dictionary
#--------------------------------------------------------------
#--------------------------------------------------------------
#Create random seeds for the calculation
seed = []
for i in range(num_core):
seed.append((int(time.time()*10**9))**4 %4294967295)
keywords['seed']=seed
#--------------------------------------------------------------
#--------------------------------------------------------------
#Load Table if EventList is an string
if isinstance(EventList, str) == True:
if EventList == '':
EventList,_ = RealData.loadRealData(default_table)
else:
EventList,_ = RealData.loadRealData(EventList)
#--------------------------------------------------------------
#--------------------------------------------------------------
    # start calculations with varying the mass if a dict is given
if isinstance(EventList, dict) == True:
print('vary mass')
#--------------------------------------------------------------
# extract lists from Dictionary
MC_Results = EventList
res_all_events = MC_Results['Results']
par_all_events = MC_Results['Input_parameter']
EventList = MC_Results['Eventlist']
namelist = MC_Results['Names']
header = MC_Results['Header']
#--------------------------------------------------------------
#--------------------------------------------------------------
# only consider the events with an error < 100%
EventList = [EventList[i] for i in range(len(res_all_events)) \
if (percentile(res_all_events[i][:,0])[0]> 0)]
if namelist is None:
namelist_good = None
else:
namelist_good = [namelist[i] for i in range(len(res_all_events)) \
if (percentile(res_all_events[i][:,0])[0]> 0)]
#--------------------------------------------------------------
#--------------------------------------------------------------
# update control keywords
        keywords['Good'] = True     # indicates calculation of the good events only
        goodstr = 'Good_'           # string indicating calculation of the good events only
        keywords['vary_par'] = 5    # vary all parameters
        n_error_picks = keywords.get('n_error_picks', 500)  # use value given in keywords, else set to default
        keywords['n_error_picks'] = n_error_picks
        n_par_picks = keywords.get('n_par_picks', 100)      # use value given in keywords, else set to default
        keywords['n_par_picks'] = n_par_picks
#--------------------------------------------------------------
#--------------------------------------------------------------
# start first calculation
elif isinstance(EventList, list) == True:
#--------------------------------------------------------------
# update control keywords
keywords['n_par_picks']=n_par_picks # Update keyword
keywords['n_error_picks']=n_error_picks # Update keyword
keywords['vary_par']=keywords.get('vary_par', 1) # vary only non given parameters
keywords['Good']=False # indication calculation of the good events only
goodstr=''
#--------------------------------------------------------------
#--------------------------------------------------------------
#set default namelist to integer (not comparable within different inputs)
        if namelist is None:
            namelist = [str(i) for i in range(len(EventList))]
#--------------------------------------------------------------
#--------------------------------------------------------------
# exclude different filetypes
else:
print ('Input Error!')
return
#--------------------------------------------------------------
#--------------------------------------------------------------
# create header
    if instring != '':
instring = instring + '_'
if DR3:
#use only events before 2019.5 for DR3
EventList = [EventList[kkk] for kkk in range(len(EventList)) if EventList[kkk][0].getTca() < 2019.5 ]
header = len(EventList),3,n_error_picks,n_par_picks, keywords,Version+'.'+Subversion
elif extended or vary_eta or ext_obs:
#use the data of the extended mission
header = len(EventList),10,n_error_picks,n_par_picks,keywords, Version+'.'+Subversion
else:
header = len(EventList),5,n_error_picks,n_par_picks,keywords, Version+'.'+Subversion
print(time.ctime())
print(header)
#--------------------------------------------------------------
#--------------------------------------------------------------
# Distribute on different cores
num_events = len(EventList)
events_per_core = num_events/num_core
    # calculation of multiple events (Proxima Centauri tends to take as much computation time as all other events)
if len(EventList[0]) > 10 and num_core > 2:
events_per_core = (num_events-1)/(num_core -1)
for core in range(num_core):
partstring = path+'Simulation/evpart/eventlist_'+goodstr+instring+'part_%i.pkl' % core
if core == 0:
f = open(partstring, 'wb')
pickle.dump([EventList[0],], f)
f.close()
elif core == num_core -1:
f = open(partstring, 'wb')
pickle.dump(EventList[1 + round(events_per_core * (core - 1)):], f)
f.close()
else:
f = open(partstring, 'wb')
pickle.dump(EventList[1 + round(events_per_core * (core - 1)) : 1 + round(events_per_core * (core))], f)
f.close()
    # distribute events equally
else:
for core in range(num_core):
partstring = path+'Simulation/evpart/eventlist_'+goodstr+instring+'part_%i.pkl' % core
if core == num_core -1:
f = open(partstring, 'wb')
pickle.dump(EventList[round(events_per_core * core):], f)
f.close()
else:
f = open(partstring, 'wb')
pickle.dump(EventList[round(events_per_core * core) : round(events_per_core * (core + 1))], f)
f.close()
#--------------------------------------------------------------
#--------------------------------------------------------------
#start calculations parallel
if num_core != 1:
res_par = Parallel(n_jobs=num_core)(delayed(calcParallel)(i,instring, keywords) for i in range(num_core))
else:
res_par = [calcParallel(0,instring, keywords),]
#--------------------------------------------------------------
#--------------------------------------------------------------
#merge the results for the parallel computations
res_all_events = []
par_all_events = []
res_no_ML_events = []
chi2_events = []
for res_par_core in res_par:
for par_event in res_par_core[1]:
par_all_events.append(par_event)
for res_event in res_par_core[0]:
res_all_events.append(res_event)
for res_no_event in res_par_core[2]:
res_no_ML_events.append(res_no_event)
for res_no_event in res_par_core[3]:
chi2_events.append(res_no_event)
if ext_obs:
MC_Results = {'Header': header,'Results':res_all_events,'Input_parameter':par_all_events,\
'Results_ext_obs':res_no_ML_events, 'Chi2':chi2_events,'Eventlist':EventList,'Names':namelist}
else:
MC_Results = {'Header': header,'Results':res_all_events,'Input_parameter':par_all_events,\
'Results_no_ML':res_no_ML_events, 'Chi2':chi2_events,'Eventlist':EventList,'Names':namelist}
#--------------------------------------------------------------
#--------------------------------------------------------------
# save results as pkl file
string = path + 'Data/MC'+goodstr[:-1] +'_'+ instring + datetime.datetime.today().strftime('%d-%m-%Y')\
+ '_%.f_%.f_%.f_%.f.pkl' % (header[:4])
f = open(string, 'wb')
pickle.dump(MC_Results, f)
print(string)
if message:
os.system('cp ' + string + ' ' + message_folder)
return MC_Results
def loadPkl(filename = '',Good = False, extended = False, n_error_picks = False, n_par_picks=False):
'''------------------------------------------------------------
    Loads an MC_results PKL file
---------------------------------------------------------------
Input:
filename: filename expected in the MLG/Data Folder
---------------------------------------------------------------
Output:
        MC_Results: Dictionary containing the results from StartMC
------------------------------------------------------------'''
if len(glob.glob(filename)) == 0:
if Good: good = 'Good'
else: good = ''
        if extended: ex = str(extended) + '_'
        else: ex = '*_'
        if n_error_picks: er = str(n_error_picks) + '_'
        else: er = '*_'
        if n_par_picks: pa = str(n_par_picks) + '.pkl'
        else: pa = '*.pkl'
gstring= (path + 'Data/MC' + good + '*_' + ex + er + pa)
g = glob.glob(gstring)
string = g[-1]
if filename != '':
if len(glob.glob(path + 'Data/' + filename)) == 0:
print('File not found! Using standard file')
else:
string = glob.glob(path + 'Data/' + filename)[0]
else:
print('Using standard file')
else: string = glob.glob(filename)[0]
print(string)
f = open(string,'rb')
pkl = pickle.load(f)
f.close()
if isinstance(pkl,dict): #from Version 3.1
if 'Results_comp' in pkl.keys():
pkl['Results_ext_obs'] = pkl.pop('Results')
pkl['Results'] = pkl.pop('Results_comp')
MC_Results = pkl
else:
#until Version 3.1
if len(pkl) == 6:
chi2_events = None
EventList, res_all_events, par_all_events,res_no_ML_events,namelist,header = pkl
try:
par_all_events[0].x
print(1)
except AttributeError: pass
else:
                qq = par_all_events
                par_all_events = res_no_ML_events
                res_no_ML_events = qq
elif len(pkl) == 7:
EventList, res_all_events, par_all_events,res_no_ML_events,\
chi2_events,namelist,header = pkl
try:
par_all_events[0].x
print(1)
except AttributeError: pass
else:
qq = par_all_events
par_all_events = res_no_ML_events
res_no_ML_events = qq
# Transform format to 3.1
MC_Results = {'Header': header,'Results':res_all_events,'Input_parameter':par_all_events,\
'Results_no_ML':res_no_ML_events, 'Chi2':chi2_events,'Eventlist':EventList,'Names':namelist}
return MC_Results
def calcParallel(part, instring, keywords = {} ,**kwargs):
'''------------------------------------------------------------
Description:
        creates sets of observations for a part of the EventList
and fits the data
---------------------------------------------------------------
Input:
part: which part of the EventList
instring: file string of the EventList
keywords/kwargs: setup values for the simulation (see below)
---------------------------------------------------------------
Output:
        res_part: results of the individual fits (contains a separate list for each event)
        par_part: input parameters of the individual fits (contains a separate list for each event)
res_no_ML: result without microlensing
chi2_red_part: Chi2 values of the individual fits (contains a separate list for each events)
------------------------------------------------------------'''
#---------------------------------------------------------------
#extract keywords
keywords.update(kwargs)
seed = keywords.get('seed', None) # seed's for the randomised process
Good = keywords.get('Good', False) # Second loop with variation of the mass
extended = keywords.get('extended', False) # Use of the data for extended Mission
ext_obs = keywords.get('ext_obs', False) # ext_obs include external observations
DR3 = keywords.get('DR3', False) # Use of the data for DR3 only
exact = keywords.get('exact', False) # Use the exact astrometric shift or the approximation
    n_error_picks = keywords.get('n_error_picks', 500)  # number of picks from the error ellipse of the measurements
n_par_picks = keywords.get('n_par_picks', 1) # number of different picks of input parameters
vary_par = keywords.get('vary_par', 1) # which parameters should be varied for n_par_picks
    prefit_motion = keywords.get('prefit_motion', False) # fit the proper motion without microlensing first, as a pre-fit
    vary_eta = keywords.get('vary_eta', False)           # use data while varying eta
onlysource = keywords.get('onlysource', False) # use the data of the source only
timer = keywords.get('timer', False) # print computing-time for different steps
    message = keywords.get('message', False)             # save a message file for keeping track of the process
    silent = keywords.get('silent', True)                # boolean; if True, information is not printed
#---------------------------------------------------------------
#---------------------------------------------------------------
    # computation-time tracker
cputt = [0.,0.,0.,0.,0.,0.,0]
cputt[0] -= time.time()
# update seed for the randomised process
if seed is not None:
np.random.seed(seed[part])
#---------------------------------------------------------------
#---------------------------------------------------------------
    # initialise arrays for storing the results
# fit results
res_part = []
# fit results without microlensing
res_no_ML = []
# input parameters
par_part = []
# Chi2_reduced value
chi2_red_part = []
chi2_red_mot_part = []
#---------------------------------------------------------------
#---------------------------------------------------------------
# load part of the Eventlist
if Good:
partstring = path+'Simulation/evpart/eventlist_Good_'+instring+'part_%i.pkl' % part
f = open(partstring,'rb')
EventList = pickle.load(f)
        f.close()
os.remove(path+'Simulation/evpart/eventlist_Good_'+instring+'part_%i.pkl' % part)
else:
partstring = path+'Simulation/evpart/eventlist_'+instring+'part_%i.pkl' % part
f = open(partstring,'rb')
EventList = pickle.load(f)
        f.close()
os.remove(path+'Simulation/evpart/eventlist_'+instring+'part_%i.pkl' % part)
cputt[0] += time.time()
#---------------------------------------------------------------
for i1 in range(len(EventList)):
#---------------------------------------------------------------
        # update process tracker
if message:
os.system('touch %s.%s-%s-0.message'%(message_folder+instring[:-1],part,i1))
#---------------------------------------------------------------
cputt[1] -= time.time()
#---------------------------------------------------------------
        # initialise lists for each event
# fit results
res_single = []
res_external_obs = [] #for comparison (test)
# input parameters
par_single = []
# Observations (for fitting without microlensing)
Obs_save = []
# Chi2_reduced value
chi2_red_single = []
#---------------------------------------------------------------
#get Stars
Stars = EventList[i1]
#---------------------------------------------------------------
# create observations from real scanning law
RealObs = RealData.getRealObs(Stars[0],Stars[1],**keywords)
Data = RawData.Data(Stars,*RealObs,**keywords)
Obs,Starnumber,Epoch,Obs_err, sc_scandir = Data.Observations(**keywords)
#---------------------------------------------------------------
cputt[1] += time.time()
for i2 in range(n_par_picks):
#loop over each set of differen input parameters
par = Data.par[i2]
for i3 in range(n_error_picks):
#loop over each set of differen picks from the error ellips of the Observation
cputt[6] = -time.time()
cputt[2] -= time.time()
# include outliers 5% chance
if ext_obs:
outliers = np.random.uniform(0,1,len(Starnumber[i2]))
outliers[-4*ext_obs:] = 1 #keep the external observations
n = np.where(outliers > 0.05)
else:
n = np.where(np.random.uniform(0,1,len(Starnumber[i2])) > 0.05)
#---------------------------------------------------------------
# get the right set of observations
Obs_i = Obs[i2][i3][n]
if len(Obs_i) < 11+4*ext_obs:
                    # not enough observations
re = -999* np.ones(len(Stars)*5+1)
res_single.append(re)
chi2_red_single.append(-999.)
par_single.append(par)
cputt[2] += time.time()
res_external_obs.append(re)
else:
# get the right set of observations
# (identical within each set of input parameters)
Starnumber_i = Starnumber[i2][n]
Epoch_i = Epoch[i2][n]
Obs_err_i = Obs_err[i2][n]
sc_scandir_i = sc_scandir[i2][n]
#---------------------------------------------------------------
#---------------------------------------------------------------
#compare with external observations (test)
if ext_obs:
res_ext= fitting_micro.Fit_Micro(Obs_i,Starnumber_i,Epoch_i,Obs_err_i, sc_scandir_i,\
bounds = True, **keywords)
res_external_obs.append(res_ext.x)
# remove external observations
Obs_i=Obs_i[:-4*ext_obs]
Obs_err_i = Obs_err_i[:-4*ext_obs]
Starnumber_i = Starnumber_i[:-4*ext_obs]
Epoch_i = Epoch_i[:-4*ext_obs]
sc_scandir_i = sc_scandir_i[:-4*ext_obs]
#---------------------------------------------------------------
#---------------------------------------------------------------
# store observations
Obs_save.append([Obs_i, Starnumber_i,Epoch_i, Obs_err_i, sc_scandir_i])
cputt[2] += time.time()
#---------------------------------------------------------------
#---------------------------------------------------------------
# Fit model to the observational data
cputt[3] -= time.time()
res= fitting_micro.Fit_Micro(Obs_i,Starnumber_i,Epoch_i,Obs_err_i, sc_scandir_i,\
bounds = True, **keywords)
                    cputt[3] += time.time()
#---------------------------------------------------------------
#---------------------------------------------------------------
#store results
cputt[4] -= time.time()
if len(res.x) != len(Stars)*5+1:
#Check if all stars have observations
re = -999* np.ones(len(Stars)*5+1)
re[:len(res.x)] = res.x
re[len(res.x):] = None
if len(res.x) == 6 : re[0] = -999
res_single.append(re)
else:
res_single.append(res.x)
chi2_red_single.append(res.cost * 2 / (len(res.fun) - 11))
par_single.append(par)
cputt[4] += time.time()
#---------------------------------------------------------------
#---------------------------------------------------------------
#print computing time (for optimisation)
if timer:
if not (i3 % 100):
cputt[5] += time.time()
print('Part %i-%i Step %i %.2f' % (part,i1,i3,cputt[6]))
if time.time() - ti > 10: return 0
#---------------------------------------------------------------
#---------------------------------------------------------------
            # update process tracker
if message:
os.system('mv %s.%s-%s-%s.message %s.%s-%s-%s.message'%\
(message_folder + instring[:-1], part, i1, i2,\
message_folder + instring[:-1], part, i1, i2+1))
#---------------------------------------------------------------
if not silent: print(i3, time.time() - ti)
#---------------------------------------------------------------
# sort results
cputt[5] -= time.time()
res_single = np.stack(res_single)
res_part.append(res_single)
par_single = np.stack(par_single)
par_part.append(par_single)
chi2_red_single = np.stack(chi2_red_single)
chi2_red_part.append(chi2_red_single)
#---------------------------------------------------------------
#compare without external observations (test)
if ext_obs:
res_external_obs = np.stack(res_external_obs)
res_no_ML.append(res_external_obs)
#---------------------------------------------------------------
#---------------------------------------------------------------
# fit mean result also without microlensing
if len(res_single[:,0]) > 1:
if ext_obs:
chi2_red_micro_best = 0
chi2_red_motion = 0
else:
p50 = percentile(res_single[:,0])[1]
if p50 == -999:
chi2_red_mot_part.append(-999)
chi2_red_micro_best = -999
res_no_ML.append(-999*np.ones(5*len(Stars)))
chi2_red_motion = -999
else:
best = np.where(res_single[:,0] == p50)[0][0]
res_motion = fitting_motion.Fit_Motion(*Obs_save[best],**keywords)
res_no_ML.append(res_motion.x)
chi2_red_micro_best = chi2_red_single[best]
chi2_red_motion= res_motion.cost * 2 / (len(Obs_save[best][1]) - 10)
chi2_red_mot_part.append(chi2_red_motion)
cputt[5] += time.time()
#---------------------------------------------------------------
#---------------------------------------------------------------
# print computing time (for optimisation)
if timer:
print('MC %.0f-%.0f: %.3f, %.3f, %.3f, %.3f, %.3f, %.3f, %.3f' % (part,i1,*tuple(cputt)))
#---------------------------------------------------------------
else:
#---------------------------------------------------------------
# print statistics
if len(res_single[:,0]) > 1:
print('%.0f-%.0f: %.3f, %.3f, %.3f, %.3f, %.3f, %.3f, %.3f, %.3f, %s'\
% (part,i1, Stars[0].mass,*percentile(res_single[:,0]), chi2_red_micro_best,chi2_red_motion, time.ctime().split(' ')[3]))
#---------------------------------------------------------------
if not silent: print('Part {} done!'.format(part))
#---------------------------------------------------------------
    # update process tracker
if message:
os.system('rm %s.%s*.message'%(message_folder+instring[:-1],part))
os.system('touch %s.%s.Done.message'%(message_folder+instring[:-1],part))
#---------------------------------------------------------------
return res_part, par_part, res_no_ML,chi2_red_part
```
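
A hedged driver sketch for the simulation module (an editor's addition): the keyword values are illustrative, and it assumes the package is importable as `MLG` with a valid `default_table` configured, as the imports at the top of the file suggest.

```python
# Illustrative driver; keyword values are examples only.
from MLG.Simulation.MonteCarlo import StartMC, loadPkl

# First pass: load the default input table and simulate every event on 4 cores.
MC = StartMC('', instring='demo', num_core=4, n_error_picks=100, n_par_picks=1)

# Second pass: repeat only the well-constrained ('good') events, now also varying the mass.
MC_good = StartMC(MC, num_core=4)

# The pickle written by StartMC can be reloaded later:
MC_reloaded = loadPkl(Good=True)
print(MC_reloaded['Header'])
print(len(MC_reloaded['Results']), 'events')
```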
#### File: MLG/Simulation/RawData.py
```python
import numpy as np
from MLG.Math import dist, dist_scandir,cosdeg,sindeg,sinmas,cosmas, unitFac
from MLG.StellarMotion import getSun,t_0,stars_position_micro
import time
__all__ = ['Star','Data','sigmaGaia', 'resolvable', 'MassGmag']
class Star(object):
'''---------------------------------------------------------------
Defines a star object with
position in deg,
proper motion in mas/yr
parallax in mas
Gmag
Source_ID
mass in M_sun
epoch of the closest aproach in Julian Year
---------------------------------------------------------------'''
def __init__(self,pos,pos_err,pm,pm_err,px,px_err, Gmag, mass = 0,mass_err = 0, id = -1,
unit_pos = 'deg',unit_pos_err = 'mas',
unit_pm = 'mas/yr', unit_px = 'mas',tca = -1, eta = 0):
'''---------------------------------------------------------------
initialise the Star
------------------------------------------------------------------
Input:
pos: position vector [ra,dec]
pos_error: error of the position vector [ra_err,dec_err]
pm: propermotion vector: [pmra,pmdec]
pm_err: error of the propermotion vector [ra_err,dec_err]
px: parallax
px_err: error of the parallax
Gmag: Magnitude of the Star
mass: Mass of the lens
mass_err: error of the mass
id: source_id
unit_pos: unit of the given position
unit_pos_err: unit of the given error of the position
unit_pm: unit of the given propermotion
unit_px: unit of the given parallax
            tca:        epoch of the closest approach
eta: value to store eta
---------------------------------------------------------------'''
self.unit = ['deg','mas/yr','mas','mas']
self.alpha = pos[0] * unitFac(unit_pos,'deg')
self.delta = pos[1] * unitFac(unit_pos,'deg')
self.alpha_err = pos_err[0] * unitFac(unit_pos_err,'mas')
self.delta_err = pos_err[1] * unitFac(unit_pos_err,'mas')
self.pmalpha = pm[0] * unitFac(unit_pm,'mas/yr')
self.pmdelta = pm[1] * unitFac(unit_pm,'mas/yr')
self.pmalpha_err = pm_err[0] * unitFac(unit_pm,'mas/yr')
self.pmdelta_err = pm_err[1] * unitFac(unit_pm,'mas/yr')
self.px = px * unitFac(unit_pm,'mas')
self.px_err = px_err * unitFac(unit_pm,'mas')
self.Gmag = Gmag
        if mass < 0: # use a random value for the mass
self.mass = np.random.uniform(0.07,1.5)
self.mass_err = 0.1*self.mass
else:
self.mass = mass
            if mass_err == 0: self.mass_err = 0.1 * mass # use a 10 % error
else: self.mass_err = mass_err
self.id = id
self.tca = tca
self.eta = eta
def getParameters(self, vary_par = 0, **kwargs ):
'''---------------------------------------------------------------
        returns the mass of the lens and the astrometric parameters, optionally varied within their errors
        Input:
        vary_par == 0: returns the original values
        vary_par == 1: returns the original values + a random pm,px
            from a typical distribution if pm + px are unknown
        vary_par == 2: returns the astrometric parameters randomly picked from their error distribution
        vary_par == 3: returns a random mass from the error distribution + original astrometric parameters
        vary_par == 4: returns a random mass from the error distribution + original astrometric
            parameters + a random pm,px from a typical distribution if pm + px are unknown
        vary_par == 5: returns random picks from the error distribution for all parameters
---------------------------------------------------------------'''
#vary the mass
if vary_par > 2 and self.mass_err > 0: mass = np.random.normal(self.mass, self.mass_err)
else: mass = self.mass
#vary all astrometric parameters
if vary_par % 3 == 2:
alpha_r = np.random.normal(self.alpha, self.alpha_err * unitFac(self.unit[3],self.unit[0]))
delta_r = np.random.normal(self.delta, self.delta_err * unitFac(self.unit[3],self.unit[0]))
if self.pmalpha != 0:
pmalpha_r = np.random.normal(self.pmalpha, self.pmalpha_err)
pmdelta_r = np.random.normal(self.pmdelta, self.pmdelta_err)
px_r = np.random.normal(self.px, self.px_err)
else:
#pick from a typical distribution
pmtot = np.random.normal(5,3)
pmdir = np.random.uniform(-np.pi,np.pi)
pmalpha_r = pmtot * np.cos(pmdir)
pmdelta_r = pmtot * np.sin(pmdir)
px_r = np.random.normal(2,1)
while px_r <= 0:
px_r = np.random.normal(2,1)
else:
alpha_r = self.alpha
delta_r = self.delta
if self.pmalpha != 0 or vary_par % 3 == 0:
#original astrometric parameters
pmalpha_r = self.pmalpha
pmdelta_r = self.pmdelta
px_r = self.px
else:
#pick from a typical distribution
pmtot = np.random.normal(5,3)
pmdir = np.random.uniform(-np.pi,np.pi)
pmalpha_r = pmtot * np.cos(pmdir)
pmdelta_r = pmtot * np.sin(pmdir)
px_r = np.random.normal(2,1)
while px_r <= 0:
px_r = np.random.normal(2,1)
return [mass, alpha_r, delta_r, pmalpha_r, pmdelta_r, px_r]
def getPos(self,unit='deg'):
#returns the 2015.5 position
return self.alpha* unitFac('deg',unit),self.delta* unitFac('deg',unit)
def getPm(self, unit='mas/yr'):
#returns the propermotion
return self.pmalpha * unitFac('mas/yr',unit),self.pmdelta * unitFac('mas/yr',unit)
def getPx(self, unit='mas'):
#returns the parallax
try: return self.px * unitFac('mas',unit)
except: return self.parallax * unitFac('mas',unit) #old variable name
def getMag(self):
#returns the G Magnitude
return(self.Gmag)
def getMass(self):
#returns the Mass of the lens
return self.mass
def getId(self):
#returns the Source_id
return(self.id)
def getTca(self):
#returns the epoch of the closest approach
return self.tca
def getEta(self):
#returns eta
return(self.eta)
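# Illustrative usage sketch (not part of the original module); the numeric values
# below are made-up placeholders, only the call signature follows the class above:
#
#   lens = Star(pos=[245.0, -20.0], pos_err=[0.3, 0.3], pm=[50.0, -30.0],
#               pm_err=[0.1, 0.1], px=25.0, px_err=0.05, Gmag=12.5, mass=0.5)
#   lens.getPos(unit='mas')         # 2015.5 position converted to mas
#   lens.getParameters(vary_par=3)  # mass drawn from its error distribution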
class Data(object):
'''---------------------------------------------------------------
Stars: List of Stars
NumStars: Number of stars
Mass_lens: Mass for each observational Data
data: [[Star_ID,alpha,delta,Epoch,Scandir, loc, NCCD],...] for each set
par: input parameters for each set
---------------------------------------------------------------'''
def __init__(self, Stars, Epoch, ScanDir, NCCD, Mass = None,\
out_unit = 'deg', n_par_picks = None, onlysource = False,timer =False, **kwargs):
'''---------------------------------------------------------------
Creates simulated sets of noise-free observational data (i.e. no observational
errors are included)
Creates one observation for every source and every epoch (if resolvable)
------------------------------------------------------------------
Input:
Stars: vector of 1 lens and multiple source stars
Epoch: epoch of observation for the event
ScanDir: Position angle of the scan direction for each epoch [rad]
NCCD: Number of CCD for the observation
Mass: mass of the lens
out_unit: unit of the observations
n_par_picks: number of different sets
if None: return one set
if list: [number of different parameters, number of different masses]
onlysource: returns only data of the source
timer: print computing time for different steps
**kwargs: vary_par for Star.getParameters
exact for StellarMotion.stars_position_micro
ext_obs for resolvable
---------------------------------------------------------------'''
#computing-time-tracker
cputt = [0,0,0]
cputt[0] -= time.time()
#-----------------------------------------------------------------
# Determine number of different sets
if n_par_picks is None:
n_par = 1
n_mass = 1
elif isinstance(n_par_picks, list):
n_par = n_par_picks[0]
n_mass = n_par_picks[1]
else:
n_mass = n_par_picks
n_par= n_par_picks
#-----------------------------------------------------------------
#-----------------------------------------------------------------
#unit of the data and input parameters
self.Stars = np.array(Stars)
self.unit = [out_unit, *Stars[0].unit]
# Number of stars
self.NumStars = len(Stars)
# setup Data array
dat_multi = []
# setup array for multiple sets of input parameters
par_multi = np.zeros((n_par,len(Stars) * 5 + 1))
# setup array for G magnitudes
Gmag =np.zeros(len(Stars))
for pick in range(n_par):
for i in range(len(Stars)):
if i == 0: #store parameters of the lens
par_multi[pick,0:6] = Stars[i].getParameters(**kwargs)
if Mass is not None:
par_multi[pick,0] = Mass
elif pick%(n_par/n_mass) != 0:
par_multi[pick,0] = par_multi[pick-1,0]
elif pick == 0:
self.Mass_lens = par_multi[pick,0]
else:
#store parameters of the Sources
par_multi[pick,5*i+1 : 5*i+6] = Stars[i].getParameters(**kwargs)[1:]
# store Gmagnitude of lens and source
if pick == 0:
Gmag[i] = Stars[i].getMag()
#-----------------------------------------------------------------
#-----------------------------------------------------------------
#calculate sin(alpha), cos(alpha), sin(delta), cos(delta)
if 'deg' in self.unit[1]:
scsc = sindeg(par_multi[0,1]), cosdeg(par_multi[0,1]), sindeg(par_multi[0,2]),\
max(cosdeg(par_multi[0,2]), 1e-16)
if 'mas' in self.unit[1]:
scsc = sinmas(par_multi[0,1]), cosmas(par_multi[0,1]), sinmas(par_multi[0,2]),\
max(cosmas(par_multi[0,2]), 1e-16)
if 'arcsec' in self.unit[1]:
scsc = sinmas(par_multi[0,1]/1000), cosmas(par_multi[0,1]/1000), sinmas(par_multi[0,2]/1000),\
max(cosmas(par_multi[0,2]/1000), 1e-16)
#-----------------------------------------------------------------
cputt[0] += time.time()
cputt[1] -= time.time()
#-----------------------------------------------------------------
# positional shift due to the parallax
# calculate position of Lagrange point 2
L2 = getSun(t_0 + Epoch).T
# calculate the inverse Jacobian for the parallax effect
loc = np.repeat(np.stack([(scsc[0] * L2[:,0] - scsc[1] * L2[:,1])\
/ max(scsc[3],1e-6),\
scsc[1] * scsc[2] * L2[:,0] + scsc[0] * scsc[2]\
* L2[:,1] - scsc[3] * L2[:,2]]).T.reshape(-1,1,2),self.NumStars,axis = 1)
#-----------------------------------------------------------------
#-----------------------------------------------------------------
# Calculate position for each star
# epoch for all Stars
Epoch_vec = np.repeat(Epoch.reshape(-1,1),self.NumStars,axis = 1)
# identifier for all Stars
num = np.arange(self.NumStars)
num_vec = np.repeat(num.reshape(1,-1),len(Epoch), axis = 0)
# observed position of lens and sources for every parameter pick
pos_vec = stars_position_micro(par_multi,num_vec.reshape(-1),Epoch_vec.reshape(-1),
loc_vec = loc.reshape(-1,2),scsc = scsc, out_unit = self.unit[0],
unit_pos = self.unit[1], unit_pm = self.unit[2],
unit_px = self.unit[3], **kwargs)
pos_vec = pos_vec.reshape(n_par,-1,self.NumStars,2)
#-----------------------------------------------------------------
#Translate NCCD and ScanDir into numpy arrays
ScanDir= np.array(ScanDir)
NCCD = np.array(NCCD)
cputt[1] += time.time()
cputt[2] -= time.time()
#loop over parameter picks
for k in range(n_par):
#get position for this set of parameters
pos_k = pos_vec[k]
if onlysource:
#return only the observations of the source
W = np.ones((len(pos_k),len(Gmag)))
W[:,0]=0
else:
#check if lens and sources are resolvable
which = resolvable(pos_k,Gmag,ScanDir, **kwargs)
#store ID,alpha,delta,Epoch,ScanDir, Loc, NCCD for each resolved datapoint
dat = np.hstack([which[1].reshape(-1,1),pos_k[which],Epoch[which[0]].reshape(-1,1),\
ScanDir[which[0]].reshape(-1,1),loc[which[0],0,:],NCCD[which[0]].reshape(-1,1)])
dat_multi.append(dat)
cputt[2] += time.time()
#print computing time
if timer: print('RD: %.2f, %.2f, %.2f' % tuple(cputt))
self.data= dat_multi
self.par = par_multi
def Observations(self, n_error_picks = None, timer = False,**kwargs):
'''---------------------------------------------------------------
Description:
Creates sets of observations (including noise)
from noise-free data
------------------------------------------------------------------
Input:
n_error_picks: number of picks from the error ellipse
------------------------------------------------------------------
Output:
Obs: list of [alpha,delta] for all Observation
StarID: list of StarID's for all Observation
Epoch: list of Epochs for all Observation
sigAL: list of precision in along-scan direction for all Observations
sc_ScanDir: list of [sin(ScanDir),cos(ScanDir)] for all Observations
---------------------------------------------------------------'''
#computing-time-tracker
cputt = [0,0,0,0]
cputt[0] -= time.time()
#if isinstance(self.data, list):
#combine all datasets
dat = np.vstack(self.data)
#determine length of each dataset
length = np.array([len(i) for i in self.data])
# first Index of each dataset
index = np.array([np.sum(length[:i]) for i in range(1,len(self.data))]).astype(int)
#else:
# dat=self.data
if n_error_picks is None:
n_error_picks = 1
cputt[0] += time.time()
cputt[1]-= time.time()
#---------------------------------------------------------------
#get StarID,Epoch ScanDir NCCD for each observation
StarID = dat[:,0].astype(int)
Epoch = dat[:,3]
ScanDir = dat[:,4]
NCCD = dat[:,7]
#---------------------------------------------------------------
#---------------------------------------------------------------
#calculate accuracy of Gaia
if self.Stars is None or len(StarID) == 0:
#accuracy in along-scan direction
sigAL = sigmaGaia() * np.ones([len(dat[:,0])])
#set across-scan accuracy to 1 arcsec (is not used for fitting)
sigAC = 1000 * np.ones([len(dat[:,0])])
else:
#accuracy in along-scan direction
sigAL = sigmaGaia_np(self.Stars[StarID])/np.sqrt(NCCD)
#set across-scan accuracy to 1 arcsec (is not used for fitting)
sigAC = 1000 * np.ones([len(dat[:,0])])
#---------------------------------------------------------------
#---------------------------------------------------------------
#create multidimensional random values from a Gaussian distribution
rand = np.random.normal(0,1,len(dat)*2*n_error_picks).reshape(n_error_picks,-1, 1,2)
cputt[1] += time.time()
#---------------------------------------------------------------
#---------------------------------------------------------------
#Transform into alpha-delta for each observation
cputt[2] -= time.time()
co = np.cos(ScanDir)
si = np.sin(ScanDir)
sc_ScanDir = np.vstack([si,co]).T
cd = cosdeg(dat[:,2])
trans = np.zeros((1,len(dat),2,2))
trans[0,:,0,0] = sigAL * si/cd/3.6e6
trans[0,:,0,1] = - sigAC * co/cd/3.6e6
trans[0,:,1,0] = co*sigAL/3.6e6
trans[0,:,1,1] = si*sigAC/3.6e6
Obs = np.sum(trans*rand,3) + dat[:,1:3]
cputt[2] += time.time()
#---------------------------------------------------------------
#---------------------------------------------------------------
# order output
cputt[3] -= time.time()
#if isinstance(self.data, list):
Obs = np.split(Obs,index, axis = 1)
StarID = np.split(StarID,index)
Epoch =np.split(Epoch,index)
sigAL = np.split(sigAL,index)
sc_ScanDir = np.split(sc_ScanDir,index)
cputt[3] += time.time()
#---------------------------------------------------------------
if timer:
print('OB: %.4f,%.4f,%.4f,%.4f' % tuple(cputt))
return Obs,StarID,Epoch, sigAL, sc_ScanDir
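# Illustrative workflow sketch (not part of the original module); `stars`, `epochs`,
# `scandir` and `nccd` are assumed to be prepared elsewhere (lens first, then sources):
#
#   data = Data(stars, epochs, scandir, nccd, n_par_picks=[100, 10], vary_par=5)
#   obs, star_id, epoch, sig_al, sc_scandir = data.Observations(n_error_picks=50)
#
# `data.par` then holds the input parameters of each set and `obs` the corresponding
# noisy realisations of the observations.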
def sigmaGaia(Stars=None, Gmag = None):
'''---------------------------------------------------------------
Calculate Gaia's accuracy in the along-scan direction from the G magnitude
------------------------------------------------------------------
Input:
Stars: Star Object
Gmag: apparent G magnitude (only if Stars == None)
------------------------------------------------------------------
Output:
sigmaCCD: sigma in the along-scan direction for one CCD observation in mas
---------------------------------------------------------------'''
if Stars is None:
if Gmag is not None:
z = 10 ** (0.4 * (np.maximum(Gmag, 14) - 15))
sigmaCCD =((-1.631 + 680.766 * z + 32.732 * z**2)**0.5/1000 *7.75+0.1)
else: return 1
else:
z = 10 ** (0.4 * (np.maximum(Stars.getMag(), 12) - 15))
sigmaCCD =((-1.631 + 680.766 * z + 32.732 * z**2)**0.5/1000 *7.75+0.1)
return sigmaCCD
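# Hedged example (comment only): for a G = 15 source both branches above give
# z = 10**(0.4*(15-15)) = 1, i.e. roughly 0.3 mas per CCD; Data.Observations then
# divides this by sqrt(NCCD) for a full transit.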
sigmaGaia_np = np.vectorize(sigmaGaia)
def resolvable(pos_vec, Gmag , ScanDir, ext_obs = False,**kwargs):
''' ---------------------------------------------------------------
checks if two or more stars are within one readout window
Input
pos_vec: position for each star and each observation
Gmag: G Magnitude of the Stars
ScanDir: Position angle of the scan direction for each observation
ext_obs: external observations: exclude the last 2*n data points from checking
------------------------------------------------------------------
Output
Indices of resolvable observations
---------------------------------------------------------------'''
#-----------------------------------------------------------------
# limits for the along-scan and across-scan directions
AL_lim= 59*6 *np.ones(len(Gmag))
AL_lim[np.where(Gmag < 13)] = 59*9 # bright sources have a larger readout window
AC_lim= 177*6 *np.ones(len(Gmag))
Lim = np.vstack([AL_lim,AC_lim]).T
#-----------------------------------------------------------------
unitfactor = 3.6e6 #translate deg to mas
#-----------------------------------------------------------------
if len(pos_vec.shape) == 3: # check if multiple observations are given
#split pos to ra and dec
ra = pos_vec[:,:,0]
dec = pos_vec[:,:,1]
Gmag = np.repeat([Gmag],len(ra),axis = 0) # repeat Gmag for each observation
# [sin, cos] and [cos, -sin] of the scan direction
sc_scandir = np.array([np.sin(ScanDir),np.cos(ScanDir)]).T
cs_scandir = np.array([np.cos(ScanDir),-np.sin(ScanDir)]).T
#Check if all other stars are outside of their readout window
Window = np.sum(np.floor(np.abs(((np.repeat(ra.reshape(len(ra),-1,1,1),len(ra[0]),axis = 2)\
- np.repeat(ra.reshape(len(ra),1,-1,1),len(ra[0]),axis = 1))\
*sc_scandir.reshape(-1,1,1,2) * np.cos(dec[:,0]).reshape(-1,1,1,1)\
+(np.repeat(dec.reshape(len(dec),-1,1,1),len(dec[0]),axis = 2) \
- np.repeat(dec.reshape(len(dec),1,-1,1),len(dec[0]),axis = 1))\
*cs_scandir.reshape(-1,1,1,2))*unitfactor/Lim.reshape(1,1,-1,2))),axis =3)
# Check if one star is brighter than the other
CompG = np.maximum(np.repeat(Gmag.reshape(len(ra),1,-1),len(ra[0]),axis = 1) \
- np.repeat(Gmag.reshape(len(ra),-1,1),len(ra[0]),axis = 2) -1, 0)
# Identity matrix to avoid comparison with itself
I = np.repeat([np.eye(len(ra[0]))],len(ra), axis = 0)
for i in range(0,2 * ext_obs):
I[-i-1] = np.ones(len(ra[0]))
resolve = np.prod(Window+CompG+I, axis = 2) #exclude observation if all are False
else: #only one observation is given
#split pos to ra and dec
ra = pos_vec[:,0]
dec = pos_vec[:,1]
# [sin, cos] and [cos, -sin] of the scan direction
sc_scandir = [np.sin(ScanDir),np.cos(ScanDir)]
cs_scandir = [np.cos(ScanDir),-np.sin(ScanDir)]
#Check if all other stars are outside of their readout window
Window = np.sum(np.floor(np.abs(((np.repeat(ra.reshape(-1,1,1),len(ra),axis = 1) \
- np.repeat(ra.reshape(1,-1,1),len(ra),axis = 0)) * np.cos(dec[0])*sc_scandir\
+ (np.repeat(dec.reshape(-1,1,1),len(ra),axis = 1 ) \
- np.repeat(dec.reshape(1,-1,1),len(ra),axis = 0)) * cs_scandir)*unitfactor/Lim)),\
axis =2)
# Check if on star is brighter than the other
CompG = np.maximum(np.repeat(Gmag.reshape(1,-1),len(ra),axis = 0) \
- np.repeat(Gmag.reshape(-1,1),len(ra),axis = 1) -1, 0)
# Identity matrix to avoid comparison with itself
I = np.eye(len(ra))
resolve = np.prod(Window+CompG+I, axis = 1)
return np.where(resolve)
def MassGmag(Gmag,px):
'''---------------------------------------------------------------
Returns the mass of a star based on the absolute G magnitude
and mass-luminosity relations (see Klüter et al. 2018)
---------------------------------------------------------------
Input
Gmag: apparent G magnitude
px: Parallax in mas
Output
mass: in M_sun
---------------------------------------------------------------'''
Gabs = Gmag + 5 * math.log(px/100, 10)
a1,b1,c1,a2,b2,n = [7.86232823e-03, -2.90891912e-01, 1.18248766e+00, -3.01175062e-01, 1.88952947e+00, 8.71127084e+01]
Mass = pow(pow(np.exp(a1*Gabs*Gabs+b1*Gabs +c1),-n)+pow(np.exp(a2*Gabs+b2),-n), -1/n)
return max(0.07, Mass)
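# Hedged worked example (comment only): for Gmag = 10 and px = 100 mas,
# Gabs = 10 + 5*log10(100/100) = 10, and the fit above returns the corresponding
# mass in solar masses, floored at 0.07 M_sun.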
```
#### File: jkluter/MLG/utils.py
```python
import matplotlib.pyplot as plt
import numpy as np
__all__ = ['color_own']
def color_own(length, rng = [0,1], colormap = plt.cm.gist_rainbow, ):
m = 1
w = np.array([245,245,245,0]).reshape(-1,4)
m = np.array([145,22,104,0]).reshape(-1,4)
c = np.array([0,176,230,0]).reshape(-1,4)
y = np.array([250,194,0,0]).reshape(-1,4)
r = np.array([225,0,35,0]).reshape(-1,4)
g = np.array([1,143,53,0]).reshape(-1,4)
b = np.array([0,66,148,0]).reshape(-1,4)
alpha = [0,0,0,255]
if isinstance(length, int):
color_give = False
c1 = colormap(np.linspace(*rng,length)).reshape(-1,4)
else:
color_give = True
c1 = np.array(length).reshape(-1,4)
cmin = np.argmin(c1[:,:3],axis = 1)
cmax = np.argmax(c1[:,:3],axis = 1)
case1 = np.where((cmin == 0))
case2 = np.where((cmin == 1))
case3 = np.where((cmin == 2))
case4 = np.where((cmax == 0))
case5 = np.where((cmax == 1))
case6 = np.where((cmax == 2))
wms = np.sort(c1[:,:3],axis = 1)
white = wms[:,0].reshape(-1,1)
wms = wms -white
mix = wms[:,1].reshape(-1,1)
wms = wms -mix
col = wms[:,2].reshape(-1,1)
color = w * white
color[case1] += c * mix[case1]
color[case2] += m * mix[case2]
color[case3] += y * mix[case3]
color[case4] += r * col[case4]
color[case5] += g * col[case5]
color[case6] += b * col[case6]
color += alpha
if color_give:
if len(color) == len(length):
return color/255
else: return color.reshape(4)/255
return color/255
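# Illustrative usage sketch (not part of the original module):
#
#   colors = color_own(5)                    # five RGBA colors sampled from gist_rainbow
#   single = color_own([0.2, 0.4, 0.6, 1.0]) # remap one given RGBA value
#
# An int samples `colormap` over `rng`; RGBA input is remapped onto the custom
# colour wheel defined above. Both return values scaled to 0-1.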
``` |
{
"source": "jklvv/stock",
"score": 2
} |
#### File: stock/fund/fund_share_update.py
```python
"# -*- coding"
import math
import re
"""
@author:xda
@file:fund_share_update.py
@time:2021/01/20
"""
# Fund shares
import sys
sys.path.append('..')
from configure.settings import DBSelector, send_from_aliyun
from common.BaseService import BaseService
from configure.util import notify
import requests
import warnings
import datetime
warnings.filterwarnings("ignore")
from sqlalchemy.orm import relationship
from sqlalchemy import Column, String, INTEGER, VARCHAR, DATE, DateTime, ForeignKey, FLOAT
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# Base class for ORM model objects:
Base = declarative_base()
class FundBaseInfoModel(Base):
# Table name:
__tablename__ = 'LOF_BaseInfo'
# Table columns:
id = Column(INTEGER, primary_key=True, autoincrement=True)
code = Column(VARCHAR(6), comment='基金代码', unique=True)
name = Column(VARCHAR(40), comment='基金名称')
category = Column(VARCHAR(8), comment='基金类别')
invest_type = Column(VARCHAR(6), comment='投资类别')
manager_name = Column(VARCHAR(48), comment='管理人呢名称')
issue_date = Column(DATE, comment='上市日期')
# child = relationship('ShareModel', back_populates='LOF_BaseInfo')
child = relationship('ShareModel')
def __str__(self):
return f'<{self.code}><{self.name}>'
class ShareModel(Base):
# Table name:
__tablename__ = 'LOF_Share'
# Table columns:
id = Column(INTEGER, primary_key=True, autoincrement=True)
code = Column(VARCHAR(6), ForeignKey('LOF_BaseInfo.code'), comment='代码')
date = Column(DATE, comment='份额日期')
share = Column(FLOAT, comment='份额 单位:万份')
parent = relationship('FundBaseInfoModel')
# parent = relationship('FundBaseInfoModel', back_populates='LOF_Share')
crawltime = Column(DateTime, comment='爬取日期')
class Fund(BaseService):
def __init__(self, first_use=False):
super(Fund, self).__init__(f'../log/{self.__class__.__name__}.log')
self.first_use = first_use
self.engine = self.get_engine()
def get_engine(self):
return DBSelector().get_engine('db_stock')
def create_table(self):
# Initialize the database connection:
Base.metadata.create_all(self.engine) # create the table schema
def get_session(self):
return sessionmaker(bind=self.engine)
def get(self, url, retry=5, js=True):
start = 0
while start < retry:
try:
response = self.session.get(url, headers=self.headers,
verify=False)
except Exception as e:
self.logger.error(e)
start += 1
else:
if js:
content = response.json()
else:
content = response.text
return content
if start == retry:
self.logger.error('重试太多')
return None
class SZFundShare(Fund):
def __init__(self, first_use=False):
super(SZFundShare, self).__init__(first_use)
# self.url = 'http://fund.szse.cn/api/report/ShowReport/data?SHOWTYPE=JSON&CATALOGID=1000_lf&TABKEY=tab1&PAGENO={}&selectJjlb=LOF&random=0.019172632634173903'
self.all_fund_url = 'http://fund.szse.cn/api/report/ShowReport/data?SHOWTYPE=JSON&CATALOGID=1000_lf&TABKEY=tab1&PAGENO={}&random=0.1292751130110099'
self.session = requests.Session()
self.logger.info('start...sz fund')
self.LAST_TEXT = ''
if self.first_use:
self.create_table()
self.db_session = self.get_session()
self.sess = self.db_session()
self.logger.info(f'{self.today} start to crawl....')
@property
def headers(self):
_header= {
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh,en;q=0.9,en-US;q=0.8,zh-CN;q=0.7",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Content-Type": "application/json",
"Host": "fund.szse.cn",
"Pragma": "no-cache",
"Referer": "http://fund.szse.cn/marketdata/fundslist/index.html?catalogId=1000_lf&selectJjlb=ETF",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36",
"X-Request-Type": "ajax",
"X-Requested-With": "XMLHttpRequest",
}
return _header
def convert(self, float_str):
try:
return_float = float(float_str)
except:
return_float = None
return return_float
def json_parse(self, js_data):
date = (datetime.date.today() + datetime.timedelta(days=-1)).strftime('%Y-%m-%d')
# previous day computed manually?
data = js_data[0].get('data', [])
if not data:
self.stop = True
return None
for item in data:
jjlb = item['jjlb']
tzlb = item['tzlb'] # investment type
ssrq = item['ssrq']
name = self.extract_name(item['jjjcurl'])
dqgm = self.convert_number(item['dqgm']) # current fund size
glrmc = self.extract_glrmc(item['glrmc']) # manager name
code = self.extract_code(item['sys_key'])
yield (jjlb, tzlb, ssrq, dqgm, glrmc, code, name, date)
def extract_name(self, name):
return re.search('<u>(.*?)</u>', name).group(1)
def extract_code(self, code):
return re.search('<u>(\d{6})</u>', code).group(1)
def extract_glrmc(self, glrmc):
if re.search(('\<a.*?\>(.*?)\</a\>'), glrmc):
glrmc = re.search(('\<a.*?\>(.*?)\</a\>'), glrmc).group(1).strip()
return glrmc
def model_process(self, jjlb, tzlb, ssrq, dqgm, glrmc, code, name, date):
obj = self.sess.query(FundBaseInfoModel).filter_by(code=code).first()
if not obj:
base_info = FundBaseInfoModel(
code=code,
name=name,
category=jjlb,
invest_type=tzlb,
manager_name=glrmc,
issue_date=ssrq,
)
self.sess.add(base_info)
self.sess.commit()
share_info = ShareModel(
code=code,
date=date,
share=dqgm,
crawltime=datetime.datetime.now(),
)
self.sess.add(share_info)
self.sess.commit()
def convert_number(self, s):
return float(s.replace(',', ''))
def run(self):
page = 1
self.stop = False
while not self.stop:
content = self.get(self.all_fund_url.format(page))
for item in self.json_parse(content):
self.model_process(*item)
page += 1
class SHFundShare(Fund):
def __init__(self, kind,date,first_use=False):
super(SHFundShare, self).__init__(first_use)
self.lof_url = 'http://query.sse.com.cn/commonQuery.do?=&jsonCallBack=jsonpCallback1681&sqlId=COMMON_SSE_FUND_LOF_SCALE_CX_S&pageHelp.pageSize=10000&FILEDATE={}&_=161146986468'
self.etf_url = 'http://query.sse.com.cn/commonQuery.do?jsonCallBack=jsonpCallback28550&isPagination=true&pageHelp.pageSize=25&pageHelp.pageNo={}&pageHelp.cacheSize=1&sqlId=COMMON_SSE_ZQPZ_ETFZL_XXPL_ETFGM_SEARCH_L&STAT_DATE={}&pageHelp.beginPage={}&pageHelp.endPage=30&_=1611473902414'
# self.today_ = '20210122' # LOF
if date=='now':
self.today_ = (datetime.datetime.now()+ datetime.timedelta(days=-1)).strftime('%Y%m%d')
else:
self.today_=self.today = date
# self.today ='2021-01-22' # ETF
self.ETF_COUNT_PER_PAGE = 25
self.url_option_dict = {
'ETF': {'url': self.etf_url, 'date': self.today},
'LOF': {'url': self.lof_url, 'date': self.today_}
}
self.kind=kind.lower()
self.session = requests.Session()
self.logger.info('start...sh fund')
self.LAST_TEXT = ''
if self.first_use:
self.create_table()
self.db_session = self.get_session()
self.sess = self.db_session()
@property
def headers(self):
return {
"Host": "query.sse.com.cn",
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:84.0) Gecko/20100101 Firefox/84.0",
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
"Referer": "http://www.sse.com.cn/market/funddata/volumn/lofvolumn/",
}
def crawl_lof(self):
options = self.url_option_dict['LOF']
date = options.get('date')
url = options.get('url')
content = self.get(url.format(date), js=False)
js_data = self.jsonp2json(content)
self.process_lof(js_data)
def process_lof(self, js_data):
result = js_data.get('result')
for item in result:
code = item['FUND_CODE']
name = item['FUND_ABBR']
date = item['TRADE_DATE']
try:
share = float(item['INTERNAL_VOL'].replace(',',''))
except Exception as e:
print(e)
share=None
self.process_model(code, name, date, share, 'LOF')
def crawl_etf(self):
options = self.url_option_dict['ETF']
date = options.get('date')
url = options.get('url')
current_page = 1
while True:
content = self.get(url.format(current_page, date, current_page), js=False)
js_data = self.jsonp2json(content)
total_count = js_data.get('pageHelp').get('total')
print(f'page : {current_page}')
self.process_etf(js_data)
max_page = math.ceil(total_count / self.ETF_COUNT_PER_PAGE) # ETF_COUNT_PER_PAGE entries per page
if current_page > max_page:
break
current_page += 1
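# Hedged example (comment only): with total_count = 60 and ETF_COUNT_PER_PAGE = 25,
# max_page = math.ceil(60 / 25) = 3, so the loop keeps requesting pages until
# current_page exceeds 3.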
def process_etf(self, js_data):
result = js_data.get('result')
for item in result:
code = item['SEC_CODE']
name = item['SEC_NAME']
date = item['STAT_DATE']
share = item['TOT_VOL']
try:
share = float(share)
except Exception as e:
print(e)
self.process_model(code, name, date, share, 'ETF')
def run(self):
'LOF and ETF'
# for type_, options in self.url_option_dict.items():
if self.kind=='etf':
self.logger.info('crawling etf .....')
self.crawl_etf()
if self.kind=='lof':
self.logger.info('crawling lof .....')
self.crawl_lof()
def process_model(self, code, name, date, share, type_):
obj = self.sess.query(FundBaseInfoModel).filter_by(code=code).first()
if not obj:
obj = FundBaseInfoModel(
code=code,
name=name,
category=type_,
invest_type='',
manager_name='',
issue_date=None,
)
try:
self.sess.add(obj)
except Exception as e:
print(e)
else:
self.sess.commit()
print(f'插入一条记录{code},{date}')
if not self.sess.query(ShareModel).filter_by(code=code, date=date).first():
share_info = ShareModel(
code=code,
date=date,
share=share,
crawltime=datetime.datetime.now(),
)
try:
self.sess.add(share_info)
except Exception as e:
print(e)
else:
print(f'插入一条记录{code},{date}')
self.sess.commit()
if __name__ == '__main__':
app = SZFundShare(first_use=False)
app.run()
# SHFundShare requires `kind` ('lof' or 'etf') and `date` ('now' or 'YYYYMMDD')
app = SHFundShare(kind='lof', date='now', first_use=False)
app.run()
``` |
{
"source": "jklwonder/youtube-data-api",
"score": 3
} |
#### File: youtube-data-api/youtube_api/youtube_api_utils.py
```python
import sys
import json
import datetime
import requests
import html
import tldextract
from bs4 import BeautifulSoup, Comment
import re
import signal
from urllib.parse import urlparse
from urllib.parse import parse_qs
'''
This contains utilities used by other functions in the YoutubeDataApi class, as well as a few convenience functions for data analysis.
'''
__all__ = [
'_chunker',
'_load_response',
'_text_from_html',
'parse_yt_datetime',
'strip_video_id_from_url',
'get_upload_playlist_id',
'get_liked_playlist_id',
'is_user',
'strip_youtube_id',
'get_channel_id_from_custom_url',
'get_url_from_video_id'
]
class TimeoutError(Exception):
pass
class timeout:
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum, frame):
raise TimeoutError(self.error_message)
def __enter__(self):
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type, value, traceback):
signal.alarm(0)
def _chunker(l, chunksize):
"""Yield successive ``chunksize``-sized chunks from l."""
for i in range(0, len(l), chunksize):
yield l[i:i + chunksize]
def _load_response(response):
'''
Loads the response to json, and checks for errors.
'''
response.raise_for_status()
response_json = json.loads(response.text)
return response_json
def _text_from_html(html_body):
'''
Gets clean text from html.
'''
def _tag_visible(element):
'''Gets the text elements we're interested in'''
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
soup = BeautifulSoup(html_body, 'html.parser')
raw_text = soup.findAll(text=True)
visible_text = filter(_tag_visible, raw_text)
text = u" ".join(t.strip() for t in visible_text)
text = re.sub(r"[\n\t]", ' ', text)
text = re.sub(r'<.+?>', '', text)
text = html.unescape(text)
text = ' '.join(text.split())
return text
def parse_yt_datetime(date_str):
'''
Parses a date string returned from YouTube's API into a Python datetime.
'''
date = None
if date_str:
try:
date = datetime.datetime.strptime(date_str,"%Y-%m-%dT%H:%M:%S.%fZ")
except:
pass
return date
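# Illustrative examples (comment only):
#
#   parse_yt_datetime("2019-01-01T12:30:00.000Z")  # -> datetime.datetime(2019, 1, 1, 12, 30)
#   parse_yt_datetime(None)                        # -> None (unparsable input falls through)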
def strip_video_id_from_url(url):
'''Strips the video_id from YouTube URL.'''
domain = tldextract.extract(url).registered_domain
url_ = None
if 'youtu.be' in domain:
url_ = url[url.rindex('/') + 1:]
if '?' in url_:
url_ = url_[:url_.rindex('?')]
elif "youtube.com" in domain and "embed" in url:
url_ = url.rpartition("/")[-1].partition("?")[0]
elif "youtube.com" in domain and "attribution_link" in url:
u = urlparse(url)
# Get and parse the query string, which will look like:
#. a=--oPiH1x0pU&u=%2Fwatch%3Fv%3DHR1Ta25HkBM%26feature%3Dshare
q = parse_qs(u.query)
# Now we have a decoded query string, e.g., 'u':['/watch?v=HR1Ta25HkBM&feature=share']
if ( 'u' in q ):
# Parse like it was a normal /watch url
q = parse_qs(urlparse(q['u'][0]).query)
if ( 'v' in q ):
url_ = q['v'][0]
elif ( 'video_id' in q ):
url_ = q['video_id'][0]
elif "youtube.com" in domain:
u = urlparse(url)
q = parse_qs(u.query)
if ( 'v' in q ):
url_ = q['v'][0]
elif ( 'video_id' in q ):
url_ = q['video_id'][0]
return url_
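# Illustrative examples (comment only) of the URL forms handled above:
#
#   strip_video_id_from_url('https://youtu.be/dQw4w9WgXcQ')                     # -> 'dQw4w9WgXcQ'
#   strip_video_id_from_url('https://www.youtube.com/watch?v=dQw4w9WgXcQ')      # -> 'dQw4w9WgXcQ'
#   strip_video_id_from_url('https://www.youtube.com/embed/dQw4w9WgXcQ?rel=0')  # -> 'dQw4w9WgXcQ'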
def get_upload_playlist_id(channel_id):
'''Given a channel_id, returns the user uploaded playlist id.'''
playlist_id = 'UU' + channel_id[2:]
return playlist_id
def get_liked_playlist_id(channel_id):
'''Given a channel_id, returns the user liked playlist id.'''
playlist_id = 'LL' + channel_id[2:]
return playlist_id
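# Illustrative example (comment only): channel IDs start with 'UC', so swapping the
# prefix gives the auto-generated playlists:
#
#   get_upload_playlist_id('UCuAXFkgsw1L7xaCfnd5JJOw')  # -> 'UUuAXFkgsw1L7xaCfnd5JJOw'
#   get_liked_playlist_id('UCuAXFkgsw1L7xaCfnd5JJOw')   # -> 'LLuAXFkgsw1L7xaCfnd5JJOw'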
def is_user(channel_url):
'''
Checks if url is channel or user
'''
if 'youtube.com/user/' in channel_url:
return True
elif 'youtube.com/channel/' in channel_url:
return False
else:
raise Exception("Didn't recognize url {}".format(channel_url))
def strip_youtube_id(channel_url):
'''
From a URL returns the YT ID.
'''
return (channel_url.rstrip('/').replace('/featured', '')
.split('/')[-1].split('#')[0])
def get_channel_id_from_custom_url(url):
'''
Gets channel id from a url of a custom url IE: http://youtube.com/stefbot
returns a channel_id IE: UCuMo0RRtnNDuMB8DV5stEag
'''
r = requests.get(url)
soup = BeautifulSoup(r.content, 'lxml')
class_ = ('yt-uix-button yt-uix-button-size-default '
'yt-uix-button-subscribe-branded '
'yt-uix-button-has-icon no-icon-markup '
'yt-uix-subscription-button yt-can-buffer')
channel_id = soup.find('button', class_=class_).get('data-channel-external-id')
return channel_id
def get_url_from_video_id(video_id):
'''
Given a video id, this function returns the full URL.
'''
url = "https://youtube.com/watch?v={}".format(video_id)
return url
``` |
{
"source": "jklymak/dolfyn",
"score": 3
} |
#### File: dolfyn/adp/clean.py
```python
import numpy as np
import xarray as xr
from scipy.signal import medfilt
from ..tools.misc import medfiltnan
from ..rotate.api import rotate2
from ..rotate.base import _make_model, quaternion2orient
def set_range_offset(ds, h_deploy):
"""
Adds an instrument's height above seafloor (for an up-facing instrument)
or depth below water surface (for a down-facing instrument) to the range
coordinate. Also adds an attribute to the Dataset with the current
"h_deploy" distance.
Parameters
----------
ds : xarray.Dataset
The adcp dataset to ajust 'range' on
h_deploy : numeric
Deployment location in the water column, in [m]
Returns
-------
None, operates "in place"
Notes
-----
`Center of bin 1 = h_deploy + blank_dist + cell_size`
Nortek doesn't take `h_deploy` into account, so the range DOLfYN
calculates is the distance from the ADCP transducers. TRDI asks for `h_deploy`
input in their deployment software, so it is already known to DOLfYN.
If the ADCP is mounted on a tripod on the seafloor, `h_deploy` will be
the height of the tripod +/- any extra distance to the transducer faces.
If the instrument is vessel-mounted, `h_deploy` is the distance between
the surface and downward-facing ADCP's transducers.
"""
r = [s for s in ds.dims if 'range' in s]
for val in r:
ds[val] = ds[val].values + h_deploy
ds[val].attrs['units'] = 'm'
if hasattr(ds, 'h_deploy'):
ds.attrs['h_deploy'] += h_deploy
else:
ds.attrs['h_deploy'] = h_deploy
def find_surface(ds, thresh=10, nfilt=None):
"""
Find the surface (water level or seafloor) from amplitude data and
adds the variable "depth" to the input Dataset.
Parameters
----------
ds : xarray.Dataset
The full adcp dataset
thresh : int
Specifies the threshold used in detecting the surface.
(The amount that amplitude must increase by near the surface for it to
be considered a surface hit)
nfilt : int
Specifies the width of the median filter applied, must be odd
Returns
-------
None, operates "in place"
"""
# This finds the maximum of the echo profile:
inds = np.argmax(ds.amp.values, axis=1)
# This finds the first point that increases (away from the profiler) in
# the echo profile
edf = np.diff(ds.amp.values.astype(np.int16), axis=1)
inds2 = np.max((edf < 0) *
np.arange(ds.vel.shape[1] - 1,
dtype=np.uint8)[None, :, None], axis=1) + 1
# Calculate the depth of these quantities
d1 = ds.range.values[inds]
d2 = ds.range.values[inds2]
# Combine them:
D = np.vstack((d1, d2))
# Take the median value as the estimate of the surface:
d = np.median(D, axis=0)
# Throw out values that do not increase near the surface by *thresh*
for ip in range(ds.vel.shape[1]):
itmp = np.min(inds[:, ip])
if (edf[itmp:, :, ip] < thresh).all():
d[ip] = np.NaN
if nfilt:
dfilt = medfiltnan(d, nfilt, thresh=.4)
dfilt[dfilt == 0] = np.NaN
d = dfilt
ds['depth'] = xr.DataArray(d, dims=['time'], attrs={'units': 'm'})
def find_surface_from_P(ds, salinity=35):
"""
Calculates the distance to the water surface. Temperature and salinity
are used to calculate seawater density, which is in turn used with the
pressure data to calculate depth.
Parameters
----------
ds : xarray.Dataset
The full adcp dataset
salinity: numeric
Water salinity in psu
Returns
-------
None, operates "in place" and adds the variables "water_density" and
"depth" to the input dataset.
Notes
-----
Requires that the instrument's pressure sensor was calibrated/zeroed
before deployment to remove atmospheric pressure.
Calculates seawater density at normal atmospheric pressure according
to the UNESCO 1981 equation of state. Does not include hydrostatic pressure.
"""
# Density calcation
T = ds.temp.values
S = salinity
# standard mean ocean water
rho_smow = 999.842594 + 6.793953e-2*T - 9.095290e-3*T**2 + \
1.001685e-4*T**3 - 1.120083e-6*T**4 + 6.536332e-9*T**5
# at water surface
B1 = 8.2449e-1 - 4.0899e-3*T + 7.6438e-5*T**2 - 8.2467e-7*T**3 + 5.3875e-9*T**4
C1 = -5.7246e-3 + 1.0227e-4*T - 1.6546e-6*T**2
d0 = 4.8314e-4
rho_atm0 = rho_smow + B1*S + C1*S**1.5 + d0*S**2
# Depth = pressure (conversion from dbar to MPa) / water weight
d = (ds.pressure*10000)/(9.81*rho_atm0)
if hasattr(ds, 'h_deploy'):
d += ds.h_deploy
description = "Water depth to seafloor"
else:
description = "Water depth to ADCP"
ds['water_density'] = xr.DataArray(
rho_atm0,
dims=['time'],
attrs={'units': 'kg/m^3',
'description': 'Water density according to UNESCO 1981 equation of state'})
ds['depth'] = xr.DataArray(d, dims=['time'], attrs={
'units': 'm', 'description': description})
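# Minimal usage sketch (comments only; `ds` is assumed to be an ADCP dataset read with
# dolfyn that has `temp` and `pressure` variables and a zeroed pressure sensor):
#
#   set_range_offset(ds, h_deploy=0.6)    # instrument 0.6 m above the seabed
#   find_surface_from_P(ds, salinity=31)  # adds ds['water_density'] and ds['depth']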
def nan_beyond_surface(ds, val=np.nan):
"""
Mask the values of 3D data (vel, amp, corr, echo) that are beyond the surface.
Parameters
----------
ds : xarray.Dataset
The adcp dataset to clean
val : nan or numeric
Specifies the value to set the bad values to (default np.nan).
Returns
-------
ds : xarray.Dataset
The adcp dataset where relevant arrays with values greater than
`depth` are set to NaN
Notes
-----
Surface interference expected to happen at `r > depth * cos(beam_angle)`
"""
ds = ds.copy(deep=True)
# Get all variables with 'range' coordinate
var = [h for h in ds.keys() if any(s for s in ds[h].dims if 'range' in s)]
if 'nortek' in _make_model(ds):
beam_angle = 25 * (np.pi/180)
else: # TRDI
try:
beam_angle = ds.beam_angle
except:
beam_angle = 20 * (np.pi/180)
bds = ds.range > (ds.depth * np.cos(beam_angle) - ds.cell_size)
if 'echo' in var:
bds_echo = ds.range_echo > ds.depth
ds['echo'].values[..., bds_echo] = val
var.remove('echo')
for nm in var:
a = ds[nm].values
if 'corr' in nm:
a[..., bds] = 0
else:
a[..., bds] = val
ds[nm].values = a
return ds
def val_exceeds_thresh(var, thresh=5, val=np.nan):
"""
Find values of a variable that exceed a threshold value,
and assign "val" to the velocity data where the threshold is
exceeded.
Parameters
----------
var : xarray.DataArray
Variable to clean
thresh : numeric
The maximum value of velocity to screen
val : nan or numeric
Specifies the value to set the bad values to (default np.nan)
Returns
-------
ds : xarray.Dataset
The adcp dataset with datapoints beyond thresh are set to `val`
"""
var = var.copy(deep=True)
bd = np.zeros(var.shape, dtype='bool')
bd |= (np.abs(var.values) > thresh)
var.values[bd] = val
return var
def correlation_filter(ds, thresh=50, val=np.nan):
"""
Filters out velocity data where correlation is below a
threshold in the beam correlation data.
Parameters
----------
ds : xarray.Dataset
The adcp dataset to clean.
thresh : numeric
The maximum value of correlation to screen, in counts or %
val : numeric
Value to set masked correlation data to, default is nan
Returns
-------
ds : xarray.Dataset
Velocity data with low correlation values set to `val`
Notes
-----
Does not edit correlation or amplitude data.
"""
ds = ds.copy(deep=True)
# 4 or 5 beam
if hasattr(ds, 'vel_b5'):
tag = ['', '_b5']
else:
tag = ['']
# copy original ref frame
coord_sys_orig = ds.coord_sys
# correlation is always in beam coordinates
rotate2(ds, 'beam', inplace=True)
for tg in tag:
mask = (ds['corr'+tg].values <= thresh)
ds['vel'+tg].values[mask] = val
ds['vel'+tg].attrs['Comments'] = 'Filtered of data with a correlation value below ' + \
str(thresh) + ds.corr.units
rotate2(ds, coord_sys_orig, inplace=True)
return ds
def medfilt_orient(ds, nfilt=7):
"""
Median filters the orientation data (heading-pitch-roll or quaternions)
Parameters
----------
ds : xarray.Dataset
The adcp dataset to clean
nfilt : numeric
The length of the median-filtering kernel
*nfilt* must be odd.
Return
------
ds : xarray.Dataset
The adcp dataset with the filtered orientation data
See Also
--------
scipy.signal.medfilt()
"""
ds = ds.copy(deep=True)
if getattr(ds, 'has_imu'):
q_filt = np.zeros(ds.quaternion.shape)
for i in range(ds.quaternion.q.size):
q_filt[i] = medfilt(ds.quaternion[i].values, nfilt)
ds.quaternion.values = q_filt
ds['orientmat'] = quaternion2orient(ds.quaternion)
return ds
else:
# non Nortek AHRS-equipped instruments
do_these = ['pitch', 'roll', 'heading']
for nm in do_these:
ds[nm].values = medfilt(ds[nm].values, nfilt)
return ds.drop_vars('orientmat')
def fillgaps_time(var, method='cubic', max_gap=None):
"""
Fill gaps (nan values) in var across time using the specified method
Parameters
----------
var : xarray.DataArray
The variable to clean
method : string
Interpolation method to use
max_gap : numeric
Max number of consecutive NaN's to interpolate across
Returns
-------
ds : xarray.Dataset
The adcp dataset with gaps in velocity interpolated across time
See Also
--------
xarray.DataArray.interpolate_na()
"""
var = var.copy(deep=True)
time_dim = [t for t in var.dims if 'time' in t][0]
return var.interpolate_na(dim=time_dim, method=method,
use_coordinate=True,
max_gap=max_gap)
def fillgaps_depth(var, method='cubic', max_gap=None):
"""
Fill gaps (nan values) in var along the depth profile using the specified method
Parameters
----------
var : xarray.DataArray
The variable to clean
method : string
Interpolation method to use
max_gap : numeric
Max number of consecutive NaN's to interpolate across
Returns
-------
ds : xarray.Dataset
The adcp dataset with gaps in velocity interpolated across depth profiles
See Also
--------
xarray.DataArray.interpolate_na()
"""
var = var.copy(deep=True)
range_dim = [t for t in var.dims if 'range' in t][0]
return var.interpolate_na(dim=range_dim, method=method,
use_coordinate=False,
max_gap=max_gap)
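# Hedged sketch of how these helpers are typically chained (comments only; `ds` is an
# ADCP dataset already loaded with dolfyn, thresholds are placeholder values):
#
#   ds = nan_beyond_surface(ds)                       # mask bins beyond the surface
#   ds = correlation_filter(ds, thresh=50)            # drop low-correlation velocities
#   ds['vel'] = val_exceeds_thresh(ds.vel, thresh=3)  # screen unphysical speeds
#   ds['vel'] = fillgaps_time(ds.vel, max_gap=6)      # interpolate short gaps in time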
```
#### File: dolfyn/adv/clean.py
```python
import numpy as np
import warnings
from ..tools.misc import group, slice1d_along_axis
warnings.filterwarnings('ignore', category=np.RankWarning)
sin = np.sin
cos = np.cos
def clean_fill(u, mask, npt=12, method='cubic', max_gap=None):
"""
Interpolate over mask values in timeseries data using the specified method
Parameters
----------
u : xarray.DataArray
The dataArray to clean.
mask : bool
Logical tensor of elements to "nan" out (from `spike_thresh`, `range_limit`,
or `GN2002`) and replace
npt : int
The number of points on either side of the bad values that
interpolation occurs over
method : string
Interpolation scheme to use (linear, cubic, pchip, etc)
max_gap : int
Max number of consecutive NaN's to interpolate across, must be <= npt/2
Returns
-------
da : xarray.DataArray
The dataArray with nan's filled in
See Also
--------
xarray.DataArray.interpolate_na()
"""
if max_gap:
assert max_gap <= npt, 'Max_gap must be less than half of npt'
# Apply mask
u.values[..., mask] = np.nan
# Remove bad data for 2D+ and 1D timeseries variables
if 'dir' in u.dims:
for i in range(u.shape[0]):
u[i] = _interp_nan(u[i], npt, method, max_gap)
else:
u = _interp_nan(u, npt, method, max_gap)
return u
def _interp_nan(da, npt, method, max_gap):
"""
Interpolate over the points in `bad` that are True.
Parameters
----------
da : xarray.DataArray
The field to be cleaned
npt : int
The number of points on either side of the gap that the fit
occurs over
"""
searching = True
bds = da.isnull().values
ntail = 0
pos = 0
# The index array:
i = np.arange(len(da), dtype=np.uint32)
while pos < len(da):
if searching:
# Check the point
if bds[pos]:
# If it's bad, mark the start
start = max(pos - npt, 0)
# And stop searching.
searching = False
pos += 1
# Continue...
else:
# Another bad point?
if bds[pos]: # Yes
# Reset ntail
ntail = 0
else: # No
# Add to the tail of the block.
ntail += 1
pos += 1
if (ntail == npt or pos == len(da)):
# This is the block we are interpolating over
i_int = i[start:pos]
da[i_int] = da[i_int].interpolate_na(dim='time',
method=method,
use_coordinate=True,
max_gap=max_gap)
# Reset
searching = True
ntail = 0
return da
def spike_thresh(u, thresh=10):
"""
Returns a logical vector where a spike in `u` of magnitude greater than
`thresh` occurs. Both negative and positive spikes are found.
Parameters
----------
u : xarray.DataArray
The timeseries data to clean.
thresh : int
Magnitude of velocity spike, must be positive.
Returns
-------
mask : |np.ndarray|
Logical vector with spikes labeled as 'True'
"""
du = np.diff(u.values, prepend=0)
# a spike is a large jump in one direction immediately followed by a jump back
du_next = np.diff(u.values, append=0)
bds1 = (du > thresh) & (du_next < -thresh)
bds2 = (du < -thresh) & (du_next > thresh)
return bds1 + bds2
def range_limit(u, range=[-5, 5]):
"""
Returns a logical vector that is True where the values of `u` are
outside of `range`.
Parameters
----------
u : xarray.DataArray
The timeseries data to clean.
range : list
Min and max magnitudes beyond which are masked
Returns
-------
mask : |np.ndarray|
Logical vector with spikes labeled as 'True'
"""
return ~((range[0] < u.values) & (u.values < range[1]))
def _calcab(al, Lu_std_u, Lu_std_d2u):
"""Solve equations 10 and 11 of Goring+Nikora2002
"""
return tuple(np.linalg.solve(
np.array([[cos(al) ** 2, sin(al) ** 2],
[sin(al) ** 2, cos(al) ** 2]]),
np.array([(Lu_std_u) ** 2, (Lu_std_d2u) ** 2])))
def _phaseSpaceThresh(u):
if u.ndim == 1:
u = u[:, None]
u = np.array(u)
Lu = (2 * np.log(u.shape[0])) ** 0.5
u = u - u.mean(0)
du = np.zeros_like(u)
d2u = np.zeros_like(u)
# Take the centered difference.
du[1:-1] = (u[2:] - u[:-2]) / 2
# And again.
d2u[2:-2] = (du[1:-1][2:] - du[1:-1][:-2]) / 2
p = (u ** 2 + du ** 2 + d2u ** 2)
std_u = np.std(u, axis=0)
std_du = np.std(du, axis=0)
std_d2u = np.std(d2u, axis=0)
alpha = np.arctan2(np.sum(u * d2u, axis=0), np.sum(u ** 2, axis=0))
a = np.empty_like(alpha)
b = np.empty_like(alpha)
with warnings.catch_warnings() as w:
warnings.filterwarnings('ignore', category=RuntimeWarning, message='invalid value encountered in ')
for idx, al in enumerate(alpha):
a[idx], b[idx] = _calcab(al, Lu * std_u[idx], Lu * std_d2u[idx])
if np.any(np.isnan(a)) or np.any(np.isnan(b)):
print('Coefficient calculation error')
theta = np.arctan2(du, u)
phi = np.arctan2((du ** 2 + u ** 2) ** 0.5, d2u)
pe = (((sin(phi) * cos(theta) * cos(alpha) +
cos(phi) * sin(alpha)) ** 2) / a +
((sin(phi) * cos(theta) * sin(alpha) -
cos(phi) * cos(alpha)) ** 2) / b +
((sin(phi) * sin(theta)) ** 2) / (Lu * std_du) ** 2) ** -1
pe[:, np.isnan(pe[0, :])] = 0
return (p > pe).flatten('F')
def GN2002(u, npt=5000):
"""
The Goring & Nikora 2002 'despiking' method, with Wahl2003 correction.
Returns a logical vector that is true where spikes are identified.
Parameters
----------
u : xarray.DataArray
The velocity array (1D or 3D) to clean.
npt : int
The number of points over which to perform the method.
Returns
-------
mask : |np.ndarray|
Logical vector with spikes labeled as 'True'
"""
if not isinstance(u, np.ndarray):
return GN2002(u.values, npt=npt)
if u.ndim > 1:
mask = np.zeros(u.shape, dtype='bool')
for slc in slice1d_along_axis(u.shape, -1):
mask[slc] = GN2002(u[slc], npt=npt)
return mask
mask = np.zeros(len(u), dtype='bool')
# Find large bad segments (>npt/10):
# group returns a vector of slice objects.
bad_segs = group(np.isnan(u), min_length=int(npt//10))
if bad_segs.size > 0:
# Break them up into separate regions:
sp = 0
ep = len(u)
# Skip start and end bad_segs:
if bad_segs[0].start == sp:
sp = bad_segs[0].stop
bad_segs = bad_segs[1:]
if bad_segs[-1].stop == ep:
ep = bad_segs[-1].start
bad_segs = bad_segs[:-1]
for ind in range(len(bad_segs)):
bs = bad_segs[ind] # bs is a slice object.
# Clean the good region:
mask[sp:bs.start] = GN2002(u[sp:bs.start], npt=npt)
sp = bs.stop
# Clean the last good region.
mask[sp:ep] = GN2002(u[sp:ep], npt=npt)
return mask
c = 0
ntot = len(u)
nbins = int(ntot // npt)
mask_last = np.zeros_like(mask) + np.inf
mask[0] = True # make sure we start.
while mask.any():
mask[:nbins * npt] = _phaseSpaceThresh(
np.array(np.reshape(u[:(nbins * npt)], (npt, nbins), order='F')))
mask[-npt:] = _phaseSpaceThresh(u[-npt:])
c += 1
if c >= 100:
raise Exception('GN2002 loop-limit exceeded.')
if mask.sum() >= mask_last.sum():
break
mask_last = mask.copy()
return mask
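# Hedged usage sketch (comments only; `ds` is an ADV dataset loaded with dolfyn and the
# thresholds are placeholder values, not recommendations):
#
#   mask = GN2002(ds.vel, npt=5000)             # phase-space despiking
#   mask |= range_limit(ds.vel, range=[-3, 3])  # plus a hard velocity limit
#   ds['vel'] = clean_fill(ds.vel, mask, npt=12, method='cubic')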
```
#### File: dolfyn/tests/test_motion.py
```python
from dolfyn.tests import test_read_adv as tv
#from dolfyn.tests import test_read_adp as tp
from dolfyn.tests.base import load_netcdf as load, save_netcdf as save, assert_allclose
import dolfyn.adv.api as avm
def test_motion_adv(make_data=False):
tdm = tv.dat_imu.copy(deep=True)
tdm = avm.correct_motion(tdm)
# user added metadata
tdmj = tv.dat_imu_json.copy(deep=True)
tdmj = avm.correct_motion(tdmj)
# set declination and then correct
tdm10 = tv.dat_imu.copy(deep=True)
tdm10.velds.set_declination(10.0, inplace=True)
tdm10 = avm.correct_motion(tdm10)
# test setting declination to 0 doesn't affect correction
tdm0 = tv.dat_imu.copy(deep=True)
tdm0.velds.set_declination(0.0, inplace=True)
tdm0 = avm.correct_motion(tdm0)
tdm0.attrs.pop('declination')
tdm0.attrs.pop('declination_in_orientmat')
# test motion-corrected data rotation
tdmE = tv.dat_imu.copy(deep=True)
tdmE.velds.set_declination(10.0, inplace=True)
tdmE.velds.rotate2('earth', inplace=True)
tdmE = avm.correct_motion(tdmE)
if make_data:
save(tdm, 'vector_data_imu01_mc.nc')
save(tdm10, 'vector_data_imu01_mcDeclin10.nc')
save(tdmj, 'vector_data_imu01-json_mc.nc')
return
cdm10 = load('vector_data_imu01_mcDeclin10.nc')
assert_allclose(tdm, load('vector_data_imu01_mc.nc'), atol=1e-7)
assert_allclose(tdm10, tdmj, atol=1e-7)
assert_allclose(tdm0, tdm, atol=1e-7)
assert_allclose(tdm10, cdm10, atol=1e-7)
assert_allclose(tdmE, cdm10, atol=1e-7)
assert_allclose(tdmj, load('vector_data_imu01-json_mc.nc'), atol=1e-7)
def test_sep_probes(make_data=False):
tdm = tv.dat_imu.copy(deep=True)
tdm = avm.correct_motion(tdm, separate_probes=True)
if make_data:
save(tdm, 'vector_data_imu01_mcsp.nc')
return
assert_allclose(tdm, load('vector_data_imu01_mcsp.nc'), atol=1e-7)
# def test_motion_adcp():
# # Correction for ADCPs not completed yet
# tdm = tp.dat_sig_i.copy(deep=True)
# avm.set_inst2head_rotmat(tdm, rotmat=np.eye(4), inplace=True) # 4th doesn't matter
# tdm.attrs['inst2head_vec'] = np.array([0,0,0,0])
# tdmc = avm.correct_motion(tdm)
# assert type(tdm)==type(tdmc) # simple way of making sure tdmc exists
```
#### File: dolfyn/tests/test_shortcuts.py
```python
from dolfyn.tests import test_read_adv as tv
from dolfyn.tests.base import load_netcdf as load, save_netcdf as save, rfnm
from dolfyn import rotate2
import dolfyn.adv.api as avm
from xarray.testing import assert_allclose
import xarray as xr
import os
class adv_setup():
def __init__(self, tv):
dat = tv.dat.copy(deep=True)
self.dat = rotate2(dat, 'earth', inplace=False)
self.tdat = avm.calc_turbulence(self.dat, n_bin=20.0, fs=self.dat.fs)
short = xr.Dataset()
short['u'] = self.tdat.velds.u
short['v'] = self.tdat.velds.v
short['w'] = self.tdat.velds.w
short['U'] = self.tdat.velds.U
short['U_mag'] = self.tdat.velds.U_mag
short['U_dir'] = self.tdat.velds.U_dir
short['U_dir_N'] = self.dat.velds.U_dir
short["upup_"] = self.tdat.velds.upup_
short["vpvp_"] = self.tdat.velds.vpvp_
short["wpwp_"] = self.tdat.velds.wpwp_
short["upvp_"] = self.tdat.velds.upvp_
short["upwp_"] = self.tdat.velds.upwp_
short["vpwp_"] = self.tdat.velds.vpwp_
short['tke'] = self.tdat.velds.tke
short['I'] = self.tdat.velds.I
short['E_coh'] = self.tdat.velds.E_coh
short['I_tke'] = self.tdat.velds.I_tke
self.short = short
def test_shortcuts(make_data=False):
test_dat = adv_setup(tv)
if make_data:
save(test_dat.short, 'vector_data01_u.nc')
return
assert_allclose(test_dat.short, load('vector_data01_u.nc'), atol=1e-6)
def test_save_complex_data():
test_dat = adv_setup(tv)
save(test_dat.short, 'test_save.nc')
assert os.path.exists(rfnm('test_save.nc'))
```
#### File: dolfyn/tests/test_vs_nortek.py
```python
from dolfyn.tests import test_read_adp as tr
from dolfyn.tests import base
from dolfyn.rotate.api import rotate2
from numpy.testing import assert_allclose
import numpy as np
import scipy.io as sio
"""
Testing against velocity and bottom-track velocity data in Nortek mat files
exported from SignatureDeployment.
inst2earth rotation fails for AHRS-equipped instruments and I don't know why -
I believe it's due to an RC filter (or some such) on Nortek's side after they
load in the orientation matrix from the AHRS (Check out the difference
colorplots compared to non-AHRS instruments.) Using HPR- or quaternion-calc'd
orientation matrices doesn't close the gap.
"""
def load_nortek_matfile(filename):
# remember to transpose this data
data = sio.loadmat(filename,
struct_as_record=False,
squeeze_me=True)
d = data['Data']
# print(d._fieldnames)
burst = 'Burst'
bt = 'BottomTrack'
beam = ['_VelBeam1', '_VelBeam2', '_VelBeam3', '_VelBeam4']
b5 = 'IBurst_VelBeam5'
inst = ['_VelX', '_VelY', '_VelZ1', '_VelZ2']
earth = ['_VelEast', '_VelNorth', '_VelUp1', '_VelUp2']
axis = {'beam': beam, 'inst': inst, 'earth': earth}
AHRS = 'Burst_AHRSRotationMatrix' # , 'IBurst_AHRSRotationMatrix']
vel = {'beam': {}, 'inst': {}, 'earth': {}}
for ky in vel.keys():
for i in range(len(axis[ky])):
vel[ky][i] = np.transpose(getattr(d, burst+axis[ky][i]))
vel[ky] = np.stack((vel[ky][0], vel[ky][1],
vel[ky][2], vel[ky][3]), axis=0)
if AHRS in d._fieldnames:
vel['omat'] = np.transpose(getattr(d, AHRS))
if b5 in d._fieldnames:
vel['b5'] = np.transpose(getattr(d, b5))
#vel['omat5'] = getattr(d, AHRS[1])
if bt+beam[0] in d._fieldnames:
vel_bt = {'beam': {}, 'inst': {}, 'earth': {}}
for ky in vel_bt.keys():
for i in range(len(axis[ky])):
vel_bt[ky][i] = np.transpose(getattr(d, bt+axis[ky][i]))
vel_bt[ky] = np.stack((vel_bt[ky][0], vel_bt[ky][1],
vel_bt[ky][2], vel_bt[ky][3]), axis=0)
return vel, vel_bt
else:
return vel
def rotate(axis):
# BenchFile01.ad2cp
td_sig = rotate2(tr.dat_sig, axis, inplace=False)
# Sig1000_IMU.ad2cp no userdata
td_sig_i = rotate2(tr.dat_sig_i, axis, inplace=False)
# VelEchoBT01.ad2cp
td_sig_ieb = rotate2(tr.dat_sig_ieb, axis,
inplace=False)
# Sig500_Echo.ad2cp
td_sig_ie = rotate2(tr.dat_sig_ie, axis,
inplace=False)
td_sig_vel = load_nortek_matfile(base.rfnm('BenchFile01.mat'))
td_sig_i_vel = load_nortek_matfile(base.rfnm('Sig1000_IMU.mat'))
td_sig_ieb_vel, vel_bt = load_nortek_matfile(base.rfnm('VelEchoBT01.mat'))
td_sig_ie_vel = load_nortek_matfile(base.rfnm('Sig500_Echo.mat'))
nens = 100
# ARHS inst2earth orientation matrix check
# Checks the 1,1 element because the nortek orientmat's shape is [9,:] as
# opposed to [3,3,:]
if axis == 'inst':
assert_allclose(td_sig_i.orientmat[0][0].values,
td_sig_i_vel['omat'][0, :nens], atol=1e-7)
assert_allclose(td_sig_ieb.orientmat[0][0].values,
td_sig_ieb_vel['omat'][0, :][..., :nens], atol=1e-7)
# 4-beam velocity
assert_allclose(td_sig.vel.values, td_sig_vel[axis][..., :nens], atol=1e-5)
assert_allclose(td_sig_i.vel.values,
td_sig_i_vel[axis][..., :nens], atol=5e-3)
assert_allclose(td_sig_ieb.vel.values,
td_sig_ieb_vel[axis][..., :nens], atol=5e-3)
assert_allclose(td_sig_ie.vel.values,
td_sig_ie_vel[axis][..., :nens], atol=1e-5)
# 5th-beam velocity
if axis == 'beam':
assert_allclose(td_sig_i.vel_b5.values,
td_sig_i_vel['b5'][..., :nens], atol=1e-5)
assert_allclose(td_sig_ieb.vel_b5.values,
td_sig_ieb_vel['b5'][..., :nens], atol=1e-5)
assert_allclose(td_sig_ie.vel_b5.values,
td_sig_ie_vel['b5'][..., :nens], atol=1e-5)
# bottom-track
assert_allclose(td_sig_ieb.vel_bt.values,
vel_bt[axis][..., :nens], atol=5e-3)
def test_rotate2_beam():
rotate('beam')
def test_rotate2_inst():
rotate('inst')
def test_rotate2_earth():
rotate('earth')
``` |
{
"source": "jklymak/gcalcli",
"score": 3
} |
#### File: gcalcli/tests/test_input_validation.py
```python
import pytest
from gcalcli.validators import validate_input, ValidationError
from gcalcli.validators import (STR_NOT_EMPTY,
PARSABLE_DATE,
PARSABLE_DURATION,
STR_TO_INT,
STR_ALLOW_EMPTY,
REMINDER,
VALID_COLORS)
import gcalcli.validators
# Tests required:
#
# * Title: any string, not blank
# * Location: any string, allow blank
# * When: string that can be parsed by dateutil
# * Duration: string that can be cast to int
# * Description: any string, allow blank
# * Color: any string matching: blueberry, lavender, grape, etc, or blank
# * Reminder: a valid reminder
def test_any_string_not_blank_validator(monkeypatch):
# Empty string raises ValidationError
monkeypatch.setattr(gcalcli.validators, "input", lambda: "")
with pytest.raises(ValidationError) as ve:
validate_input(STR_NOT_EMPTY)
assert ve.value.message == ("Input here cannot be empty. "
"(Ctrl-C to exit)\n")
# None raises ValidationError
monkeypatch.setattr(gcalcli.validators, "input", lambda: None)
with pytest.raises(ValidationError) as ve:
validate_input(STR_NOT_EMPTY)
assert ve.value.message == ("Input here cannot be empty. "
"(Ctrl-C to exit)\n")
# Valid string passes
monkeypatch.setattr(gcalcli.validators, "input", lambda: "Valid Text")
assert validate_input(STR_NOT_EMPTY) == "Valid Text"
def test_any_string_parsable_by_dateutil(monkeypatch):
# non-date raises ValidationError
monkeypatch.setattr(gcalcli.validators, "input", lambda: "NON-DATE STR")
with pytest.raises(ValidationError) as ve:
validate_input(PARSABLE_DATE)
assert ve.value.message == (
"Expected format: a date (e.g. 2019-01-01, tomorrow 10am, "
"2nd Jan, Jan 4th, etc) or valid time if today. "
"(Ctrl-C to exit)\n"
)
# date string passes
monkeypatch.setattr(gcalcli.validators, "input", lambda: "2nd January")
assert validate_input(PARSABLE_DATE) == "2nd January"
def test_any_string_parsable_by_parsedatetime(monkeypatch):
# non-date raises ValidationError
monkeypatch.setattr(gcalcli.validators, "input", lambda: "NON-DATE STR")
with pytest.raises(ValidationError) as ve:
validate_input(PARSABLE_DURATION)
assert ve.value.message == (
'Expected format: a duration (e.g. 1m, 1s, 1h3m)'
'(Ctrl-C to exit)\n'
)
# duration string passes
monkeypatch.setattr(gcalcli.validators, "input", lambda: "1m")
assert validate_input(PARSABLE_DURATION) == "1m"
# duration string passes
monkeypatch.setattr(gcalcli.validators, "input", lambda: "1h2m")
assert validate_input(PARSABLE_DURATION) == "1h2m"
def test_string_can_be_cast_to_int(monkeypatch):
# non int-castable string raises ValidationError
monkeypatch.setattr(gcalcli.validators, "input", lambda: "X")
with pytest.raises(ValidationError) as ve:
validate_input(STR_TO_INT)
assert ve.value.message == ("Input here must be a number. "
"(Ctrl-C to exit)\n")
# int string passes
monkeypatch.setattr(gcalcli.validators, "input", lambda: "10")
assert validate_input(STR_TO_INT) == "10"
def test_for_valid_colour_name(monkeypatch):
# non valid colour raises ValidationError
monkeypatch.setattr(gcalcli.validators, "input", lambda: "purple")
with pytest.raises(ValidationError) as ve:
validate_input(VALID_COLORS)
assert ve.value.message == (
"Expected colors are: lavender, sage, grape, flamingo, banana, "
"tangerine, peacock, graphite, blueberry, basil, tomato. "
"(Ctrl-C to exit)\n"
)
# valid colour passes
monkeypatch.setattr(gcalcli.validators, "input", lambda: "grape")
assert validate_input(VALID_COLORS) == "grape"
# empty str passes
monkeypatch.setattr(gcalcli.validators, "input", lambda: "")
assert validate_input(VALID_COLORS) == ""
def test_any_string_and_blank(monkeypatch):
# string passes
monkeypatch.setattr(gcalcli.validators, "input", lambda: "TEST")
assert validate_input(STR_ALLOW_EMPTY) == "TEST"
def test_reminder(monkeypatch):
# valid reminders pass
monkeypatch.setattr(gcalcli.validators, "input", lambda: "10m email")
assert validate_input(REMINDER) == "10m email"
monkeypatch.setattr(gcalcli.validators, "input", lambda: "10 popup")
assert validate_input(REMINDER) == "10 popup"
monkeypatch.setattr(gcalcli.validators, "input", lambda: "10m sms")
assert validate_input(REMINDER) == "10m sms"
monkeypatch.setattr(gcalcli.validators, "input", lambda: "12323")
assert validate_input(REMINDER) == "12323"
# invalid reminder raises ValidationError
monkeypatch.setattr(gcalcli.validators, "input", lambda: "meaningless")
with pytest.raises(ValidationError) as ve:
validate_input(REMINDER)
assert ve.value.message == ('Expected format: <number><w|d|h|m> '
'<popup|email|sms>. (Ctrl-C to exit)\n')
# invalid reminder raises ValidationError
monkeypatch.setattr(gcalcli.validators, "input", lambda: "")
with pytest.raises(ValidationError) as ve:
validate_input(REMINDER)
assert ve.value.message == ('Expected format: <number><w|d|h|m> '
'<popup|email|sms>. (Ctrl-C to exit)\n')
``` |
{
"source": "jklymak/mpl-bench",
"score": 2
} |
#### File: mpl-bench/benchmarks/image.py
```python
from __future__ import absolute_import, division, print_function
import distutils.version
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
MPL_VERSION = distutils.version.LooseVersion(matplotlib.__version__)
class InterpolationSuite:
params = ('none', 'nearest', 'bilinear', 'bicubic')
param_names = ('interpolation', )
def setup(self, interpolation):
self.data = np.arange(100).reshape((10, 10))
self.fig, self.ax = plt.subplots(dpi=100)
self.ax.set_xlim(0, 3)
self.ax.set_ylim(0, 3)
def teardown(self, interpolation):
plt.close(self.fig)
def time_setup(self, interpolation):
self.ax.imshow(self.data, interpolation=interpolation,
extent=(1, 2, 1, 2))
def time_full_draw(self, interpolation):
self.ax.imshow(self.data, interpolation=interpolation,
extent=(1, 2, 1, 2))
self.fig.canvas.draw()
class MaskSuite:
# Test mask image two ways: Using nans and using a masked array.
params = ('nan', 'mask')
param_names = ('method', )
def setup(self, method):
if method == 'nan':
A = np.ones((5, 5))
A[1:2, 1:2] = np.nan
elif method == 'mask':
A = np.zeros((5, 5), dtype=bool)
A[1:2, 1:2] = True
A = np.ma.masked_array(np.ones((5, 5), dtype=np.uint16), A)
else:
raise NotImplementedError('%s is not a known masking method' %
(method, ))
self.A = A
self.fig, self.ax = plt.subplots(dpi=100)
def teardown(self, method):
plt.close(self.fig)
def time_setup(self, method):
self.ax.imshow(self.A, interpolation='nearest')
def time_full_draw(self, method):
self.ax.imshow(self.A, interpolation='nearest')
self.fig.canvas.draw()
class TypeSuite:
# Test both endianness, some alternate float and non-float types.
params = ('<f8', '>f8', np.longdouble, int, bool)
param_names = ('dtype', )
def setup(self, t):
if t == np.longdouble and MPL_VERSION < '2.1.0':
raise NotImplementedError(
'Long double not supported in this Matplotlib version')
x = np.arange(10)
X, Y = np.meshgrid(x, x)
Z = ((X - 5)**2 + (Y - 5)**2)**0.5
self.data = Z.astype(t)
self.fig, self.ax = plt.subplots(dpi=100)
def teardown(self, t):
if t == np.longdouble and MPL_VERSION < '2.1.0':
return
plt.close(self.fig)
def time_setup(self, t):
self.ax.imshow(self.data)
def time_full_draw(self, t):
self.ax.imshow(self.data)
self.fig.canvas.draw()
``` |
{
"source": "jklymak/pick_waypoints",
"score": 4
} |
#### File: pick_waypoints/pick_waypoints/example.py
```python
__all__ = [
"example_function",
]
import numpy as np
def example_function(ax, data, above_color="r", below_color="k", **kwargs):
"""
An example function that makes a scatter plot with points colored differently
    depending on whether they are above or below `y=0`.
Parameters
----------
ax : matplotlib axis
The axis to plot on.
data : (N, 2) array-like
The data to make a plot from
above_color : color-like, default: 'r'
The color of points with `y>0`
below_color : color-like, default: 'k'
The color of points with `y<0`
kwargs :
Passed through to `ax.scatter`
Returns
-------
    matplotlib.collections.PathCollection
"""
colors = np.array([above_color] * data.shape[0])
colors[data[:, 1] < 0] = below_color
return ax.scatter(data[:, 0], data[:, 1], c=colors, **kwargs)
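# Usage sketch (not part of the original module): color 100 random points by the sign
# of y. Assumes matplotlib is available for display; the rng seed and colors are arbitrary.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    rng = np.random.default_rng(0)
    fig, ax = plt.subplots()
    example_function(ax, rng.normal(size=(100, 2)), above_color="b", below_color="g", s=15)
    plt.show()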
``` |
{
"source": "jklymak/veros",
"score": 2
} |
#### File: veros/test/setup_test.py
```python
import pytest
@pytest.fixture(autouse=True)
def ensure_diskless():
from veros import runtime_settings
object.__setattr__(runtime_settings, "diskless_mode", True)
def test_setup_acc():
from veros.setups.acc import ACCSetup
sim = ACCSetup()
sim.setup()
with sim.state.settings.unlock():
sim.state.settings.runlen = sim.state.settings.dt_tracer * 20
sim.run()
def test_setup_acc_basic():
from veros.setups.acc_basic import ACCBasicSetup
sim = ACCBasicSetup()
sim.setup()
with sim.state.settings.unlock():
sim.state.settings.runlen = sim.state.settings.dt_tracer * 20
sim.run()
def test_setup_4deg():
from veros.setups.global_4deg import GlobalFourDegreeSetup
sim = GlobalFourDegreeSetup()
sim.setup()
with sim.state.settings.unlock():
sim.state.settings.runlen = sim.state.settings.dt_tracer * 20
sim.run()
def test_setup_flexible():
from veros.setups.global_flexible import GlobalFlexibleResolutionSetup
sim = GlobalFlexibleResolutionSetup(
override=dict(
nx=100,
ny=50,
dt_tracer=3600,
dt_mom=3600,
)
)
sim.setup()
with sim.state.settings.unlock():
sim.state.settings.runlen = sim.state.settings.dt_tracer * 20
sim.run()
def test_setup_1deg():
from veros.setups.global_1deg import GlobalOneDegreeSetup
# too big to test
GlobalOneDegreeSetup()
def test_setup_north_atlantic():
from veros.setups.north_atlantic import NorthAtlanticSetup
sim = NorthAtlanticSetup(override=dict(nx=100, ny=100, nz=50))
sim.setup()
with sim.state.settings.unlock():
sim.state.settings.runlen = sim.state.settings.dt_tracer
sim.run()
```
#### File: veros/test/state_test.py
```python
import pytest
from veros.state import VerosSettings, VerosVariables, VerosState
@pytest.fixture
def dummy_settings():
from veros.settings import SETTINGS
return VerosSettings(SETTINGS)
@pytest.fixture
def dummy_variables():
from veros.variables import VARIABLES, DIM_TO_SHAPE_VAR
fixed_dims = {k: 10 for k in DIM_TO_SHAPE_VAR.keys()}
return VerosVariables(VARIABLES, fixed_dims)
@pytest.fixture
def dummy_state():
from veros.variables import VARIABLES, DIM_TO_SHAPE_VAR
from veros.settings import SETTINGS
return VerosState(VARIABLES, SETTINGS, DIM_TO_SHAPE_VAR)
def test_lock_settings(dummy_settings):
orig_val = dummy_settings.dt_tracer
with pytest.raises(RuntimeError):
dummy_settings.dt_tracer = 0
assert dummy_settings.dt_tracer == orig_val
with dummy_settings.unlock():
dummy_settings.dt_tracer = 1
assert dummy_settings.dt_tracer == 1
def test_settings_repr(dummy_settings):
with dummy_settings.unlock():
dummy_settings.dt_tracer = 1
assert "dt_tracer = 1.0," in repr(dummy_settings)
def test_variables_repr(dummy_variables):
from veros.core.operators import numpy as npx
array_type = type(npx.array([]))
assert f"tau = {array_type} with shape (), dtype int32," in repr(dummy_variables)
def test_to_xarray(dummy_state):
pytest.importorskip("xarray")
dummy_state.initialize_variables()
ds = dummy_state.to_xarray()
# settings
assert tuple(ds.attrs.keys()) == tuple(dummy_state.settings.fields())
assert tuple(ds.attrs.values()) == tuple(dummy_state.settings.values())
# dimensions
used_dims = set()
for var, meta in dummy_state.var_meta.items():
if var in dummy_state.variables:
if meta.dims is None:
continue
used_dims |= set(meta.dims)
assert set(ds.coords.keys()) == used_dims
for dim in used_dims:
assert int(ds.dims[dim]) == dummy_state.dimensions[dim]
# variables
for var in dummy_state.variables.fields():
assert var in ds
def test_variable_init(dummy_state):
with pytest.raises(RuntimeError):
dummy_state.variables
dummy_state.initialize_variables()
assert isinstance(dummy_state.variables, VerosVariables)
with pytest.raises(RuntimeError):
dummy_state.initialize_variables()
def test_set_dimension(dummy_state):
with dummy_state.settings.unlock():
dummy_state.settings.nx = 10
assert dummy_state.dimensions["xt"] == 10
dummy_state.dimensions["foobar"] = 42
assert dummy_state.dimensions["foobar"] == 42
with pytest.raises(RuntimeError):
dummy_state.dimensions["xt"] = 11
assert dummy_state._dimensions["xt"] == "nx"
def test_resize_dimension(dummy_state):
from veros.state import resize_dimension
with dummy_state.settings.unlock():
dummy_state.settings.nx = 10
dummy_state.initialize_variables()
assert dummy_state.dimensions["xt"] == 10
assert dummy_state.variables.dxt.shape == (14,)
resize_dimension(dummy_state, "xt", 100)
assert dummy_state.dimensions["xt"] == 100
assert dummy_state.variables.dxt.shape == (104,)
def test_timers(dummy_state):
from veros.timer import Timer
timer = dummy_state.timers["foobar"]
assert isinstance(timer, Timer)
```
#### File: veros/core/momentum.py
```python
from veros.core.operators import numpy as npx
from veros import veros_routine, veros_kernel, KernelOutput, runtime_settings
from veros.variables import allocate
from veros.core import friction, streamfunction
from veros.core.operators import update, update_add, at
@veros_kernel
def tend_coriolisf(state):
"""
time tendency due to Coriolis force
"""
vs = state.variables
settings = state.settings
vs.du_cor = update(
vs.du_cor,
at[2:-2, 2:-2],
0.25
* vs.maskU[2:-2, 2:-2]
* (
vs.coriolis_t[2:-2, 2:-2, npx.newaxis]
* (vs.v[2:-2, 2:-2, :, vs.tau] + vs.v[2:-2, 1:-3, :, vs.tau])
* vs.dxt[2:-2, npx.newaxis, npx.newaxis]
/ vs.dxu[2:-2, npx.newaxis, npx.newaxis]
+ vs.coriolis_t[3:-1, 2:-2, npx.newaxis]
* (vs.v[3:-1, 2:-2, :, vs.tau] + vs.v[3:-1, 1:-3, :, vs.tau])
* vs.dxt[3:-1, npx.newaxis, npx.newaxis]
/ vs.dxu[2:-2, npx.newaxis, npx.newaxis]
),
)
vs.dv_cor = update(
vs.dv_cor,
at[2:-2, 2:-2],
-0.25
* vs.maskV[2:-2, 2:-2]
* (
vs.coriolis_t[2:-2, 2:-2, npx.newaxis]
* (vs.u[1:-3, 2:-2, :, vs.tau] + vs.u[2:-2, 2:-2, :, vs.tau])
* vs.dyt[npx.newaxis, 2:-2, npx.newaxis]
* vs.cost[npx.newaxis, 2:-2, npx.newaxis]
/ (vs.dyu[npx.newaxis, 2:-2, npx.newaxis] * vs.cosu[npx.newaxis, 2:-2, npx.newaxis])
+ vs.coriolis_t[2:-2, 3:-1, npx.newaxis]
* (vs.u[1:-3, 3:-1, :, vs.tau] + vs.u[2:-2, 3:-1, :, vs.tau])
* vs.dyt[npx.newaxis, 3:-1, npx.newaxis]
* vs.cost[npx.newaxis, 3:-1, npx.newaxis]
/ (vs.dyu[npx.newaxis, 2:-2, npx.newaxis] * vs.cosu[npx.newaxis, 2:-2, npx.newaxis])
),
)
"""
time tendency due to metric terms
"""
if settings.coord_degree:
vs.du_cor = update_add(
vs.du_cor,
at[2:-2, 2:-2],
vs.maskU[2:-2, 2:-2]
* 0.125
* vs.tantr[npx.newaxis, 2:-2, npx.newaxis]
* (
(vs.u[2:-2, 2:-2, :, vs.tau] + vs.u[1:-3, 2:-2, :, vs.tau])
* (vs.v[2:-2, 2:-2, :, vs.tau] + vs.v[2:-2, 1:-3, :, vs.tau])
* vs.dxt[2:-2, npx.newaxis, npx.newaxis]
/ vs.dxu[2:-2, npx.newaxis, npx.newaxis]
+ (vs.u[3:-1, 2:-2, :, vs.tau] + vs.u[2:-2, 2:-2, :, vs.tau])
* (vs.v[3:-1, 2:-2, :, vs.tau] + vs.v[3:-1, 1:-3, :, vs.tau])
* vs.dxt[3:-1, npx.newaxis, npx.newaxis]
/ vs.dxu[2:-2, npx.newaxis, npx.newaxis]
),
)
vs.dv_cor = update_add(
vs.dv_cor,
at[2:-2, 2:-2],
-1
* vs.maskV[2:-2, 2:-2]
* 0.125
* (
vs.tantr[npx.newaxis, 2:-2, npx.newaxis]
* (vs.u[2:-2, 2:-2, :, vs.tau] + vs.u[1:-3, 2:-2, :, vs.tau]) ** 2
* vs.dyt[npx.newaxis, 2:-2, npx.newaxis]
* vs.cost[npx.newaxis, 2:-2, npx.newaxis]
/ (vs.dyu[npx.newaxis, 2:-2, npx.newaxis] * vs.cosu[npx.newaxis, 2:-2, npx.newaxis])
+ vs.tantr[npx.newaxis, 3:-1, npx.newaxis]
* (vs.u[2:-2, 3:-1, :, vs.tau] + vs.u[1:-3, 3:-1, :, vs.tau]) ** 2
* vs.dyt[npx.newaxis, 3:-1, npx.newaxis]
* vs.cost[npx.newaxis, 3:-1, npx.newaxis]
/ (vs.dyu[npx.newaxis, 2:-2, npx.newaxis] * vs.cosu[npx.newaxis, 2:-2, npx.newaxis])
),
)
"""
transfer to time tendencies
"""
vs.du = update(vs.du, at[2:-2, 2:-2, :, vs.tau], vs.du_cor[2:-2, 2:-2])
vs.dv = update(vs.dv, at[2:-2, 2:-2, :, vs.tau], vs.dv_cor[2:-2, 2:-2])
return KernelOutput(du=vs.du, dv=vs.dv, du_cor=vs.du_cor, dv_cor=vs.dv_cor)
@veros_kernel
def tend_tauxyf(state):
"""
wind stress forcing
"""
vs = state.variables
settings = state.settings
if runtime_settings.pyom_compatibility_mode:
vs.du = update_add(
vs.du, at[2:-2, 2:-2, -1, vs.tau], vs.maskU[2:-2, 2:-2, -1] * vs.surface_taux[2:-2, 2:-2] / vs.dzt[-1]
)
vs.dv = update_add(
vs.dv, at[2:-2, 2:-2, -1, vs.tau], vs.maskV[2:-2, 2:-2, -1] * vs.surface_tauy[2:-2, 2:-2] / vs.dzt[-1]
)
else:
vs.du = update_add(
vs.du,
at[2:-2, 2:-2, -1, vs.tau],
vs.maskU[2:-2, 2:-2, -1] * vs.surface_taux[2:-2, 2:-2] / vs.dzt[-1] / settings.rho_0,
)
vs.dv = update_add(
vs.dv,
at[2:-2, 2:-2, -1, vs.tau],
vs.maskV[2:-2, 2:-2, -1] * vs.surface_tauy[2:-2, 2:-2] / vs.dzt[-1] / settings.rho_0,
)
return KernelOutput(du=vs.du, dv=vs.dv)
@veros_kernel
def momentum_advection(state):
"""
Advection of momentum with second order which is energy conserving
"""
vs = state.variables
"""
Code from MITgcm
"""
utr = vs.u[..., vs.tau] * vs.maskU * vs.dyt[npx.newaxis, :, npx.newaxis] * vs.dzt[npx.newaxis, npx.newaxis, :]
vtr = (
vs.dzt[npx.newaxis, npx.newaxis, :]
* vs.cosu[npx.newaxis, :, npx.newaxis]
* vs.dxt[:, npx.newaxis, npx.newaxis]
* vs.v[..., vs.tau]
* vs.maskV
)
wtr = vs.w[..., vs.tau] * vs.maskW * vs.area_t[:, :, npx.newaxis]
"""
for zonal momentum
"""
flux_east = allocate(state.dimensions, ("xu", "yt", "zt"))
flux_north = allocate(state.dimensions, ("xt", "yu", "zt"))
flux_top = allocate(state.dimensions, ("xt", "yt", "zw"))
flux_east = update(
flux_east,
at[1:-2, 2:-2],
0.25 * (vs.u[1:-2, 2:-2, :, vs.tau] + vs.u[2:-1, 2:-2, :, vs.tau]) * (utr[2:-1, 2:-2] + utr[1:-2, 2:-2]),
)
flux_north = update(
flux_north,
at[2:-2, 1:-2],
0.25 * (vs.u[2:-2, 1:-2, :, vs.tau] + vs.u[2:-2, 2:-1, :, vs.tau]) * (vtr[3:-1, 1:-2] + vtr[2:-2, 1:-2]),
)
flux_top = update(
flux_top,
at[2:-2, 2:-2, :-1],
0.25
* (vs.u[2:-2, 2:-2, 1:, vs.tau] + vs.u[2:-2, 2:-2, :-1, vs.tau])
* (wtr[2:-2, 2:-2, :-1] + wtr[3:-1, 2:-2, :-1]),
)
vs.du_adv = update(
vs.du_adv,
at[2:-2, 2:-2],
-1
* vs.maskU[2:-2, 2:-2]
* (flux_east[2:-2, 2:-2] - flux_east[1:-3, 2:-2] + flux_north[2:-2, 2:-2] - flux_north[2:-2, 1:-3])
/ (vs.dzt[npx.newaxis, npx.newaxis, :] * vs.area_u[2:-2, 2:-2, npx.newaxis]),
)
tmp = vs.maskU / (vs.dzt * vs.area_u[:, :, npx.newaxis])
vs.du_adv = vs.du_adv - tmp * flux_top
vs.du_adv = update_add(vs.du_adv, at[:, :, 1:], tmp[:, :, 1:] * flux_top[:, :, :-1])
"""
for meridional momentum
"""
flux_top = update(flux_top, at[...], 0.0)
flux_east = update(
flux_east,
at[1:-2, 2:-2],
0.25 * (vs.v[1:-2, 2:-2, :, vs.tau] + vs.v[2:-1, 2:-2, :, vs.tau]) * (utr[1:-2, 3:-1] + utr[1:-2, 2:-2]),
)
flux_north = update(
flux_north,
at[2:-2, 1:-2],
0.25 * (vs.v[2:-2, 1:-2, :, vs.tau] + vs.v[2:-2, 2:-1, :, vs.tau]) * (vtr[2:-2, 2:-1] + vtr[2:-2, 1:-2]),
)
flux_top = update(
flux_top,
at[2:-2, 2:-2, :-1],
0.25
* (vs.v[2:-2, 2:-2, 1:, vs.tau] + vs.v[2:-2, 2:-2, :-1, vs.tau])
* (wtr[2:-2, 2:-2, :-1] + wtr[2:-2, 3:-1, :-1]),
)
vs.dv_adv = update(
vs.dv_adv,
at[2:-2, 2:-2],
-1
* vs.maskV[2:-2, 2:-2]
* (flux_east[2:-2, 2:-2] - flux_east[1:-3, 2:-2] + flux_north[2:-2, 2:-2] - flux_north[2:-2, 1:-3])
/ (vs.dzt * vs.area_v[2:-2, 2:-2, npx.newaxis]),
)
tmp = vs.maskV / (vs.dzt * vs.area_v[:, :, npx.newaxis])
vs.dv_adv = vs.dv_adv - tmp * flux_top
vs.dv_adv = update_add(vs.dv_adv, at[:, :, 1:], tmp[:, :, 1:] * flux_top[:, :, :-1])
vs.du = update_add(vs.du, at[:, :, :, vs.tau], vs.du_adv)
vs.dv = update_add(vs.dv, at[:, :, :, vs.tau], vs.dv_adv)
return KernelOutput(du=vs.du, dv=vs.dv, du_adv=vs.du_adv, dv_adv=vs.dv_adv)
@veros_routine
def vertical_velocity(state):
vs = state.variables
vs.update(vertical_velocity_kernel(state))
@veros_kernel
def vertical_velocity_kernel(state):
"""
vertical velocity from continuity :
\\int_0^z w_z dz = w(z)-w(0) = - \\int dz (u_x + v_y)
    w(z) = - \\int_0^z dz (u_x + v_y)
"""
vs = state.variables
fxa = allocate(state.dimensions, ("xt", "yt", "zw"))
# integrate from bottom to surface to see error in w
fxa = update(
fxa,
at[1:, 1:, 0],
-1
* vs.maskW[1:, 1:, 0]
* vs.dzt[0]
* (
(vs.u[1:, 1:, 0, vs.taup1] - vs.u[:-1, 1:, 0, vs.taup1])
/ (vs.cost[npx.newaxis, 1:] * vs.dxt[1:, npx.newaxis])
+ (
vs.cosu[npx.newaxis, 1:] * vs.v[1:, 1:, 0, vs.taup1]
- vs.cosu[npx.newaxis, :-1] * vs.v[1:, :-1, 0, vs.taup1]
)
/ (vs.cost[npx.newaxis, 1:] * vs.dyt[npx.newaxis, 1:])
),
)
fxa = update(
fxa,
at[1:, 1:, 1:],
-1
* vs.maskW[1:, 1:, 1:]
* vs.dzt[npx.newaxis, npx.newaxis, 1:]
* (
(vs.u[1:, 1:, 1:, vs.taup1] - vs.u[:-1, 1:, 1:, vs.taup1])
/ (vs.cost[npx.newaxis, 1:, npx.newaxis] * vs.dxt[1:, npx.newaxis, npx.newaxis])
+ (
vs.cosu[npx.newaxis, 1:, npx.newaxis] * vs.v[1:, 1:, 1:, vs.taup1]
- vs.cosu[npx.newaxis, :-1, npx.newaxis] * vs.v[1:, :-1, 1:, vs.taup1]
)
/ (vs.cost[npx.newaxis, 1:, npx.newaxis] * vs.dyt[npx.newaxis, 1:, npx.newaxis])
),
)
vs.w = update(vs.w, at[1:, 1:, :, vs.taup1], npx.cumsum(fxa[1:, 1:, :], axis=2))
return KernelOutput(w=vs.w)
@veros_routine
def momentum(state):
"""
solve for momentum for taup1
"""
vs = state.variables
"""
time tendency due to Coriolis force
"""
vs.update(tend_coriolisf(state))
"""
wind stress forcing
"""
vs.update(tend_tauxyf(state))
"""
advection
"""
vs.update(momentum_advection(state))
with state.timers["friction"]:
friction.friction(state)
"""
external mode
"""
with state.timers["pressure"]:
streamfunction.solve_streamfunction(state)
```
#### File: veros/veros/variables.py
```python
from veros import runtime_settings
class Variable:
def __init__(
self,
name,
dims,
units="",
long_description="",
dtype=None,
time_dependent=True,
scale=1.0,
write_to_restart=False,
extra_attributes=None,
mask=None,
active=True,
initial=None,
):
if dims is not None:
dims = tuple(dims)
self.name = name
self.dims = dims
self.units = units
self.long_description = long_description
self.dtype = dtype
self.time_dependent = time_dependent
self.scale = scale
self.write_to_restart = write_to_restart
self.active = active
self.initial = initial
self.get_mask = lambda vs: None
if mask is not None:
if not callable(mask):
raise TypeError("mask argument has to be callable")
self.get_mask = mask
elif dims is not None:
if dims[:3] in DEFAULT_MASKS:
self.get_mask = DEFAULT_MASKS[dims[:3]]
elif dims[:2] in DEFAULT_MASKS:
self.get_mask = DEFAULT_MASKS[dims[:2]]
#: Additional attributes to be written in netCDF output
self.extra_attributes = extra_attributes or {}
def __repr__(self):
attr_str = []
for v in vars(self):
attr_str.append(f"{v}={getattr(self, v)}")
attr_str = ", ".join(attr_str)
return f"{self.__class__.__qualname__}({attr_str})"
# fill value for netCDF output (invalid data is replaced by this value)
FILL_VALUE = -1e18
#
XT = ("xt",)
XU = ("xu",)
YT = ("yt",)
YU = ("yu",)
ZT = ("zt",)
ZW = ("zw",)
T_HOR = ("xt", "yt")
U_HOR = ("xu", "yt")
V_HOR = ("xt", "yu")
ZETA_HOR = ("xu", "yu")
T_GRID = ("xt", "yt", "zt")
U_GRID = ("xu", "yt", "zt")
V_GRID = ("xt", "yu", "zt")
W_GRID = ("xt", "yt", "zw")
ZETA_GRID = ("xu", "yu", "zt")
TIMESTEPS = ("timesteps",)
ISLE = ("isle",)
TENSOR_COMP = ("tensor1", "tensor2")
# those are written to netCDF output by default
BASE_DIMENSIONS = XT + XU + YT + YU + ZT + ZW + ISLE
GHOST_DIMENSIONS = ("xt", "yt", "xu", "yu")
# these are the settings that are getting used to determine shapes
DIM_TO_SHAPE_VAR = {
"xt": "nx",
"xu": "nx",
"yt": "ny",
"yu": "ny",
"zt": "nz",
"zw": "nz",
"timesteps": 3,
"tensor1": 2,
"tensor2": 2,
"isle": 0,
}
DEFAULT_MASKS = {
T_HOR: lambda vs: vs.maskT[:, :, -1],
U_HOR: lambda vs: vs.maskU[:, :, -1],
V_HOR: lambda vs: vs.maskV[:, :, -1],
ZETA_HOR: lambda vs: vs.maskZ[:, :, -1],
T_GRID: lambda vs: vs.maskT,
U_GRID: lambda vs: vs.maskU,
V_GRID: lambda vs: vs.maskV,
W_GRID: lambda vs: vs.maskW,
ZETA_GRID: lambda vs: vs.maskZ,
}
# custom mask for streamfunction
ZETA_HOR_ERODED = lambda vs: vs.maskZ[:, :, -1] | vs.boundary_mask.sum(axis=2) # noqa: E731
def get_shape(dimensions, grid, include_ghosts=True, local=True):
from veros.routines import CURRENT_CONTEXT
from veros.distributed import SCATTERED_DIMENSIONS
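    # Map each requested dimension name to a concrete size: scattered dimensions are
    # divided by the number of processes along that axis when running distributed, and
    # ghost-cell dimensions are padded by 4 (two ghost cells per side, cf. remove_ghosts).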
if grid is None:
return ()
px, py = runtime_settings.num_proc
grid_shapes = dict(dimensions)
if local and CURRENT_CONTEXT.is_dist_safe:
for pxi, dims in zip((px, py), SCATTERED_DIMENSIONS):
for dim in dims:
if dim not in grid_shapes:
continue
grid_shapes[dim] = grid_shapes[dim] // pxi
if include_ghosts:
for d in GHOST_DIMENSIONS:
if d in grid_shapes:
grid_shapes[d] += 4
shape = []
for grid_dim in grid:
if isinstance(grid_dim, int):
shape.append(grid_dim)
continue
if grid_dim not in grid_shapes:
raise ValueError(f"unrecognized dimension {grid_dim}")
shape.append(grid_shapes[grid_dim])
return tuple(shape)
def remove_ghosts(array, dims):
if dims is None:
# scalar
return array
ghost_mask = tuple(slice(2, -2) if dim in GHOST_DIMENSIONS else slice(None) for dim in dims)
return array[ghost_mask]
VARIABLES = {
# scalars
"tau": Variable(
"Index of current time step",
None,
"",
"Index of current time step",
dtype="int32",
initial=1,
write_to_restart=True,
),
"taup1": Variable(
"Index of next time step", None, "", "Index of next time step", dtype="int32", initial=2, write_to_restart=True
),
"taum1": Variable(
"Index of last time step", None, "", "Index of last time step", dtype="int32", initial=0, write_to_restart=True
),
"time": Variable(
"Current time",
None,
"",
"Current time",
write_to_restart=True,
),
"itt": Variable("Current iteration", None, "", "Current iteration", dtype="int32", initial=0),
# base variables
"dxt": Variable("Zonal T-grid spacing", XT, "m", "Zonal (x) spacing of T-grid point", time_dependent=False),
"dxu": Variable("Zonal U-grid spacing", XU, "m", "Zonal (x) spacing of U-grid point", time_dependent=False),
"dyt": Variable(
"Meridional T-grid spacing", YT, "m", "Meridional (y) spacing of T-grid point", time_dependent=False
),
"dyu": Variable(
"Meridional U-grid spacing", YU, "m", "Meridional (y) spacing of U-grid point", time_dependent=False
),
"zt": Variable(
"Vertical coordinate (T)",
ZT,
"m",
"Vertical coordinate",
time_dependent=False,
extra_attributes={"positive": "up"},
),
"zw": Variable(
"Vertical coordinate (W)",
ZW,
"m",
"Vertical coordinate",
time_dependent=False,
extra_attributes={"positive": "up"},
),
"dzt": Variable("Vertical spacing (T)", ZT, "m", "Vertical spacing", time_dependent=False),
"dzw": Variable("Vertical spacing (W)", ZW, "m", "Vertical spacing", time_dependent=False),
"cost": Variable("Metric factor (T)", YT, "1", "Metric factor for spherical coordinates", time_dependent=False),
"cosu": Variable("Metric factor (U)", YU, "1", "Metric factor for spherical coordinates", time_dependent=False),
"tantr": Variable("Metric factor", YT, "1", "Metric factor for spherical coordinates", time_dependent=False),
"coriolis_t": Variable(
"Coriolis frequency", T_HOR, "1/s", "Coriolis frequency at T grid point", time_dependent=False
),
"kbot": Variable(
"Index of deepest cell",
T_HOR,
"",
"Index of the deepest grid cell (counting from 1, 0 means all land)",
dtype="int32",
time_dependent=False,
),
"ht": Variable("Total depth (T)", T_HOR, "m", "Total depth of the water column", time_dependent=False),
"hu": Variable("Total depth (U)", U_HOR, "m", "Total depth of the water column", time_dependent=False),
"hv": Variable("Total depth (V)", V_HOR, "m", "Total depth of the water column", time_dependent=False),
"hur": Variable(
"Total depth (U), masked", U_HOR, "m", "Total depth of the water column (masked)", time_dependent=False
),
"hvr": Variable(
"Total depth (V), masked", V_HOR, "m", "Total depth of the water column (masked)", time_dependent=False
),
"beta": Variable(
"Change of Coriolis freq.", T_HOR, "1/(ms)", "Change of Coriolis frequency with latitude", time_dependent=False
),
"area_t": Variable("Area of T-box", T_HOR, "m^2", "Area of T-box", time_dependent=False),
"area_u": Variable("Area of U-box", U_HOR, "m^2", "Area of U-box", time_dependent=False),
"area_v": Variable("Area of V-box", V_HOR, "m^2", "Area of V-box", time_dependent=False),
"maskT": Variable(
"Mask for tracer points",
T_GRID,
"",
"Mask in physical space for tracer points",
time_dependent=False,
dtype="bool",
),
"maskU": Variable(
"Mask for U points",
U_GRID,
"",
"Mask in physical space for U points",
time_dependent=False,
dtype="bool",
),
"maskV": Variable(
"Mask for V points",
V_GRID,
"",
"Mask in physical space for V points",
time_dependent=False,
dtype="bool",
),
"maskW": Variable(
"Mask for W points",
W_GRID,
"",
"Mask in physical space for W points",
time_dependent=False,
dtype="bool",
),
"maskZ": Variable(
"Mask for Zeta points",
ZETA_GRID,
"",
"Mask in physical space for Zeta points",
time_dependent=False,
dtype="bool",
),
"rho": Variable(
"Density",
T_GRID + TIMESTEPS,
"kg/m^3",
"In-situ density anomaly, relative to the surface mean value of 1024 kg/m^3",
write_to_restart=True,
),
"prho": Variable(
"Potential density",
T_GRID,
"kg/m^3",
"Potential density anomaly, relative to the surface mean value of 1024 kg/m^3 "
"(identical to in-situ density anomaly for equation of state type 1, 2, and 4)",
),
"int_drhodT": Variable(
"Der. of dyn. enthalpy by temperature",
T_GRID + TIMESTEPS,
"kg / (m^2 deg C)",
"Partial derivative of dynamic enthalpy by temperature",
write_to_restart=True,
),
"int_drhodS": Variable(
"Der. of dyn. enthalpy by salinity",
T_GRID + TIMESTEPS,
"kg / (m^2 g / kg)",
"Partial derivative of dynamic enthalpy by salinity",
write_to_restart=True,
),
"Nsqr": Variable(
"Square of stability frequency",
W_GRID + TIMESTEPS,
"1/s^2",
"Square of stability frequency",
write_to_restart=True,
),
"Hd": Variable("Dynamic enthalpy", T_GRID + TIMESTEPS, "m^2/s^2", "Dynamic enthalpy", write_to_restart=True),
"dHd": Variable(
"Change of dyn. enth. by adv.",
T_GRID + TIMESTEPS,
"m^2/s^3",
"Change of dynamic enthalpy due to advection",
write_to_restart=True,
),
"temp": Variable("Temperature", T_GRID + TIMESTEPS, "deg C", "Conservative temperature", write_to_restart=True),
"dtemp": Variable(
"Temperature tendency",
T_GRID + TIMESTEPS,
"deg C/s",
"Conservative temperature tendency",
write_to_restart=True,
),
"salt": Variable("Salinity", T_GRID + TIMESTEPS, "g/kg", "Salinity", write_to_restart=True),
"dsalt": Variable("Salinity tendency", T_GRID + TIMESTEPS, "g/(kg s)", "Salinity tendency", write_to_restart=True),
"dtemp_vmix": Variable(
"Change of temp. by vertical mixing",
T_GRID,
"deg C/s",
"Change of temperature due to vertical mixing",
),
"dtemp_hmix": Variable(
"Change of temp. by horizontal mixing",
T_GRID,
"deg C/s",
"Change of temperature due to horizontal mixing",
),
"dsalt_vmix": Variable(
"Change of sal. by vertical mixing",
T_GRID,
"deg C/s",
"Change of salinity due to vertical mixing",
),
"dsalt_hmix": Variable(
"Change of sal. by horizontal mixing",
T_GRID,
"deg C/s",
"Change of salinity due to horizontal mixing",
),
"dtemp_iso": Variable(
"Change of temp. by isop. mixing",
T_GRID,
"deg C/s",
"Change of temperature due to isopycnal mixing plus skew mixing",
),
"dsalt_iso": Variable(
"Change of sal. by isop. mixing",
T_GRID,
"deg C/s",
"Change of salinity due to isopycnal mixing plus skew mixing",
),
"forc_temp_surface": Variable(
"Surface temperature flux",
T_HOR,
"m deg C/s",
"Surface temperature flux",
),
"forc_salt_surface": Variable(
"Surface salinity flux",
T_HOR,
"m g/s kg",
"Surface salinity flux",
),
"u": Variable("Zonal velocity", U_GRID + TIMESTEPS, "m/s", "Zonal velocity", write_to_restart=True),
"v": Variable("Meridional velocity", V_GRID + TIMESTEPS, "m/s", "Meridional velocity", write_to_restart=True),
"w": Variable("Vertical velocity", W_GRID + TIMESTEPS, "m/s", "Vertical velocity", write_to_restart=True),
"du": Variable(
"Zonal velocity tendency", U_GRID + TIMESTEPS, "m/s", "Zonal velocity tendency", write_to_restart=True
),
"dv": Variable(
"Meridional velocity tendency", V_GRID + TIMESTEPS, "m/s", "Meridional velocity tendency", write_to_restart=True
),
"du_cor": Variable("Change of u by Coriolis force", U_GRID, "m/s^2", "Change of u due to Coriolis force"),
"dv_cor": Variable("Change of v by Coriolis force", V_GRID, "m/s^2", "Change of v due to Coriolis force"),
"du_mix": Variable(
"Change of u by vertical mixing", U_GRID, "m/s^2", "Change of u due to implicit vertical mixing"
),
"dv_mix": Variable(
"Change of v by vertical mixing", V_GRID, "m/s^2", "Change of v due to implicit vertical mixing"
),
"du_adv": Variable("Change of u by advection", U_GRID, "m/s^2", "Change of u due to advection"),
"dv_adv": Variable("Change of v by advection", V_GRID, "m/s^2", "Change of v due to advection"),
"p_hydro": Variable("Hydrostatic pressure", T_GRID, "m^2/s^2", "Hydrostatic pressure"),
"kappaM": Variable("Vertical viscosity", T_GRID, "m^2/s", "Vertical viscosity"),
"kappaH": Variable("Vertical diffusivity", W_GRID, "m^2/s", "Vertical diffusivity"),
"surface_taux": Variable(
"Surface wind stress",
U_HOR,
"N/m^2",
"Zonal surface wind stress",
),
"surface_tauy": Variable(
"Surface wind stress",
V_HOR,
"N/m^2",
"Meridional surface wind stress",
),
"forc_rho_surface": Variable("Surface density flux", T_HOR, "kg / (m^2 s)", "Surface potential density flux"),
"psi": Variable(
"Streamfunction",
ZETA_HOR + TIMESTEPS,
"m^3/s",
"Barotropic streamfunction",
write_to_restart=True,
mask=ZETA_HOR_ERODED,
),
"dpsi": Variable(
"Streamfunction tendency", ZETA_HOR + TIMESTEPS, "m^3/s^2", "Streamfunction tendency", write_to_restart=True
),
"land_map": Variable("Land map", T_HOR, "", "Land map", dtype="int32"),
"isle": Variable("Island number", ISLE, "", "Island number"),
"psin": Variable(
"Boundary streamfunction",
ZETA_HOR + ISLE,
"m^3/s",
"Boundary streamfunction",
time_dependent=False,
mask=ZETA_HOR_ERODED,
),
"dpsin": Variable(
"Boundary streamfunction factor",
ISLE + TIMESTEPS,
"m^3/s^2",
"Boundary streamfunction factor",
write_to_restart=True,
),
"line_psin": Variable(
"Boundary line integrals", ISLE + ISLE, "m^4/s^2", "Boundary line integrals", time_dependent=False
),
"boundary_mask": Variable("Boundary mask", T_HOR + ISLE, "", "Boundary mask", time_dependent=False, dtype="bool"),
"line_dir_south_mask": Variable(
"Line integral mask", T_HOR + ISLE, "", "Line integral mask", time_dependent=False, dtype="bool"
),
"line_dir_north_mask": Variable(
"Line integral mask", T_HOR + ISLE, "", "Line integral mask", time_dependent=False, dtype="bool"
),
"line_dir_east_mask": Variable(
"Line integral mask", T_HOR + ISLE, "", "Line integral mask", time_dependent=False, dtype="bool"
),
"line_dir_west_mask": Variable(
"Line integral mask", T_HOR + ISLE, "", "Line integral mask", time_dependent=False, dtype="bool"
),
"K_gm": Variable("Skewness diffusivity", W_GRID, "m^2/s", "GM diffusivity, either constant or from EKE model"),
"K_iso": Variable("Isopycnal diffusivity", W_GRID, "m^2/s", "Along-isopycnal diffusivity"),
"K_diss_v": Variable(
"Dissipation of kinetic Energy",
W_GRID,
"m^2/s^3",
"Kinetic energy dissipation by vertical, rayleigh and bottom friction",
write_to_restart=True,
),
"K_diss_bot": Variable(
"Dissipation of kinetic Energy", W_GRID, "m^2/s^3", "Mean energy dissipation by bottom and rayleigh friction"
),
"K_diss_h": Variable(
"Dissipation of kinetic Energy", W_GRID, "m^2/s^3", "Kinetic energy dissipation by horizontal friction"
),
"K_diss_gm": Variable(
"Dissipation of mean energy",
W_GRID,
"m^2/s^3",
"Mean energy dissipation by GM (TRM formalism only)",
),
"P_diss_v": Variable(
"Dissipation of potential Energy", W_GRID, "m^2/s^3", "Potential energy dissipation by vertical diffusion"
),
"P_diss_nonlin": Variable(
"Dissipation of potential Energy",
W_GRID,
"m^2/s^3",
"Potential energy dissipation by nonlinear equation of state",
),
"P_diss_iso": Variable(
"Dissipation of potential Energy", W_GRID, "m^2/s^3", "Potential energy dissipation by isopycnal mixing"
),
"P_diss_skew": Variable(
"Dissipation of potential Energy", W_GRID, "m^2/s^3", "Potential energy dissipation by GM (w/o TRM)"
),
"P_diss_hmix": Variable(
"Dissipation of potential Energy", W_GRID, "m^2/s^3", "Potential energy dissipation by horizontal mixing"
),
"P_diss_adv": Variable(
"Dissipation of potential Energy", W_GRID, "m^2/s^3", "Potential energy dissipation by advection"
),
"P_diss_sources": Variable(
"Dissipation of potential Energy",
W_GRID,
"m^2/s^3",
"Potential energy dissipation by external sources (e.g. restoring zones)",
),
"u_wgrid": Variable("U on W grid", W_GRID, "m/s", "Zonal velocity interpolated to W grid points"),
"v_wgrid": Variable("V on W grid", W_GRID, "m/s", "Meridional velocity interpolated to W grid points"),
"w_wgrid": Variable("W on W grid", W_GRID, "m/s", "Vertical velocity interpolated to W grid points"),
"xt": Variable(
"Zonal coordinate (T)",
XT,
lambda settings: "degrees_east" if settings.coord_degree else "km",
"Zonal (x) coordinate of T-grid point",
time_dependent=False,
scale=lambda settings: 1 if settings.coord_degree else 1e-3,
),
"xu": Variable(
"Zonal coordinate (U)",
XU,
lambda settings: "degrees_east" if settings.coord_degree else "km",
"Zonal (x) coordinate of U-grid point",
time_dependent=False,
scale=lambda settings: 1 if settings.coord_degree else 1e-3,
),
"yt": Variable(
"Meridional coordinate (T)",
YT,
lambda settings: "degrees_north" if settings.coord_degree else "km",
"Meridional (y) coordinate of T-grid point",
time_dependent=False,
scale=lambda settings: 1 if settings.coord_degree else 1e-3,
),
"yu": Variable(
"Meridional coordinate (U)",
YU,
lambda settings: "degrees_north" if settings.coord_degree else "km",
"Meridional (y) coordinate of U-grid point",
time_dependent=False,
scale=lambda settings: 1 if settings.coord_degree else 1e-3,
),
"temp_source": Variable(
"Source of temperature",
T_GRID,
"K/s",
"Non-conservative source of temperature",
active=lambda settings: settings.enable_tempsalt_sources,
),
"salt_source": Variable(
"Source of salt",
T_GRID,
"g/(kg s)",
"Non-conservative source of salt",
active=lambda settings: settings.enable_tempsalt_sources,
),
"u_source": Variable(
"Source of zonal velocity",
U_GRID,
"m/s^2",
"Non-conservative source of zonal velocity",
active=lambda settings: settings.enable_momentum_sources,
),
"v_source": Variable(
"Source of meridional velocity",
V_GRID,
"m/s^2",
"Non-conservative source of meridional velocity",
active=lambda settings: settings.enable_momentum_sources,
),
"K_11": Variable(
"Isopycnal mixing coefficient",
T_GRID,
"m^2/s",
"Isopycnal mixing tensor component",
active=lambda settings: settings.enable_neutral_diffusion,
),
"K_22": Variable(
"Isopycnal mixing coefficient",
T_GRID,
"m^2/s",
"Isopycnal mixing tensor component",
active=lambda settings: settings.enable_neutral_diffusion,
),
"K_33": Variable(
"Isopycnal mixing coefficient",
T_GRID,
"m^2/s",
"Isopycnal mixing tensor component",
active=lambda settings: settings.enable_neutral_diffusion,
),
"Ai_ez": Variable(
"Isopycnal diffusion coefficient",
T_GRID + TENSOR_COMP,
"Vertical isopycnal diffusion coefficient on eastern face of T cell",
"1",
active=lambda settings: settings.enable_neutral_diffusion,
),
"Ai_nz": Variable(
"Isopycnal diffusion coefficient",
T_GRID + TENSOR_COMP,
"Vertical isopycnal diffusion coefficient on northern face of T cell",
"1",
active=lambda settings: settings.enable_neutral_diffusion,
),
"Ai_bx": Variable(
"Isopycnal diffusion coefficient",
T_GRID + TENSOR_COMP,
"Zonal isopycnal diffusion coefficient on bottom face of T cell",
"1",
active=lambda settings: settings.enable_neutral_diffusion,
),
"Ai_by": Variable(
"Isopycnal diffusion coefficient",
T_GRID + TENSOR_COMP,
"Meridional isopycnal diffusion coefficient on bottom face of T cell",
"1",
active=lambda settings: settings.enable_neutral_diffusion,
),
"B1_gm": Variable(
"Zonal component of GM streamfunction",
V_GRID,
"m^2/s",
"Zonal component of GM streamfunction",
active=lambda settings: settings.enable_skew_diffusion,
),
"B2_gm": Variable(
"Meridional component of GM streamfunction",
U_GRID,
"m^2/s",
"Meridional component of GM streamfunction",
active=lambda settings: settings.enable_skew_diffusion,
),
"r_bot_var_u": Variable(
"Bottom friction coeff.",
U_HOR,
"1/s",
"Zonal bottom friction coefficient",
active=lambda settings: settings.enable_bottom_friction_var,
),
"r_bot_var_v": Variable(
"Bottom friction coeff.",
V_HOR,
"1/s",
"Meridional bottom friction coefficient",
active=lambda settings: settings.enable_bottom_friction_var,
),
"kappa_gm": Variable(
"Vertical diffusivity",
W_GRID,
"m^2/s",
"Vertical diffusivity",
active=lambda settings: settings.enable_TEM_friction,
),
"tke": Variable(
"Turbulent kinetic energy",
W_GRID + TIMESTEPS,
"m^2/s^2",
"Turbulent kinetic energy",
write_to_restart=True,
active=lambda settings: settings.enable_tke,
),
"sqrttke": Variable(
"Square-root of TKE",
W_GRID,
"m/s",
"Square-root of TKE",
active=lambda settings: settings.enable_tke,
),
"dtke": Variable(
"Turbulent kinetic energy tendency",
W_GRID + TIMESTEPS,
"m^2/s^3",
"Turbulent kinetic energy tendency",
write_to_restart=True,
active=lambda settings: settings.enable_tke,
),
"Prandtlnumber": Variable(
"Prandtl number",
W_GRID,
"",
"Prandtl number",
active=lambda settings: settings.enable_tke,
),
"mxl": Variable(
"Mixing length",
W_GRID,
"m",
"Mixing length",
active=lambda settings: settings.enable_tke,
),
"forc_tke_surface": Variable(
"TKE surface flux",
T_HOR,
"m^3/s^3",
"TKE surface flux",
active=lambda settings: settings.enable_tke,
),
"tke_diss": Variable(
"TKE dissipation",
W_GRID,
"m^2/s^3",
"TKE dissipation",
active=lambda settings: settings.enable_tke,
),
"tke_surf_corr": Variable(
"Correction of TKE surface flux",
T_HOR,
"m^3/s^3",
"Correction of TKE surface flux",
active=lambda settings: settings.enable_tke,
),
"eke": Variable(
"meso-scale energy",
W_GRID + TIMESTEPS,
"m^2/s^2",
"meso-scale energy",
write_to_restart=True,
active=lambda settings: settings.enable_eke,
),
"deke": Variable(
"meso-scale energy tendency",
W_GRID + TIMESTEPS,
"m^2/s^3",
"meso-scale energy tendency",
write_to_restart=True,
active=lambda settings: settings.enable_eke,
),
"sqrteke": Variable(
"square-root of eke",
W_GRID,
"m/s",
"square-root of eke",
active=lambda settings: settings.enable_eke,
),
"L_rossby": Variable(
"Rossby radius",
T_HOR,
"m",
"Rossby radius",
active=lambda settings: settings.enable_eke,
),
"L_rhines": Variable(
"Rhines scale",
W_GRID,
"m",
"Rhines scale",
active=lambda settings: settings.enable_eke,
),
"eke_len": Variable(
"Eddy length scale",
T_GRID,
"m",
"Eddy length scale",
active=lambda settings: settings.enable_eke,
),
"eke_diss_iw": Variable(
"Dissipation of EKE to IW",
W_GRID,
"m^2/s^3",
"Dissipation of EKE to internal waves",
active=lambda settings: settings.enable_eke,
),
"eke_diss_tke": Variable(
"Dissipation of EKE to TKE",
W_GRID,
"m^2/s^3",
"Dissipation of EKE to TKE",
active=lambda settings: settings.enable_eke,
),
"E_iw": Variable(
"Internal wave energy",
W_GRID + TIMESTEPS,
"m^2/s^2",
"Internal wave energy",
write_to_restart=True,
active=lambda settings: settings.enable_idemix,
),
"dE_iw": Variable(
"Internal wave energy tendency",
W_GRID + TIMESTEPS,
"m^2/s^2",
"Internal wave energy tendency",
write_to_restart=True,
active=lambda settings: settings.enable_idemix,
),
"c0": Variable(
"Vertical IW group velocity",
W_GRID,
"m/s",
"Vertical internal wave group velocity",
active=lambda settings: settings.enable_idemix,
),
"v0": Variable(
"Horizontal IW group velocity",
W_GRID,
"m/s",
"Horizontal internal wave group velocity",
active=lambda settings: settings.enable_idemix,
),
"alpha_c": Variable(
"?",
W_GRID,
"?",
"?",
active=lambda settings: settings.enable_idemix,
),
"iw_diss": Variable(
"IW dissipation",
W_GRID,
"m^2/s^3",
"Internal wave dissipation",
active=lambda settings: settings.enable_idemix,
),
"forc_iw_surface": Variable(
"IW surface forcing",
T_HOR,
"m^3/s^3",
"Internal wave surface forcing",
time_dependent=False,
active=lambda settings: settings.enable_idemix,
),
"forc_iw_bottom": Variable(
"IW bottom forcing",
T_HOR,
"m^3/s^3",
"Internal wave bottom forcing",
time_dependent=False,
active=lambda settings: settings.enable_idemix,
),
}
def manifest_metadata(var_meta, settings):
"""Evaluate callable metadata fields given the current settings."""
from copy import copy
out = {}
for var_name, var_val in var_meta.items():
var_val = copy(var_val)
for attr, attr_val in vars(var_val).items():
if callable(attr_val) and attr != "get_mask":
setattr(var_val, attr, attr_val(settings))
out[var_name] = var_val
return out
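# Usage sketch (assumed): resolve the settings-dependent fields, e.g. the unit and scale
# lambdas on "xt"/"yt" above, into plain values for a given settings object:
#     var_meta = manifest_metadata(VARIABLES, state.settings)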
def allocate(dimensions, grid, dtype=None, include_ghosts=True, local=True, fill=0):
from veros.core.operators import numpy as npx
if dtype is None:
dtype = runtime_settings.float_type
shape = get_shape(dimensions, grid, include_ghosts=include_ghosts, local=local)
out = npx.full(shape, fill, dtype=dtype)
if runtime_settings.backend == "numpy":
out.flags.writeable = False
return out
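# Usage sketch: momentum.py above allocates its work arrays this way, e.g.
#     flux_east = allocate(state.dimensions, ("xu", "yt", "zt"))
# which returns a zero-filled array shaped for the local (xu, yt, zt) grid, ghost cells included.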
``` |
{
"source": "jklynch/bad_seed",
"score": 2
} |
#### File: SULI/src/tensorForceEnv.py
```python
import numpy as np
import gym
from gym import spaces
from random import random
from tensorforce.environments import Environment
from tensorforce.agents import Agent
from tensorforce.execution import Runner
from heapq import nlargest
def stdDeviaiton(array):
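    # Standard deviation of the non-zero entries only; a zero in GRID marks a
    # trial that has not been sampled yet (see __init__/reset below).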
cleanedUp = np.array([])
for elem in array:
if elem != 0:
cleanedUp = np.append(cleanedUp, elem)
return np.std(cleanedUp)
class CustomEnvironment(Environment):
# LEFT = 0
# RIGHT = 1
sum = 0
extraCounter = 3
firstCount = 0
secondCount = 0
thirdCount = 0
# SAMPLES = 5
# TRIALS = 10
# GRID = []
# def __init__(self):
def __init__(self, grid_size=10):
super().__init__()
self.startingPoint = 3
# Size of the 1D-grid
self.grid_size = grid_size
# Initialize the agent at the right of the grid
self.agent_pos = self.startingPoint
self._max_episode_timesteps = 500
self.TRIALS = 100
self.SAMPLES = 5
self.GRID = []
self.minSampling = {}
self.stdDev = {}
# self.stdDevSim = {}
self.sum = 0
self.reward = 0
# self.extraCounter = self.startingPoint
# self.simulation = [[0, 0, 0, 0, 0, 0, 7, 2, 0, 0], [0, 3, 0, 0, 0, 3, 0, 0, 0, 0],[0, 0, 2, 9, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 1, 0, 0, 1, 0, 0],[0, 0, 0, 0, 0, 0, 1, 0, 8, 0]]
for i in range(self.SAMPLES):
col = []
for j in range(self.TRIALS):
if j < self.startingPoint:
col.append(random())
else:
col.append(0)
self.GRID.append(col)
for i in range(self.SAMPLES):
self.minSampling[i] = 0
for i in range(self.SAMPLES):
self.stdDev[i] = self.startingPoint
# for i in range(self.SAMPLES):
# self.stdDevSim[i] = 0
# for i in range(self.SAMPLES):
# print(i)
# print(self.simulation[i])
# print(stdDeviaiton(array=[0, 0, 0, 0, 0, 0, 1, 0, 8, 0]))
# self.stdDevSim[i] = stdDeviaiton(array=self.simulation[i])
# print(self.stdDevSim)
# print(nlargest(3, self.stdDevSim, key=self.stdDevSim.get))
# Define action and observation space
# They must be gym.spaces objects
# Example when using discrete actions, we have two: left and right
n_actions = self.SAMPLES
self.action_space = spaces.Discrete(n_actions)
# The observation will be the coordinate of the agent
# this can be described both by Discrete and Box space
self.observation_space = spaces.Box(low=0, high=self.SAMPLES,
shape=(self.SAMPLES, self.TRIALS), dtype=np.float32)
def states(self):
return dict(type='float', shape=(1))
def actions(self):
return dict(type='int', num_values=self.SAMPLES)
# Optional, should only be defined if environment has a natural maximum
# episode length
def max_episode_timesteps(self):
return super().max_episode_timesteps()
#
# # Optional
# def close(self):
# pass
def reset(self):
# self.extraCounter = self.startingPoint
CustomEnvironment.extraCounter = self.startingPoint
self.reward = 0
self.agent_pos = self.startingPoint
for i in range(self.SAMPLES):
for j in range(self.TRIALS):
if j < self.startingPoint:
self.GRID[i][j] = random()
else:
self.GRID[i][j] = 0
for i in range(self.SAMPLES):
self.minSampling[i] = 0
# here we convert to float32 to make it more general (in case we want to use continuous actions)
return np.array([self.agent_pos]).astype(np.float32)
def execute(self, actions):
# self.extraCounter += 1
CustomEnvironment.extraCounter += 1
maxStdDev = []
reward = 0
if (actions >= 0 and actions < self.SAMPLES):
for i in range(self.SAMPLES):
self.stdDev[i] = stdDeviaiton(array=self.GRID[i])
maxStdDev = nlargest(3, self.stdDev, key=self.stdDev.get)
print(actions, maxStdDev)
if actions == maxStdDev[0]:
CustomEnvironment.firstCount += 1
self.reward += 1
if actions == maxStdDev[1]:
CustomEnvironment.secondCount += 1
self.reward += 1
if actions == maxStdDev[2]:
CustomEnvironment.thirdCount += 1
self.reward += 1
# print(maxStdDev, actions)
# if self.agent_pos <= self.TRIALS:
self.GRID[actions][self.agent_pos] = random()
self.minSampling[actions] += 1
self.agent_pos += 1
print(self.reward)
else:
raise ValueError("Received invalid action={} which is not part of the action space".format(actions))
# Account for the boundaries of the grid
self.agent_pos = np.clip(self.agent_pos, 0, self.TRIALS)
# Are we at the right of the grid?
done = bool(self.agent_pos == self.TRIALS)
if done:
reward = self.reward
# reward += 1
self.sum += 1
if self.sum > 0:
CustomEnvironment.sum += reward
print(self.minSampling)
        # Tensorforce's Environment.execute is expected to return (states, terminal, reward),
        # which is also how runEnv() unpacks it below.
        returning = np.array([self.agent_pos]).astype(np.float32), done, reward
return returning
def runEnv():
environment = Environment.create(
environment=CustomEnvironment, max_episode_timesteps=500
)
agent = Agent.create(agent='a2c', environment=environment, batch_size=10, learning_rate=1e-3)
# Train for 200 episodes
# for _ in range(2):
# states = environment.reset()
# terminal = False
# while CustomEnvironment.extraCounter != 100:
# actions = agent.act(states=states)
# # print(actions)
# # print(states)
# states, terminal, reward = environment.execute(actions=actions)
# agent.observe(terminal=terminal, reward=reward)
# Evaluate for 100 episodes
sum_rewards = 0.0
for _ in range(1):
states = environment.reset()
internals = agent.initial_internals()
terminal = False
while CustomEnvironment.extraCounter != 100:
actions, internals = agent.act(states=states, internals=internals, independent=True)
states, terminal, reward = environment.execute(actions=actions)
sum_rewards += reward
# print('Mean episode reward:', sum_rewards / 100)
print(CustomEnvironment.firstCount)
print(CustomEnvironment.secondCount)
print(CustomEnvironment.thirdCount)
print(CustomEnvironment.sum)
# Close agent and environment
agent.close()
environment.close()
if __name__ == "__main__":
runEnv()
``` |
{
"source": "jklynch/bluesky-kafka",
"score": 2
} |
#### File: bluesky_kafka/tests/conftest.py
```python
import os
import tempfile
from contextlib import contextmanager
import intake
import numpy as np
import pytest
import yaml
from bluesky.tests.conftest import RE # noqa
from ophyd.tests.conftest import hw # noqa
from bluesky_kafka import BlueskyConsumer, Publisher
from bluesky_kafka.utils import create_topics, delete_topics
TEST_TOPIC = "bluesky-kafka-test"
TEST_TOPIC2 = "bluesky2-kafka-test"
def pytest_addoption(parser):
"""
Add `--kafka-bootstrap-servers` to the pytest command line parser.
"""
parser.addoption(
"--kafka-bootstrap-servers",
action="store",
default="127.0.0.1:9092",
help="comma-separated list of address:port for Kafka bootstrap servers",
)
@pytest.fixture(scope="function")
def kafka_bootstrap_servers(request):
"""
Return a comma-delimited string of Kafka bootstrap server host:port specified
on the pytest command line with option --kafka-bootstrap-servers.
Parameters
----------
request : pytest request fixture
Returns
-------
comma-delimited string of Kafka bootstrap server host:port
"""
return request.config.getoption("--kafka-bootstrap-servers")
@pytest.fixture(scope="function")
def broker_authorization_config():
return {
# "security.protocol": "SASL_PLAINTEXT",
# "sasl.mechanisms": "PLAIN",
# "sasl.username": "user",
# "sasl.password": "password",
}
@pytest.fixture(scope="function")
def temporary_topics(kafka_bootstrap_servers, broker_authorization_config):
"""
Use this "factory as a fixture and context manager" to cleanly
create new topics and delete them after a test.
If `bootstrap_servers` is not specified to the factory function
then the `kafka_bootstrap_servers` fixture will be used.
Parameters
----------
kafka_bootstrap_servers : pytest fixture
comma-delimited str of Kafka bootstrap server host:port specified on the pytest command line
broker_authorization_config: dict
Kafka broker authentication parameters for the test broker
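    For example (a sketch; the topic names here are arbitrary):
        def test_something(temporary_topics):
            with temporary_topics(["topic.abc", "topic.xyz"]) as (topic_abc, topic_xyz):
                ...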
"""
@contextmanager
def _temporary_topics(topics, bootstrap_servers=None, admin_client_config=None):
if bootstrap_servers is None:
bootstrap_servers = kafka_bootstrap_servers
if admin_client_config is None:
admin_client_config = broker_authorization_config
try:
# delete existing requested topics
# this will delete any un-consumed messages
# the intention is to make tests repeatable by ensuring
# they always start with a topics having no "old" messages
delete_topics(
bootstrap_servers=bootstrap_servers,
topics_to_delete=topics,
admin_client_config=admin_client_config,
)
create_topics(
bootstrap_servers=bootstrap_servers,
topics_to_create=topics,
admin_client_config=admin_client_config,
)
yield topics
finally:
delete_topics(
bootstrap_servers=bootstrap_servers,
topics_to_delete=topics,
admin_client_config=admin_client_config,
)
return _temporary_topics
@pytest.fixture(scope="function")
def publisher_factory(kafka_bootstrap_servers, broker_authorization_config):
"""
Use this "factory as a fixture" to create one or more Publishers in a test function.
If `bootstrap_servers` is not specified to the factory function then the `kafka_bootstrap_servers`
fixture will be used. The `serializer` parameter can be passed through **kwargs of the factory function.
For example:
def test_something(publisher_factory):
publisher_abc = publisher_factory(topic="abc")
publisher_xyz = publisher_factory(topic="xyz", serializer=pickle.dumps)
...
Parameters
----------
kafka_bootstrap_servers : pytest fixture
comma-delimited str of Kafka bootstrap server host:port specified on the pytest command line
broker_authorization_config: dict
Kafka broker authentication parameters for the test broker
Returns
-------
_publisher_factory : function(topic, key, producer_config, flush_on_stop_doc, **kwargs)
a factory function returning bluesky_kafka.Publisher instances constructed with the
specified arguments
"""
def _publisher_factory(
topic,
bootstrap_servers=None,
key=None,
producer_config=None,
**kwargs,
):
"""
Parameters
----------
topic : str
Topic to which all messages will be published.
bootstrap_servers: str
Comma-delimited list of Kafka server addresses as a string such as ``'127.0.0.1:9092'``;
default is the value of the pytest command line parameter --kafka-bootstrap-servers
key : str
Kafka "key" string. Specify a key to maintain message order. If None is specified
no ordering will be imposed on messages.
producer_config : dict, optional
Dictionary configuration information used to construct the underlying Kafka Producer.
**kwargs
**kwargs will be passed to bluesky_kafka.Publisher() and may include on_delivery,
flush_on_stop_doc, and serializer
Returns
-------
publisher : bluesky_kafka.Publisher
a Publisher instance constructed with the specified arguments
"""
if bootstrap_servers is None:
bootstrap_servers = kafka_bootstrap_servers
if producer_config is None:
# this default configuration is not guaranteed
# to be generally appropriate
producer_config = {
"acks": 1,
"enable.idempotence": False,
"request.timeout.ms": 1000,
}
producer_config.update(broker_authorization_config)
return Publisher(
topic=topic,
key=key,
bootstrap_servers=bootstrap_servers,
producer_config=producer_config,
**kwargs,
)
return _publisher_factory
@pytest.fixture(scope="function")
def consume_documents_from_kafka_until_first_stop_document(
kafka_bootstrap_servers, broker_authorization_config
):
"""Use this fixture to consume Kafka messages containing bluesky (name, document) tuples.
This fixture will construct a BlueskyConsumer and run its polling loop. When the first
stop document is encountered the consumer polling loop will terminate so the test function
can continue.
Parameters
----------
kafka_bootstrap_servers : pytest fixture
comma-delimited str of Kafka bootstrap server host:port specified on the pytest command line
broker_authorization_config: dict
Kafka broker authentication parameters for the test broker
Returns
-------
_consume_documents_from_kafka: function(topic, bootstrap_servers=None, **bluesky_consumer_kwargs) -> List[(name, document)]
calling this function will consume Kafka messages and place the (name, document)
tuples into a list; when the first stop document is encountered the consumer
polling loop will terminate and the document list will be returned
"""
def _consume_documents_from_kafka(
kafka_topic,
bootstrap_servers=None,
consumer_config=None,
**bluesky_consumer_kwargs,
):
"""
Parameters
----------
kafka_topic: str
Kafka messages with this topic will be consumed
bootstrap_servers: str, optional
Comma-delimited list of Kafka server addresses as a string such as ``'127.0.0.1:9092'``;
default is the value of the pytest command line parameter --kafka-bootstrap-servers
consumer_config: dict, optional
Dictionary of Kafka consumer configuration parameters
bluesky_consumer_kwargs:
Allows polling_duration and deserializer to be passed the the BlueskyConsumer
Returns
-------
consumed_bluesky_documents: list
list of (name, document) tuples delivered by Kafka
"""
if bootstrap_servers is None:
bootstrap_servers = kafka_bootstrap_servers
if consumer_config is None:
consumer_config = {
# this consumer is intended to read messages that
# have already been published, so it is necessary
# to specify "earliest" here
"auto.offset.reset": "earliest",
}
consumer_config.update(broker_authorization_config)
consumed_bluesky_documents = []
def store_consumed_document(consumer, topic, name, document):
"""This function appends to a list all documents received by the consumer.
Parameters
----------
consumer: bluesky_kafka.BlueskyConsumer
unused
topic: str
unused
name: str
bluesky document name, such as "start", "descriptor", "event", etc
document: dict
dictionary of bluesky document data
"""
consumed_bluesky_documents.append((name, document))
bluesky_consumer = BlueskyConsumer(
topics=[kafka_topic],
bootstrap_servers=bootstrap_servers,
group_id=f"{kafka_topic}.consumer.group",
consumer_config=consumer_config,
process_document=store_consumed_document,
**bluesky_consumer_kwargs,
)
def until_first_stop_document():
"""
This function returns False to end the BlueskyConsumer polling loop after seeing
a "stop" document. Without something like this the polling loop will never end.
"""
if "stop" in [name for name, _ in consumed_bluesky_documents]:
return False
else:
return True
# start() will return when 'until_first_stop_document' returns False
bluesky_consumer.start(
continue_polling=until_first_stop_document,
)
return consumed_bluesky_documents
return _consume_documents_from_kafka
@pytest.fixture(scope="function")
def publisher(request, kafka_bootstrap_servers, broker_authorization_config):
# work with a single broker
producer_config = {
"acks": 1,
"enable.idempotence": False,
"request.timeout.ms": 5000,
}
producer_config.update(broker_authorization_config)
return Publisher(
topic=TEST_TOPIC,
bootstrap_servers=kafka_bootstrap_servers,
key="kafka-unit-test-key",
producer_config=producer_config,
flush_on_stop_doc=True,
)
@pytest.fixture(scope="function")
def publisher2(request, kafka_bootstrap_servers, broker_authorization_config):
# work with a single broker
producer_config = {
"acks": 1,
"enable.idempotence": False,
"request.timeout.ms": 5000,
}
producer_config.update(broker_authorization_config)
return Publisher(
topic=TEST_TOPIC2,
bootstrap_servers=kafka_bootstrap_servers,
key="kafka-unit-test-key",
# work with a single broker
producer_config=producer_config,
flush_on_stop_doc=True,
)
@pytest.fixture(scope="function")
def mongo_client(request):
mongobox = pytest.importorskip("mongobox")
box = mongobox.MongoBox()
box.start()
return box.client()
@pytest.fixture(scope="function")
def mongo_uri(request, mongo_client):
return f"mongodb://{mongo_client.address[0]}:{mongo_client.address[1]}"
@pytest.fixture(scope="function")
def numpy_md(request):
return {
"numpy_data": {"nested": np.array([1, 2, 3])},
"numpy_scalar": np.float64(3),
"numpy_array": np.ones((3, 3)),
}
@pytest.fixture(scope="function")
def data_broker(request, mongo_uri):
TMP_DIR = tempfile.mkdtemp()
YAML_FILENAME = "intake_test_catalog.yml"
fullname = os.path.join(TMP_DIR, YAML_FILENAME)
# Write a catalog file.
with open(fullname, "w") as f:
f.write(
f"""
sources:
xyz:
description: Some imaginary beamline
driver: "bluesky-mongo-normalized-catalog"
container: catalog
args:
metadatastore_db: {mongo_uri}/{TEST_TOPIC}
asset_registry_db: {mongo_uri}/{TEST_TOPIC}
handler_registry:
NPY_SEQ: ophyd.sim.NumpySeqHandler
metadata:
beamline: "00-ID"
xyz2:
description: Some imaginary beamline
driver: "bluesky-mongo-normalized-catalog"
container: catalog
args:
metadatastore_db: {mongo_uri}/{TEST_TOPIC2}
asset_registry_db: {mongo_uri}/{TEST_TOPIC2}
handler_registry:
NPY_SEQ: ophyd.sim.NumpySeqHandler
metadata:
beamline: "00-ID"
"""
)
def load_config(filename):
package_directory = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(package_directory, filename)
with open(filename) as f:
return yaml.load(f, Loader=getattr(yaml, "FullLoader", yaml.Loader))
# Create a databroker with the catalog config file.
return intake.open_catalog(fullname)
``` |
{
"source": "jklynch/bluesky-mpl",
"score": 3
} |
#### File: bluesky_mpl/artists/grid.py
```python
from event_model import DocumentRouter
import matplotlib.pyplot as plt
import numpy
class Grid(DocumentRouter):
"""
    Draw a matplotlib AxesImage Artist and update it for each Event.
    The purpose of this callback is to create (on initialization) a
    matplotlib grid image and then update it with new data for every `event`.
NOTE: Some important parameters are fed in through **kwargs like `extent`
which defines the axes min and max and `origin` which defines if the grid
co-ordinates start in the bottom left or top left of the plot. For more
info see https://matplotlib.org/tutorials/intermediate/imshow_extent.html
or https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.imshow.html#matplotlib.axes.Axes.imshow
Parameters
----------
func : callable
This must accept a BulkEvent and return three lists of floats (x
grid co-ordinates, y grid co-ordinates and grid position intensity
values). The three lists must contain an equal number of items, but
that number is arbitrary. That is, a given document may add one new
point, no new points or multiple new points to the plot.
shape : tuple
The (row, col) shape of the grid.
ax : matplotlib Axes, optional.
if ``None``, a new Figure and Axes are created.
**kwargs
Passed through to :meth:`Axes.imshow` to style the AxesImage object.
"""
def __init__(self, func, shape, *, ax=None, **kwargs):
self.func = func
self.shape = shape
if ax is None:
_, ax = plt.subplots()
self.ax = ax
self.grid_data = numpy.full(self.shape, numpy.nan)
        self.image = ax.imshow(self.grid_data, **kwargs)  # imshow returns a single AxesImage
def event_page(self, doc):
'''
Takes in a bulk_events document and updates grid_data with the values
returned from self.func(doc)
Parameters
----------
doc : dict
The bulk event dictionary that contains the 'data' and 'timestamps'
associated with the bulk event.
Returns
-------
x_coords, y_coords, I_vals : Lists
These are lists of x co-ordinate, y co-ordinate and intensity
values arising from the bulk event.
'''
x_coords, y_coords, I_vals = self.func(doc)
self._update(x_coords, y_coords, I_vals)
def _update(self, x_coords, y_coords, I_vals):
'''
Updates self.grid_data with the values from the lists x_coords,
y_coords, I_vals.
Parameters
----------
x_coords, y_coords, I_vals : Lists
These are lists of x co-ordinate, y co-ordinate and intensity
values arising from the event. The length of all three lists must
be the same.
'''
if not len(x_coords) == len(y_coords) == len(I_vals):
raise ValueError("User function is expected to provide the same "
"number of x, y and I points. Got {0} x points, "
"{1} y points and {2} I values."
"".format(len(x_coords), len(y_coords),
len(I_vals)))
if not x_coords:
# No new data, Short-circuit.
return
# Update grid_data and the plot.
self.grid_data[x_coords, y_coords] = I_vals
self.image.set_array(self.grid_data)
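# A minimal sketch of a ``func`` compatible with Grid (an assumption for
# illustration, not part of the original module). The event_page field names
# 'x_index', 'y_index' and 'intensity' are hypothetical; a real callback would
# use the data keys produced by the plan.
def example_grid_func(doc):
    """Return (x_coords, y_coords, I_vals) lists extracted from an event_page."""
    data = doc['data']
    return (list(data['x_index']),
            list(data['y_index']),
            list(data['intensity']))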
```
#### File: bluesky_mpl/artists/image.py
```python
import logging
from event_model import DocumentRouter
import numpy
log = logging.getLogger(__name__)
class Image(DocumentRouter):
"""
    Draw a matplotlib Image Artist and update it for each Event.
Parameters
----------
func : callable
        This must accept an EventPage and return a 2-D array of image data to
        draw, or None to indicate that the page contains nothing new to draw.
label_template : string
This string will be formatted with the RunStart document. Any missing
values will be filled with '?'. If the keyword argument 'label' is
given, this argument will be ignored.
ax : matplotlib Axes, optional
If None, a new Figure and Axes are created.
**kwargs
        Passed through to :meth:`Axes.imshow` to style the AxesImage object.
"""
def __init__(self, func, shape, *, label_template='{scan_id} [{uid:.8}]', ax=None, **kwargs):
self.func = func
if ax is None:
import matplotlib.pyplot as plt
_, ax = plt.subplots()
self.ax = ax
if len(self.ax.images) == 1:
self.image, = self.ax.images
elif len(self.ax.images) == 0:
self.image = ax.imshow(numpy.zeros(shape), **kwargs)
self.ax.figure.colorbar(self.image, ax=self.ax)
self.label_template = label_template
else:
raise ValueError(f"Expected ax to be an axis with no image "
f"artists or one image artist. Found "
f"ax.images={self.ax.images}")
def event_page(self, doc):
data = self.func(doc)
if data is not None:
self._update(data)
def _update(self, arr):
"""
Takes in new array data and redraws plot if they are not empty.
"""
if arr.ndim != 2:
raise ValueError(
f'The number of dimensions must be 2, but received array '
                f'has {arr.ndim} dimensions.')
self.image.set_array(arr)
new_clim = self.infer_clim(self.image.get_clim(), arr)
self.image.set_clim(*new_clim)
self.ax.figure.canvas.draw_idle()
def infer_clim(self, current_clim, arr):
return (min(current_clim[0], arr.min()), max(current_clim[1], arr.max()))
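# A minimal sketch of a ``func`` compatible with Image (an assumption, not part
# of the original module): it returns the first frame of a hypothetical
# 'camera_image' field as a 2-D array, or None when the key is absent. It relies
# on the numpy import at the top of this module.
def example_image_func(event_page):
    """Return a 2-D array extracted from an event_page, or None."""
    frames = event_page['data'].get('camera_image')
    if frames is None:
        return None
    # Assume the leading axis is the event axis; slice out the first frame.
    return numpy.asarray(frames)[0]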
```
#### File: bluesky-mpl/bluesky_mpl/demo.py
```python
import logging
from multiprocessing import Process, Queue
from pathlib import Path
from suitcase.jsonl import Serializer
from bluesky import RunEngine
from ophyd.sim import det, det4, noisy_det, motor, motor1, motor2, img
from bluesky.plans import scan, count, grid_scan
from bluesky.preprocessors import SupplementalData
from event_model import RunRouter
from ophyd.sim import SynSignal
import numpy as np
det.kind = 'hinted'
noisy_det.kind = 'hinted'
det4.kind = 'hinted'
log = logging.getLogger('bluesky_mpl')
random_img = SynSignal(func=lambda: np.random.random((5, 10, 10)), name='random_img')
def generate_example_catalog(data_path):
data_path = Path(data_path)
def factory(name, doc):
serializer = Serializer(data_path / 'abc')
serializer('start', doc)
return [serializer], []
RE = RunEngine()
sd = SupplementalData()
RE.preprocessors.append(sd)
sd.baseline.extend([motor1, motor2])
rr = RunRouter([factory])
RE.subscribe(rr)
RE(count([det]))
RE(count([noisy_det], 5))
RE(scan([det], motor, -1, 1, 7))
RE(grid_scan([det4], motor1, -1, 1, 4, motor2, -1, 1, 7, False))
RE(scan([det], motor, -1, 1, motor2, -1, 1, 5))
RE(count([noisy_det, det], 5))
RE(count([random_img], 5))
RE(count([img], 5))
def factory(name, doc):
serializer = Serializer(data_path / 'xyz')
serializer('start', doc)
return [serializer], []
RE = RunEngine()
rr = RunRouter([factory])
RE.subscribe(rr)
RE(count([det], 3))
catalog_filepath = data_path / 'catalog.yml'
with open(catalog_filepath, 'w') as file:
file.write(f'''
sources:
abc:
description: Some imaginary beamline
driver: bluesky-jsonl-catalog
container: catalog
args:
paths: {Path(data_path) / 'abc' / '*.jsonl'}
handler_registry:
NPY_SEQ: ophyd.sim.NumpySeqHandler
metadata:
beamline: "00-ID"
xyz:
description: Some imaginary beamline
driver: bluesky-jsonl-catalog
container: catalog
args:
paths: {Path(data_path) / 'xyz' / '*.jsonl'}
handler_registry:
NPY_SEQ: ophyd.sim.NumpySeqHandler
metadata:
beamline: "99-ID"
''')
return str(catalog_filepath)
def run_proxy(queue):
"""
Run Proxy on random, free ports and communicate the port numbers back.
"""
from bluesky.callbacks.zmq import Proxy
proxy = Proxy()
queue.put((proxy.in_port, proxy.out_port))
proxy.start()
def run_publisher(in_port, data_path):
"""
Acquire data in an infinite loop and publish it.
"""
import asyncio
from bluesky.callbacks.zmq import Publisher
from suitcase.jsonl import Serializer
from ophyd.sim import noisy_det, motor1, motor2
from bluesky.plans import count
from bluesky.preprocessors import SupplementalData
from bluesky.plan_stubs import sleep
publisher = Publisher(f'localhost:{in_port}')
RE = RunEngine(loop=asyncio.new_event_loop())
sd = SupplementalData()
RE.preprocessors.append(sd)
sd.baseline.extend([motor1, motor2])
RE.subscribe(publisher)
def factory(name, doc):
serializer = Serializer(data_path / 'abc', flush=True)
serializer('start', doc)
return [serializer], []
rr = RunRouter([factory])
RE.subscribe(rr)
def infinite_plan():
while True:
yield from sleep(3)
yield from count([noisy_det], 20, delay=0.5)
yield from count([random_img], 10, delay=1)
# Just as a convenience, avoid collission with scan_ids of runs in Catalog.
RE.md['scan_id'] = 100
try:
RE(infinite_plan())
finally:
RE.halt()
def stream_example_data(data_path):
data_path = Path(data_path)
log.debug(f"Serializing example data into directory {data_path!s}")
queue = Queue()
proxy_process = Process(target=run_proxy, args=(queue,))
proxy_process.start()
in_port, out_port = queue.get()
log.debug(f"Demo Proxy is listening on port {in_port} and publishing to {out_port}.")
publisher_process = Process(target=run_publisher, args=(in_port, data_path))
publisher_process.start()
log.debug("Demo acquisition has started.")
return f'localhost:{out_port}', proxy_process, publisher_process
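# A usage sketch (an assumption, not part of the original module): build the
# example catalog and start the streaming demo from a temporary directory.
#
#     import tempfile
#     data_path = tempfile.mkdtemp()
#     catalog_filepath = generate_example_catalog(data_path)
#     address, proxy_process, publisher_process = stream_example_data(data_path)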
```
#### File: bluesky_mpl/heuristics/image.py
```python
import functools
import logging
import numpy
from traitlets import default
from traitlets.traitlets import Dict, Type
from traitlets.config import Configurable
from ..utils import load_config, Callable
log = logging.getLogger('bluesky_mpl')
def first_frame(event_page, image_key):
"""
Extract the first frame image data to plot out of an EventPage.
"""
if event_page['seq_num'][0] == 1:
data = numpy.asarray(event_page['data'][image_key])
log.debug('Image from %s has shape %r', image_key, data.shape)
if data.ndim == 3:
# Axes are event axis, y, x. Slice out the first event.
return data[0, ...]
elif data.ndim == 4:
# Axes are event axis, 'num_images' stack, y, x.
# Slice out the first event and sum along 'num_images' stack.
return data[0, ...].sum(0)
else:
raise ValueError(
f'The number of dimensions for the image_key "{image_key}" '
f'must be 3 or 4 for event page {event_page}, but received array '
                f'has {data.ndim} dimensions.')
else:
return None
def latest_frame(event_page, image_key):
"""
Extract the most recent frame of image data to plot out of an EventPage.
"""
data = numpy.asarray(event_page['data'][image_key])
if event_page['seq_num'][0] == 1:
# Just log once per event stream.
log.debug('Image from %s has shape %r', image_key, data.shape)
if data.ndim == 3:
# Axes are event axis, y, x. Slice out the first event.
return data[0, ...]
elif data.ndim == 4:
# Axes are event axis, 'num_images' stack, y, x.
# Slice out the first event and sum along 'num_images' stack.
return data[0, ...].sum(0)
else:
raise ValueError(
f'The number of dimensions for the image_key "{image_key}" '
            f'must be 3 or 4 for event page {event_page}, but received array '
            f'has {data.ndim} dimensions.')
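# A minimal sketch (an assumption, not part of the original module) of the
# event_page structure these heuristics expect, using a hypothetical
# 'camera_image' key holding a (1 event, 5, 5) array:
#
#     page = {'seq_num': [1], 'data': {'camera_image': numpy.ones((1, 5, 5))}}
#     first_frame(page, 'camera_image')   # -> 5x5 array from the first event
#     latest_frame(page, 'camera_image')  # -> 5x5 array from the first event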
class BaseImageManager(Configurable):
"""
Manage the image plots for one FigureManager.
"""
imshow_options = Dict({}, config=True)
image_class = Type()
@default('image_class')
def default_image_class(self):
# By defining the default value of image_class dynamically here, we
# avoid importing matplotlib if some non-matplotlib image_class is
        # specified by configuration.
from ..artists.mpl.image import Image
return Image
def __init__(self, fig_manager, dimensions):
self.update_config(load_config())
self.fig_manager = fig_manager
self.start_doc = None
# We do not actually do anything with self.dimensions, just stashing it
# here in case we need it later.
self.dimensions = dimensions
def __call__(self, name, start_doc):
# We do not actually do anything with self.start_doc, just stashing it
# here in case we need it later.
self.start_doc = start_doc
return [], [self.subfactory]
def subfactory(self, name, descriptor_doc):
image_keys = {}
for key, data_key in descriptor_doc['data_keys'].items():
ndim = len(data_key['shape'] or [])
# We want to record a shape that will match the arr.shape
# of the arrays we will see later. Ophyd has been writing
# incorrect info into descriptors. We try to detect and correct
# that here.
if ndim == 2:
shape = data_key['shape']
image_keys[key] = shape
elif ndim == 3:
# ophyd <1.4.0 gives (x, y, z) where z is 0
# Maybe the better way to detect this is start['version']['ophyd'].
if data_key['shape'][-1] == 0:
object_keys = descriptor_doc.get('object_keys', {})
for object_name, data_keys in object_keys.items():
if key in data_keys:
object_name = object_name # used below
break
else:
log.debug("Couldn't find %s in object_keys %r", key, object_keys)
# Unable to handle this. Skip it.
continue
num_images = descriptor_doc['configuration'][object_name]['data'].get('num_images', -1)
x, y, _ = data_key['shape']
shape = (num_images, y, x)
image_keys[key] = shape[1:] # Stash (y, x) shape alone.
log.debug("Patching the shape in the data key for %s"
"from %r to %r", key, data_key['shape'], shape)
else:
# Assume we are getting correct metadata.
shape = data_key['shape'][1:] # Stash (y, x) shape alone.
image_keys[key] = shape
else:
continue
log.debug('%s has %d-dimensional image of shape %r',
key, ndim, shape)
callbacks = []
for image_key, shape in image_keys.items():
caption_desc = f'{" ".join(self.func.__name__.split("_")).capitalize()}'
figure_label = f'{caption_desc} of {image_key}'
fig = self.fig_manager.get_figure(
('image', image_key), figure_label, 1)
# If we are reusing an existing figure, it will have a second axis
# for the colorbar, which we should ignore.
# This is likely a bit brittle.
ax, *_possible_colorbar = fig.axes
log.debug('plot image %s', image_key)
func = functools.partial(self.func, image_key=image_key)
image = self.image_class(func, shape=shape, ax=ax, **self.imshow_options)
callbacks.append(image)
for callback in callbacks:
callback('start', self.start_doc)
callback('descriptor', descriptor_doc)
return callbacks
class FirstFrameImageManager(BaseImageManager):
func = Callable(first_frame, config=True)
class LatestFrameImageManager(BaseImageManager):
func = Callable(latest_frame, config=True)
```
#### File: bluesky_mpl/qt/viewer.py
```python
import functools
import os
import re
import event_model
import matplotlib
from traitlets.traitlets import Dict, DottedObjectName, List
from qtpy.QtWidgets import QApplication, QMainWindow, QWidget, QVBoxLayout
from qtpy.QtCore import QObject, Signal
from qtpy import QtCore, QtGui
from .figures import FigureDispatcher
from .utils import (
ConfigurableQTabWidget,
)
from ..utils import load_config
@functools.lru_cache(maxsize=1)
def _get_teleporter():
class Teleporter(QObject):
name_doc_escape = Signal(str, dict, bool)
return Teleporter
class QtAwareCallback:
def __init__(self, *args, use_teleporter=None, **kwargs):
if use_teleporter is None:
use_teleporter = 'qt' in matplotlib.get_backend().lower()
if use_teleporter:
Teleporter = _get_teleporter()
self.__teleporter = Teleporter()
self.__teleporter.name_doc_escape.connect(self._dispatch)
else:
self.__teleporter = None
super().__init__(*args, **kwargs)
def __call__(self, name, doc, validate=False):
if self.__teleporter is not None:
self.__teleporter.name_doc_escape.emit(name, doc, validate)
else:
self._dispatch(name, doc, validate)
class QRunRouter(event_model.RunRouter, QtAwareCallback):
...
qApp = None
def _create_qApp():
"""
    Create QApplication if one does not exist. Return QApplication.instance().
Vendored from matplotlib.backends.backend_qt5 with changes:
- Assume Qt5, removing tolerance for Qt4.
    - Application has been changed (matplotlib -> bluesky).
"""
global qApp
if qApp is None:
app = QApplication.instance()
if app is None:
# check for DISPLAY env variable on X11 build of Qt
try:
from PyQt5 import QtX11Extras # noqa
is_x11_build = True
except ImportError:
is_x11_build = False
else:
is_x11_build = hasattr(QtGui, "QX11Info")
if is_x11_build:
display = os.environ.get('DISPLAY')
if display is None or not re.search(r':\d', display):
raise RuntimeError('Invalid DISPLAY variable')
try:
QApplication.setAttribute(
QtCore.Qt.AA_EnableHighDpiScaling)
except AttributeError: # Attribute only exists for Qt>=5.6.
pass
qApp = QApplication(["bluesky"])
qApp.lastWindowClosed.connect(qApp.quit)
else:
qApp = app
try:
qApp.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)
except AttributeError:
pass
def start_viewer():
matplotlib.use('Qt5Agg')
_create_qApp()
main_window = QMainWindow()
viewer = Viewer()
main_window.setCentralWidget(viewer)
main_window.show()
# Avoid letting main_window be garbage collected.
viewer._main_window = main_window
return viewer
class Viewer(QWidget):
name_doc = Signal(str, dict)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
layout = QVBoxLayout()
outer_tab_container = OuterTabContainer()
layout.addWidget(outer_tab_container)
self.setLayout(layout)
self.name_doc.connect(outer_tab_container.run_router)
def __call__(self, name, doc):
self.name_doc.emit(name, doc)
class OuterTabContainer(ConfigurableQTabWidget):
def __init__(self, *args, **kwargs):
self.update_config(load_config())
self.overplot = False
self.run_router = QRunRouter(
[self.get_tab_run_router])
super().__init__(*args, **kwargs)
def get_tab_run_router(self, name, doc):
if self.overplot:
tab = self.currentWidget()
else:
tab = InnerTabContainer()
label = '' # TODO: What should this be?
self.addTab(tab, label)
tab.run_router('start', doc)
return [tab.run_router], []
class InnerTabContainer(ConfigurableQTabWidget):
factories = List([FigureDispatcher], config=True)
handler_registry = Dict(DottedObjectName(), config=True)
def __init__(self, *args, **kwargs):
self.update_config(load_config())
super().__init__(*args, **kwargs)
self.run_router = QRunRouter(
[factory(self.addTab) for factory in self.factories],
handler_registry=self.handler_registry)
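# A usage sketch (an assumption, not part of the original module): start the Qt
# viewer and subscribe it to a RunEngine so that documents stream into it.
#
#     from bluesky import RunEngine
#     RE = RunEngine()
#     viewer = start_viewer()
#     RE.subscribe(viewer)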
``` |
{
"source": "jklynch/bluesky-widgets",
"score": 3
} |
#### File: examples/advanced/qt_with_RE_worker.py
```python
from bluesky_widgets.utils.streaming import stream_documents_into_runs
from bluesky_widgets.qt.zmq_dispatcher import RemoteDispatcher
from bluesky_widgets.models.plot_builders import Lines
from bluesky_widgets.qt.figures import QtFigure
from bluesky_widgets.qt import gui_qt
from bluesky_queueserver.manager.comms import ZMQCommSendThreads
import sys
import time
def main():
with gui_qt("Example App"):
worker_address, message_bus_address = sys.argv[1:]
dispatcher = RemoteDispatcher(message_bus_address)
client = ZMQCommSendThreads(zmq_server_address=worker_address)
# NOTE: this example starts only if RE Manager is idle and the queue is cleared.
# Those are optional steps that ensure that the code in this example is executed correctly.
# Check if RE Worker environment already exists and RE manager is idle.
status = client.send_message(method="status")
if status["manager_state"] != "idle":
raise RuntimeError(
f"RE Manager state must be 'idle': current state: {status['manager_state']}"
)
# Clear the queue.
response = client.send_message(method="queue_clear")
if not response["success"]:
raise RuntimeError(f"Failed to clear the plan queue: {response['msg']}")
# Open the new environment only if it does not exist.
if not status["worker_environment_exists"]:
# Initiate opening of RE Worker environment
response = client.send_message(method="environment_open")
if not response["success"]:
raise RuntimeError(
f"Failed to open RE Worker environment: {response['msg']}"
)
# Wait for the environment to be created.
t_timeout = 10
t_stop = time.time() + t_timeout
while True:
status2 = client.send_message(method="status")
if (
status2["worker_environment_exists"]
and status2["manager_state"] == "idle"
):
break
if time.time() > t_stop:
raise RuntimeError("Failed to start RE Worker: timeout occurred")
time.sleep(0.5)
# Add plan to queue
response = client.send_message(
method="queue_item_add",
params={
"plan": {"name": "scan", "args": [["det"], "motor", -5, 5, 11]},
"user": "",
"user_group": "admin",
},
)
if not response["success"]:
raise RuntimeError(f"Failed to add plan to the queue: {response['msg']}")
model = Lines("motor", ["det"], max_runs=3)
dispatcher.subscribe(stream_documents_into_runs(model.add_run))
view = QtFigure(model.figure)
view.show()
dispatcher.start()
response = client.send_message(method="queue_start")
if not response["success"]:
raise RuntimeError(f"Failed to start the queue: {response['msg']}")
if __name__ == "__main__":
main()
```
#### File: auto_plot_builders/_tests/test_auto_images.py
```python
from bluesky_live.run_builder import build_simple_run
import numpy
from .. import AutoImages
from ....headless.figures import HeadlessFigures
def test_images():
"Test AutoImages with a 2D array."
run = build_simple_run({"ccd": numpy.random.random((11, 13))})
model = AutoImages()
view = HeadlessFigures(model.figures)
assert not model.figures
model.add_run(run)
assert len(model.figures) == 1
assert model.figures[0].axes[0].artists
view.close()
def test_images_multiple_fields():
"Test AutoImages with multiple fields with varied shapes."
run = build_simple_run(
{
"ccd": numpy.random.random((11, 13)),
"ccd2": numpy.random.random((17, 19, 23)),
}
)
model = AutoImages()
view = HeadlessFigures(model.figures)
assert not model.figures
model.add_run(run)
assert len(model.figures) == 2
assert model.figures[0].axes[0].artists
assert model.figures[1].axes[0].artists
view.close()
```
#### File: auto_plot_builders/_tests/test_auto_lines.py
```python
from bluesky_live.run_builder import build_simple_run
from .. import AutoLines
from ....headless.figures import HeadlessFigures
# Make some runs to use.
runs = [
build_simple_run(
{"motor": [1, 2], "det": [10, 20], "det2": [15, 25]},
metadata={"scan_id": 1 + i},
)
for i in range(10)
]
MAX_RUNS = 3
def test_pinned():
"Test AutoLines with 'pinned' and un-pinned runs."
NUM_YS = 2
model = AutoLines(max_runs=MAX_RUNS)
view = HeadlessFigures(model.figures)
assert not model.figures
# Add MAX_RUNS and then some more and check that they do get bumped off.
for run in runs[:5]:
model.add_run(run)
assert len(model.plot_builders[0].runs) <= MAX_RUNS
assert runs[2:5] == list(model.plot_builders[0].runs)
assert len(model.figures) == 1
# Add a pinned run.
pinned_run = runs[5]
model.add_run(pinned_run, pinned=True)
assert (
frozenset([pinned_run.metadata["start"]["uid"]])
== model.plot_builders[0].pinned
)
for run in runs[6:]:
model.add_run(run)
assert len(model.plot_builders[0].runs) == 1 + MAX_RUNS
for axes_index in range(NUM_YS):
assert len(model.figures[0].axes[axes_index].artists) == (1 + MAX_RUNS)
# Check that it hasn't been bumped off.
assert pinned_run in model.plot_builders[0].runs
assert len(model.figures) == 1
# Remove the pinned run.
model.discard_run(pinned_run)
assert len(model.plot_builders[0].runs) == MAX_RUNS
for axes_index in range(NUM_YS):
assert len(model.figures[0].axes[axes_index].artists) == MAX_RUNS
assert pinned_run not in model.plot_builders[0].runs
view.close()
def test_decrease_max_runs():
"Decreasing max_runs should remove the runs and their associated lines."
INITIAL_MAX_RUNS = 5
model = AutoLines(max_runs=INITIAL_MAX_RUNS)
view = HeadlessFigures(model.figures)
for run in runs[:5]:
model.add_run(run)
assert len(model.plot_builders[0].runs) == INITIAL_MAX_RUNS
assert len(model.figures[0].axes[0].artists) == INITIAL_MAX_RUNS
# Decrease max_runs.
model.max_runs = MAX_RUNS
assert len(model.plot_builders[0].runs) == MAX_RUNS
assert len(model.figures[0].axes[0].artists) == MAX_RUNS
view.close()
```
#### File: models/_tests/test_figures.py
```python
from bluesky_live.run_builder import RunBuilder
from bluesky_live.event import CallbackException
import pytest
from ...models.plot_specs import (
AxesAlreadySet,
Figure,
FigureList,
Axes,
Line,
Image,
)
# Generate example data.
with RunBuilder() as builder:
builder.add_stream("primary", data={"a": [1, 2, 3], "b": [1, 4, 9]})
run = builder.get_run()
def transform(run):
ds = run.primary.read()
return {"x": ds["a"], "y": ds["b"]}
def func(run):
line = Line.from_run(transform, run, "label")
axes = Axes(artists=[line], x_label="a", y_label="b", title="axes title")
figure = Figure((axes,), title="figure title")
return figure
def test_figure(FigureView):
"Basic test: create a FigureView."
figure = func(run)
FigureView(figure)
def test_figures(FigureViews):
"Basic test: create FigureViews."
figure = func(run)
another_figure = func(run)
figures = FigureList([figure, another_figure])
FigureViews(figures)
def test_figure_title_syncing(FigureView):
model = func(run)
view = FigureView(model)
initial = model.title
assert view.figure._suptitle.get_text() == initial
expected = "figure title changed"
model.title = expected
assert model.title == expected
# Is there no public matplotlib API for this?
# https://stackoverflow.com/a/48917679
assert view.figure._suptitle.get_text() == expected
def test_short_title_syncing(FigureViews, request):
QtFigures = pytest.importorskip("bluesky_widgets.qt.figures.QtFigures")
if request.getfixturevalue("FigureViews") is not QtFigures:
pytest.skip("This tests details of the QtFigures view.")
model = func(run)
figures = FigureList([model])
view = FigureViews(figures)
actual_title = view.figures[model.uuid].figure._suptitle.get_text()
assert view.tabText(0) == actual_title
expected_short_title = "new short title"
model.short_title = expected_short_title
assert model.short_title == expected_short_title
actual_title = view.figures[model.uuid].figure._suptitle.get_text()
assert view.tabText(0) == expected_short_title
assert actual_title == model.title
expected_title = "new title"
model.title = expected_title
assert view.tabText(0) == model.short_title
model.short_title = None
assert view.tabText(0) == expected_title
def test_non_null_short_title_syncing(FigureViews, request):
QtFigures = pytest.importorskip("bluesky_widgets.qt.figures.QtFigures")
if request.getfixturevalue("FigureViews") is not QtFigures:
pytest.skip("This tests details of the QtFigures view.")
model = func(run)
model.short_title = "short title"
figures = FigureList([model])
view = FigureViews(figures)
actual_title = view.figures[model.uuid].figure._suptitle.get_text()
assert view.tabText(0) == model.short_title
assert actual_title == model.title
@pytest.mark.parametrize(
("model_property", "mpl_method"),
[("title", "get_title"), ("x_label", "get_xlabel"), ("y_label", "get_ylabel")],
)
def test_axes_syncing(FigureView, model_property, mpl_method):
model = func(run)
view = FigureView(model)
initial = getattr(model.axes[0], model_property)
assert getattr(view.figure.axes[0], mpl_method)() == initial
expected = "axes title changed"
setattr(model.axes[0], model_property, expected)
assert getattr(model.axes[0], model_property) == expected
assert getattr(view.figure.axes[0], mpl_method)() == expected
def test_axes_set_figure():
"Adding axes to a figure sets their figure."
axes = Axes()
assert axes.figure is None
figure = Figure((axes,), title="figure title")
assert axes.figure is figure
# Once axes belong to a figure, they cannot belong to another figure.
with pytest.raises(RuntimeError):
Figure((axes,), title="figure title")
with pytest.raises(AttributeError):
figure.axes = (axes,) # not settable
artist_set_axes_params = pytest.mark.parametrize(
"artist_factory",
# These are factories because each artist can only be assigned to Axes once
# in its lifecycle. For each test that these params are used in, we need a
# fresh instance.
[
lambda: Line.from_run(transform, run, "label"),
lambda: Image.from_run(transform, run, "label"),
],
ids=["lines", "images"],
)
@artist_set_axes_params
def test_artist_set_axes_at_init(artist_factory):
"Adding an artist to axes at init time sets its axes."
artist = artist_factory()
axes = Axes(artists=[artist])
assert artist in axes.artists
assert artist in axes.by_uuid.values()
assert artist.axes is axes
# Once line belong to a axes, it cannot belong to another axes.
with pytest.raises(CallbackException) as exc_info:
Axes(artists=[artist])
exc = exc_info.value
assert hasattr(exc, "__cause__") and isinstance(exc.__cause__, AxesAlreadySet)
@artist_set_axes_params
def test_artist_set_axes_after_init(artist_factory):
"Adding an artist to axes after init time sets its axes."
artist = artist_factory()
axes = Axes()
axes.artists.append(artist)
assert artist in axes.artists
assert artist in axes.by_uuid.values()
assert artist.axes is axes
# Once line belong to a axes, it cannot belong to another axes.
with pytest.raises(CallbackException) as exc_info:
Axes(artists=[artist])
exc = exc_info.value
assert hasattr(exc, "__cause__") and isinstance(exc.__cause__, AxesAlreadySet)
```
#### File: models/_tests/test_rastered_image.py
```python
from bluesky_live.run_builder import RunBuilder
import pytest
import numpy
from ..plot_builders import RasteredImages
from ..plot_specs import Axes, Figure
@pytest.fixture
def non_snaking_run():
# Test data
md = {"motors": ["y", "x"], "shape": [2, 2], "snaking": (False, False)}
with RunBuilder(md) as builder:
builder.add_stream(
"primary", data={"ccd": [1, 2, 3, 4], "x": [0, 1, 0, 1], "y": [0, 0, 1, 1]}
)
run = builder.get_run()
return run
@pytest.fixture
def snaking_run():
# Test data
md = {"motors": ["y", "x"], "shape": [2, 2], "snaking": (False, True)}
with RunBuilder(md) as builder:
builder.add_stream(
"primary", data={"ccd": [1, 2, 3, 4], "x": [0, 1, 1, 0], "y": [0, 0, 1, 1]}
)
run = builder.get_run()
return run
def test_rastered_image(non_snaking_run, FigureView):
"Test RasteredImages with a 2D array."
run = non_snaking_run
model = RasteredImages("ccd", shape=(2, 2))
view = FigureView(model.figure)
assert not model.figure.axes[0].artists
model.add_run(run)
assert model.figure.axes[0].artists
view.close()
def test_x_y_positive_change_x_y_limits(non_snaking_run, FigureView):
"Test x_positive and y_positive change x_limits and y_limits"
run = non_snaking_run
model = RasteredImages("ccd", shape=(2, 2), x_positive="left", y_positive="down")
view = FigureView(model.figure)
model.add_run(run)
expected_x_lims = expected_y_lims = (1.5, -0.5)
assert model.axes.x_limits == expected_x_lims
assert model.axes.y_limits == expected_y_lims
model.x_positive = "right"
model.y_positive = "up"
expected_x_lims = expected_y_lims = (-0.5, 1.5)
assert model.axes.x_limits == expected_x_lims
assert model.axes.y_limits == expected_y_lims
view.close()
def test_x_y_limits_change_x_y_positive(non_snaking_run, FigureView):
"Test x_limits and y_limits change x_positive and y_positive"
run = non_snaking_run
axes = Axes(x_limits=(1.5, -0.5), y_limits=(1.5, -0.5))
Figure((axes,), title="")
model = RasteredImages("ccd", shape=(2, 2), axes=axes)
view = FigureView(model.figure)
model.add_run(run)
assert model.x_positive == "left"
assert model.y_positive == "down"
model.axes.x_limits = model.axes.y_limits = (-0.5, 1.5)
assert model.x_positive == "right"
assert model.y_positive == "up"
view.close()
def test_non_snaking_image_data(non_snaking_run, FigureView):
run = non_snaking_run
model = RasteredImages("ccd", shape=(2, 2))
model.add_run(run)
view = FigureView(model.figure)
actual_data = model.figure.axes[0].artists[0].update()["array"]
expected_data = [[1, 2], [3, 4]]
assert numpy.array_equal(actual_data, expected_data)
view.close()
def test_snaking_image_data(snaking_run, FigureView):
run = snaking_run
model = RasteredImages("ccd", shape=(2, 2))
view = FigureView(model.figure)
model.add_run(run)
actual_data = model.figure.axes[0].artists[0].update()["array"]
expected_data = [[1, 2], [4, 3]]
assert numpy.array_equal(actual_data, expected_data)
view.close()
def test_non_snaking_image_data_positions(FigureView):
md = {"motors": ["y", "x"], "shape": [2, 2], "snaking": (False, False)}
model = RasteredImages("ccd", shape=(2, 2))
view = FigureView(model.figure)
with RunBuilder(md) as builder:
ccd = iter([1, 2, 3, 4])
x = iter([0, 1, 0, 1])
y = iter([0, 0, 1, 1])
run = builder.get_run()
model.add_run(run)
# First data point
builder.add_stream(
"primary", data={"ccd": [next(ccd)], "x": [next(x)], "y": [next(y)]}
)
actual_data = model.figure.axes[0].artists[0].update()["array"]
expected_data = [[1, numpy.nan], [numpy.nan, numpy.nan]]
assert numpy.array_equal(actual_data, expected_data, equal_nan=True)
# Second point
builder.add_data(
"primary", data={"ccd": [next(ccd)], "x": [next(x)], "y": [next(y)]}
)
actual_data = model.figure.axes[0].artists[0].update()["array"]
expected_data = [[1, 2], [numpy.nan, numpy.nan]]
assert numpy.array_equal(actual_data, expected_data, equal_nan=True)
# Third point
builder.add_data(
"primary", data={"ccd": [next(ccd)], "x": [next(x)], "y": [next(y)]}
)
actual_data = model.figure.axes[0].artists[0].update()["array"]
expected_data = [[1, 2], [3, numpy.nan]]
assert numpy.array_equal(actual_data, expected_data, equal_nan=True)
# Fourth point
builder.add_data(
"primary", data={"ccd": [next(ccd)], "x": [next(x)], "y": [next(y)]}
)
actual_data = model.figure.axes[0].artists[0].update()["array"]
expected_data = [[1, 2], [3, 4]]
assert numpy.array_equal(actual_data, expected_data, equal_nan=True)
view.close()
def test_snaking_image_data_positions(FigureView):
md = {"motors": ["y", "x"], "shape": [2, 2], "snaking": (False, True)}
model = RasteredImages("ccd", shape=(2, 2))
view = FigureView(model.figure)
with RunBuilder(md) as builder:
ccd = iter([1, 2, 3, 4])
x = iter([0, 1, 1, 0])
y = iter([0, 0, 1, 1])
run = builder.get_run()
model.add_run(run)
# First data point
builder.add_stream(
"primary", data={"ccd": [next(ccd)], "x": [next(x)], "y": [next(y)]}
)
actual_data = model.figure.axes[0].artists[0].update()["array"]
expected_data = [[1, numpy.nan], [numpy.nan, numpy.nan]]
assert numpy.array_equal(actual_data, expected_data, equal_nan=True)
# Second point
builder.add_data(
"primary", data={"ccd": [next(ccd)], "x": [next(x)], "y": [next(y)]}
)
actual_data = model.figure.axes[0].artists[0].update()["array"]
expected_data = [[1, 2], [numpy.nan, numpy.nan]]
assert numpy.array_equal(actual_data, expected_data, equal_nan=True)
# Third point
builder.add_data(
"primary", data={"ccd": [next(ccd)], "x": [next(x)], "y": [next(y)]}
)
actual_data = model.figure.axes[0].artists[0].update()["array"]
expected_data = [[1, 2], [numpy.nan, 3]]
assert numpy.array_equal(actual_data, expected_data, equal_nan=True)
# Fourth point
builder.add_data(
"primary", data={"ccd": [next(ccd)], "x": [next(x)], "y": [next(y)]}
)
actual_data = model.figure.axes[0].artists[0].update()["array"]
expected_data = [[1, 2], [4, 3]]
assert numpy.array_equal(actual_data, expected_data, equal_nan=True)
view.close()
def test_figure_set_after_instantiation():
axes = Axes()
model = RasteredImages("ccd", shape=(2, 2), axes=axes)
assert model.figure is None
figure = Figure((axes,), title="")
assert model.figure is figure
``` |
{
"source": "jklynch/databroker",
"score": 3
} |
#### File: databroker/databroker/discovery.py
```python
import collections
from databroker.utils import list_configs, lookup_config, CONFIG_SEARCH_PATH
import entrypoints
from intake.catalog import Catalog
from intake.catalog.entry import CatalogEntry
import warnings
class EntrypointEntry(CatalogEntry):
"""
A catalog entry for an entrypoint.
"""
def __init__(self, entrypoint):
self._entrypoint = entrypoint
def __repr__(self):
return f"<Entry containing Catalog named {self.name}>"
@property
def name(self):
return self._entrypoint.name
def describe(self):
"""Basic information about this entry"""
return {'name': self.name,
'module_name': self._entrypoint.module_name,
'object_name': self._entrypoint.object_name,
'distro': self._entrypoint.distro,
'extras': self._entrypoint.extras}
def get(self):
"""Instantiate the DataSource for the given parameters"""
return self._entrypoint.load()
class EntrypointsCatalog(Catalog):
"""
A catalog of discovered entrypoint catalogs.
"""
def __init__(self, *args, entrypoints_group='intake.catalogs', paths=None,
**kwargs):
self._entrypoints_group = entrypoints_group
self._paths = paths
super().__init__(*args, **kwargs)
def _load(self):
catalogs = entrypoints.get_group_named(self._entrypoints_group,
path=self._paths)
self.name = self.name or 'EntrypointsCatalog'
self.description = (self.description
or f'EntrypointsCatalog of {len(catalogs)} catalogs.')
for name, entrypoint in catalogs.items():
try:
self._entries[name] = EntrypointEntry(entrypoint)
except Exception as e:
warnings.warn(f"Failed to load {name}, {entrypoint}, {e!r}.")
class V0Entry(CatalogEntry):
def __init__(self, name, *args, **kwargs):
self._name = name
super().__init__(*args, **kwargs)
def __repr__(self):
return f"<Entry containing Catalog named {self._name}>"
def describe(self):
return {"name": self._name}
def get(self):
# Hide this import here so that
# importing v2 doesn't import v1 unless we actually use it.
from databroker import v1
config = lookup_config(self._name)
catalog = v1.from_config(config) # might return v0, v1, or v2 Broker
if not hasattr(catalog, 'v2'):
raise ValueError("The config file could not be parsed for v2-style access.")
return catalog.v2 # works if catalog is v1-style or v2-style
class V0Catalog(Catalog):
"""
Build v2.Brokers based on any v0-style configs we can find.
"""
def __init__(self, *args, paths=CONFIG_SEARCH_PATH, **kwargs):
self._paths = paths
super().__init__(*args, **kwargs)
def _load(self):
for name in list_configs(paths=self._paths):
self._entries[name] = V0Entry(name)
class MergedCatalog(Catalog):
"""
A Catalog that merges the entries of a list of catalogs.
"""
def __init__(self, catalogs, *args, **kwargs):
self._catalogs = catalogs
super().__init__(*args, **kwargs)
def _load(self):
for catalog in self._catalogs:
catalog._load()
def _make_entries_container(self):
return collections.ChainMap(*(catalog._entries for catalog in self._catalogs))
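# A minimal composition sketch (an assumption, not part of the original module):
# merge entrypoint-discovered catalogs with any v0-style configs found on disk.
#
#     catalog = MergedCatalog([EntrypointsCatalog(), V0Catalog()])
#     list(catalog)  # entry names from both sources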
```
#### File: databroker/databroker/projector.py
```python
import xarray
from importlib import import_module
from .core import BlueskyRun
class ProjectionError(Exception):
pass
def get_run_projection(run: BlueskyRun, projection_name: str = None):
"""Finds a projection in the run.
If projection_name is provided, searches through the projections in the run
to find a match.
Otherwise, looks in the run to see if there is only one projection. If so, returns it.
Parameters
----------
run : BlueskyRun
Run to investigate for a projection
projection_name : str, optional
name of the projection to look for, by default None
Returns
-------
dict
returns a projection dictionary, or None of not found
Raises
------
KeyError
If the a projection_name is specified and there is more than one
projection in the run with that name
"""
if projection_name is not None:
projections = [projection for projection in run.metadata['start']['projections']
if projection.get('name') == projection_name]
if len(projections) > 1:
raise KeyError("Multiple projections of name {projection_name} found")
if len(projections) == 1:
return projections[0]
if len(projections) == 0:
return None
if 'projections' in run.metadata['start'] and len(run.metadata['start']['projections']) == 1:
return run.metadata['start']['projections'][0]
return None
def get_calculated_value(run: BlueskyRun, key: str, mapping: dict):
"""Calls and returns the callable from the calculated projection mapping.
It is ancticipated that the return will be
and xarray.DataArray.
This should be expressed in the familiar 'module:func' syntax borrowed from python entry-points.
An example implementation of a calculated field projection entry:
'/projection/key': {
"type": "calculated",
"callable": "foo.bar:really_fun",
"args": ['arg1'], "kwargs": {"foo": "bar"}}
And a corresponding function implementation might be:
        def really_fun(run, *args, **kwargs):
# args will be ['arg1']
# kwargs will be {"foo": "bar"}
# for this calculated field
return xarray.DataArray[[1, 2, 3]]
Parameters
----------
run : BlueskyRun
        run which can be used for the calculation
key : str
key name for this projection
mapping : dict
full contents of this projection
Returns
-------
any
        result of calling the method specified in the calculated field in the projection
Raises
------
ProjectionError
        raised if the callable specified in the projection cannot be imported
"""
callable_name = mapping['callable']
try:
module_name, function_name = callable_name.split(":")
module = import_module(module_name)
callable_func = getattr(module, function_name)
    except Exception as e:
        raise ProjectionError(f'Error importing callable {callable_name}') from e
calc_args = mapping['args']
calc_kwargs = mapping['kwargs']
return callable_func(run, *calc_args, **calc_kwargs)
def project_xarray(run: BlueskyRun, *args, projection=None, projection_name=None, **kwargs):
"""Produces an xarray Dataset by projecting the provided run. Selects projection based on
logic of get_run_projection().
    Projections come with multiple types: linked and calculated. Calculated fields are only supported
in the data (not at the top-level attrs).
Calculated fields in projections schema contain a callable field. This should be expressed in
the familiar 'module:func' syntax borrowed from python entry-points.
All projections with "location"="configuration" will look in the start document
for metadata. Each field will be added to the return Dataset's attrs dictionary keyed
on projection key.
All projections with "location"="event" will look for a field in a stream.
Parameters
----------
run : BlueskyRun
run to project
projection_name : str, optional
name of a projection to select in the run, by default None
projection : dict, optional
projection not from the run to use, by default None
Returns
-------
xarray.Dataset
The return Dataset will contain:
- single value data (typically from the run start) in the return Dataset's attrs dict, keyed
on the projection key. These are projections marked "location": "configuration"
- multi-value data (typically from a stream). Keys for the dict-like xarray.Dataset match keys
in the passed-in projection. These are projections with "location": "linked"
Raises
------
ProjectionError
"""
try:
if projection is None:
projection = get_run_projection(run, projection_name)
if projection is None:
raise ProjectionError("Projection could not be found")
attrs = {} # will populate the return Dataset attrs field
data_vars = {} # will populate the return Dataset DataArrays
for field_key, mapping in projection['projection'].items():
# go through each projection
projection_type = mapping['type']
projection_location = mapping.get('location')
projection_data = None
projection_linked_field = mapping.get('field')
# single value data that will go in the top
# dataset's attributes
if projection_location == 'configuration':
attrs[field_key] = run.metadata['start'][projection_linked_field]
continue
# added to return Dataset in data_vars dict
if projection_type == "calculated":
data_vars[field_key] = get_calculated_value(run, field_key, mapping)
continue
# added to return Dataset in data_vars dict
if projection_location == 'event':
projection_stream = mapping.get('stream')
if projection_stream is None:
raise ProjectionError(f'stream missing for event projection: {field_key}')
data_vars[field_key] = run[projection_stream].to_dask()[projection_linked_field]
elif projection_location == 'configuration':
attrs[field_key] = projection_data
else:
raise KeyError(f'Unknown location: {projection_location} in projection.')
except Exception as e:
raise ProjectionError('Error projecting run') from e
return xarray.Dataset(data_vars, attrs=attrs)
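# A minimal usage sketch (an assumption, not part of the original module) of a
# projection that project_xarray understands; the field, stream, and key names
# are hypothetical.
#
#     projection = {
#         "name": "example",
#         "projection": {
#             "/entry/sample_name": {
#                 "type": "linked",
#                 "location": "configuration",
#                 "field": "sample_name",
#             },
#             "/entry/image": {
#                 "type": "linked",
#                 "location": "event",
#                 "stream": "primary",
#                 "field": "ccd",
#             },
#         },
#     }
#     dataset = project_xarray(run, projection=projection)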
```
#### File: databroker/databroker/v1.py
```python
from collections.abc import Iterable
from collections import defaultdict
from datetime import datetime
import pandas
import re
import warnings
import time
import humanize
import jinja2
import os
import shutil
from types import SimpleNamespace
import tzlocal
import xarray
import event_model
import intake
import pymongo
# Toolz and CyToolz have identical APIs -- same test suite, docstrings.
try:
from cytoolz.dicttoolz import merge
except ImportError:
from toolz.dicttoolz import merge
from .utils import (ALL, format_time, get_fields, wrap_in_deprecated_doct,
ensure_path_exists, lookup_config,
transpose)
# The v2 API is expected to grow more options for filled than just True/False
# (e.g. 'delayed') so it expects a string instead of a boolean.
_FILL = {True: 'yes', False: 'no'}
def temp_config():
raise NotImplementedError("Use temp() instead, which returns a v1.Broker.")
def temp():
from .v2 import temp
catalog = temp()
return Broker(catalog)
class Registry:
"""
An accessor that serves as a backward-compatible shim for Broker.reg
"""
def __init__(self, catalog):
self._catalog = catalog
@property
def handler_reg(self):
return self._catalog.handler_registry
@property
def root_map(self):
return self._catalog.root_map
def register_handler(self, key, handler, overwrite=False):
return self._catalog.register_handler(
key, handler, overwrite=overwrite)
def deregister_handler(self, key):
return self._catalog.deregister_handler(key)
def copy_files(self, resource, new_root,
verify=False, file_rename_hook=None,
run_start_uid=None):
"""
Copy files associated with a resource to a new directory.
The registered handler must have a `get_file_list` method and the
process running this method must have read/write access to both the
source and destination file systems.
        This method does *not* update the asset registry.
Internally the resource level directory information is stored
as two parts: the root and the resource_path. The 'root' is
the non-semantic component (typically a mount point) and the
'resource_path' is the 'semantic' part of the file path. For
example, it is common to collect data into paths that look like
``/mnt/DATA/2016/04/28``. In this case we could split this as
``/mnt/DATA`` as the 'root' and ``2016/04/28`` as the resource_path.
Parameters
----------
resource : Document
The resource to move the files of
new_root : str
The new 'root' to copy the files into
verify : bool, optional (False)
Verify that the move happened correctly. This currently
is not implemented and will raise if ``verify == True``.
file_rename_hook : callable, optional
If provided, must be a callable with signature ::
def hook(file_counter, total_number, old_name, new_name):
pass
This will be run in the inner loop of the file copy step and is
run inside of an unconditional try/except block.
See Also
--------
`RegistryMoving.shift_root`
`RegistryMoving.change_root`
"""
if verify:
raise NotImplementedError('Verification is not implemented yet')
def rename_hook_wrapper(hook):
if hook is None:
def noop(n, total, old_name, new_name):
return
return noop
def safe_hook(n, total, old_name, new_name):
try:
hook(n, total, old_name, new_name)
except Exception:
pass
return safe_hook
file_rename_hook = rename_hook_wrapper(file_rename_hook)
run_start_uid = resource.get('run_start', run_start_uid)
if run_start_uid is None:
raise ValueError(
"If the Resource document has no `run_start` key, the "
"caller must provide run_start_uid.")
file_list = self._catalog[run_start_uid].get_file_list(resource)
# check that all files share the same root
old_root = resource.get('root')
if not old_root:
warnings.warn("There is no 'root' in this resource which "
"is required to be able to change the root. "
"Please use `fs.shift_root` to move some of "
"the path from the 'resource_path' to the "
"'root'. For now assuming '/' as root")
old_root = os.path.sep
for f in file_list:
if not f.startswith(old_root):
raise RuntimeError('something is very wrong, the files '
'do not all share the same root, ABORT')
# sort out where new files should go
new_file_list = [os.path.join(new_root,
os.path.relpath(f, old_root))
for f in file_list]
N = len(new_file_list)
# copy the files to the new location
for n, (fin, fout) in enumerate(zip(file_list, new_file_list)):
# copy files
file_rename_hook(n, N, fin, fout)
ensure_path_exists(os.path.dirname(fout))
shutil.copy2(fin, fout)
return zip(file_list, new_file_list)
class Broker:
"""
This supports the original Broker API but implemented on intake.Catalog.
"""
def __init__(self, catalog, *, serializer=None,
external_fetchers=None):
self._catalog = catalog
self.__serializer = serializer
self.external_fetchers = external_fetchers or {}
self.prepare_hook = wrap_in_deprecated_doct
self.aliases = {}
self.filters = {}
self.v2._Broker__v1 = self
self._reg = Registry(catalog)
@property
def _serializer(self):
if self.__serializer is None:
            # The method _get_serializer is an optional method implemented on
# some Broker subclasses to support the Broker.insert() method,
# which is pending deprecation.
if hasattr(self._catalog, '_get_serializer'):
self.__serializer = self._catalog._get_serializer()
return self.__serializer
@property
def reg(self):
"Registry of externally-stored data"
return self._reg
@property
def name(self):
return self._catalog.name
@property
def v1(self):
"A self-reference. This makes v1.Broker and v2.Broker symmetric."
return self
@property
def v2(self):
"Accessor to the version 2 API."
return self._catalog
@classmethod
def from_config(cls, config, auto_register=True, name=None):
return from_config(
config=config, auto_register=auto_register, name=name)
def get_config(self):
"""
Return the v0 config dict this was created from, or None if N/A.
"""
if hasattr(self, '_config'):
return self._config
@classmethod
def named(cls, name, auto_register=True):
"""
Create a new Broker instance using a configuration file with this name.
Configuration file search path:
* ``~/.config/databroker/{name}.yml``
* ``{python}/../etc/databroker/{name}.yml``
* ``/etc/databroker/{name}.yml``
where ``{python}`` is the location of the current Python binary, as
reported by ``sys.executable``. It will use the first match it finds.
Special Case: The name ``'temp'`` creates a new, temporary
configuration. Subsequent calls to ``Broker.named('temp')`` will
create separate configurations. Any data saved using this temporary
configuration will not be accessible once the ``Broker`` instance has
been deleted.
Parameters
----------
name : string
auto_register : boolean, optional
By default, automatically register built-in asset handlers (classes
that handle I/O for externally stored data). Set this to ``False``
to do all registration manually.
Returns
-------
db : Broker
"""
if name == 'temp':
return temp()
else:
try:
config = lookup_config(name)
except FileNotFoundError:
# Continue on to the v2 way.
pass
else:
db = cls.from_config(config, auto_register=auto_register, name=name)
return db
catalog = getattr(intake.cat, name)
return Broker(catalog)
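    # A usage sketch (an assumption, not part of the original module): create a
    # throwaway Broker and query it through the v1 API defined in this class.
    #
    #     db = Broker.named('temp')
    #     headers = db(plan_name='scan')   # search by metadata
    #     table = db.get_table(db[-1])     # most recent run as a DataFrame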
@property
def fs(self):
warnings.warn("fs is deprecated, use `db.reg` instead", stacklevel=2)
return self.reg
def stream_names_given_header(self):
return list(self._catalog)
def fetch_external(self, start, stop):
return {k: func(start, stop) for
k, func in self.external_fetchers.items()}
def _patch_state(self, catalog):
"Copy references to v1 state."
catalog.v1.aliases = self.aliases
catalog.v1.filters = self.filters
catalog.v1.prepare_hook = self.prepare_hook
def __call__(self, text_search=None, **kwargs):
data_key = kwargs.pop('data_key', None)
tz = tzlocal.get_localzone().zone
if self.filters:
filters = self.filters.copy()
format_time(filters, tz) # mutates in place
catalog = self._catalog.search(filters)
self._patch_state(catalog)
else:
catalog = self._catalog
if text_search:
kwargs.update({'$text': {'$search': text_search}})
format_time(kwargs, tz) # mutates in place
result_catalog = catalog.search(kwargs)
self._patch_state(result_catalog)
return Results(self, result_catalog,
data_key)
def __getitem__(self, key):
# If this came from a client, we might be getting '-1'.
if not isinstance(key, str) and isinstance(key, Iterable):
return [self[item] for item in key]
if isinstance(key, slice):
if key.start is not None and key.start > -1:
raise ValueError("slice.start must be negative. You gave me "
"key=%s The offending part is key.start=%s"
% (key, key.start))
if key.stop is not None and key.stop > 0:
raise ValueError("slice.stop must be <= 0. You gave me key=%s. "
"The offending part is key.stop = %s"
% (key, key.stop))
if key.start is None:
raise ValueError("slice.start cannot be None because we do not "
"support slicing infinitely into the past; "
"the size of the result is non-deterministic "
"and could become too large.")
return [self[index]
for index in reversed(range(key.start, key.stop or 0, key.step or 1))]
datasource = self._catalog[key]
return Header(datasource)
get_fields = staticmethod(get_fields)
def get_documents(self,
headers, stream_name=ALL, fields=None, fill=False,
handler_registry=None):
"""
Get all documents from one or more runs.
Parameters
----------
headers : Header or iterable of Headers
The headers to fetch the events for
stream_name : str, optional
Get events from only "event stream" with this name.
Default is `ALL` which yields documents for all streams.
fields : List[str], optional
whitelist of field names of interest; if None, all are returned
Default is None
fill : bool or Iterable[str], optional
Which fields to fill. If `True`, fill all
possible fields.
Each event will have the data filled for the intersection
            of its external keys and the fields requested filled.
Default is False
handler_registry : dict, optional
            mapping asset specs (strings) to handlers (callable classes)
Yields
------
name : str
The name of the kind of document
doc : dict
The payload, may be RunStart, RunStop, EventDescriptor, or Event.
Raises
------
ValueError if any key in `fields` is not in at least one descriptor
        per header.
"""
if handler_registry is not None:
raise NotImplementedError("The handler_registry must be set when "
"the Broker is initialized, usually specified "
"in a configuration file.")
headers = _ensure_list(headers)
no_fields_filter = False
if fields is None:
no_fields_filter = True
fields = []
fields = set(fields)
comp_re = _compile_re(fields)
for header in headers:
uid = header.start['uid']
descs = header.descriptors
per_desc_discards = {}
per_desc_extra_data = {}
per_desc_extra_ts = {}
for d in descs:
(all_extra_dk, all_extra_data,
all_extra_ts, discard_fields) = _extract_extra_data(
header.start, header.stop, d, fields, comp_re,
no_fields_filter)
per_desc_discards[d['uid']] = discard_fields
per_desc_extra_data[d['uid']] = all_extra_data
per_desc_extra_ts[d['uid']] = all_extra_ts
d = d.copy()
dict.__setitem__(d, 'data_keys', d['data_keys'].copy())
for k in discard_fields:
del d['data_keys'][k]
d['data_keys'].update(all_extra_dk)
if not len(d['data_keys']) and not len(all_extra_data):
continue
def merge_config_into_event(event):
# Mutate event in place, adding in data and timestamps from the
# descriptor's 'configuration' key.
event_data = event['data'] # cache for perf
desc = event['descriptor']
event_timestamps = event['timestamps']
event_data.update(per_desc_extra_data[desc])
event_timestamps.update(per_desc_extra_ts[desc])
discard_fields = per_desc_discards[desc]
for field in discard_fields:
del event_data[field]
del event_timestamps[field]
get_documents_router = _GetDocumentsRouter(self.prepare_hook,
merge_config_into_event,
stream_name=stream_name)
for name, doc in self._catalog[uid].canonical(fill=_FILL[bool(fill)],
strict_order=True):
yield from get_documents_router(name, doc)
def get_events(self,
headers, stream_name='primary', fields=None, fill=False,
handler_registry=None):
"""
Get Event documents from one or more runs.
Parameters
----------
headers : Header or iterable of Headers
The headers to fetch the events for
stream_name : str, optional
Get events from only "event stream" with this name.
Default is 'primary'
fields : List[str], optional
whitelist of field names of interest; if None, all are returned
Default is None
fill : bool or Iterable[str], optional
Which fields to fill. If `True`, fill all
possible fields.
Each event will have the data filled for the intersection
of its external keys and the fields requested.
Default is False
handler_registry : dict, optional
mapping asset specs (strings) to handlers (callable classes)
Yields
------
event : Event
The event, optionally with non-scalar data filled in
Raises
------
ValueError if any key in `fields` is not in at least one descriptor
per header.
"""
if handler_registry is not None:
raise NotImplementedError("The handler_registry must be set when "
"the Broker is initialized, usually specified "
"in a configuration file.")
for name, doc in self.get_documents(headers,
fields=fields,
stream_name=stream_name,
fill=fill,
handler_registry=handler_registry):
if name == 'event':
yield doc
def get_table(self,
headers, stream_name='primary', fields=None, fill=False,
handler_registry=None,
convert_times=True, timezone=None, localize_times=True):
"""
Load the data from one or more runs as a table (``pandas.DataFrame``).
Parameters
----------
headers : Header or iterable of Headers
The headers to fetch the events for
stream_name : str, optional
Get events from only "event stream" with this name.
Default is 'primary'
fields : List[str], optional
whitelist of field names of interest; if None, all are returned
Default is None
fill : bool or Iterable[str], optional
Which fields to fill. If `True`, fill all
possible fields.
Each event will have the data filled for the intersection
of its external keys and the fields requested.
Default is False
handler_registry : dict, optional
mapping filestore specs (strings) to handlers (callable classes)
convert_times : bool, optional
Whether to convert times from float (seconds since 1970) to
numpy datetime64, using pandas. True by default.
timezone : str, optional
e.g., 'US/Eastern'; if None, use metadatastore configuration in
`self.mds.config['timezone']`
handler_registry : dict, optional
mapping asset specs (strings) to handlers (callable classes)
localize_times : bool, optional
If the times should be localized to the 'local' time zone. If
True (the default) the time stamps are converted to the local time
zone (as configured in mds).
This is problematic for several reasons:
- apparent gaps or duplicate times around DST transitions
- incompatibility with every other time stamp (which is in UTC)
however, this makes the dataframe repr look nicer
This implies convert_times.
Defaults to True to preserve back-compatibility.
Returns
-------
table : pandas.DataFrame
"""
if handler_registry is not None:
raise NotImplementedError(
"The handler_registry must be set when "
"the Broker is initialized, usually specified "
"in a configuration file.")
headers = _ensure_list(headers)
# TODO --- Use local time I guess.
# if timezone is None:
# timezone = self.mds.config['timezone']
no_fields_filter = False
if fields is None:
no_fields_filter = True
fields = []
fields = set(fields)
comp_re = _compile_re(fields)
dfs = []
for header in headers:
descs = header.descriptors
start = header.start
stop = header.stop
descs = [desc for desc in descs if desc.get('name') == stream_name]
for descriptor in descs:
(all_extra_dk, all_extra_data,
all_extra_ts, discard_fields) = _extract_extra_data(
start, stop, descriptor, fields, comp_re, no_fields_filter)
all_events = [
doc for name, doc in
self.get_documents(header, stream_name=stream_name, fill=fill)
if name == 'event' and
doc['descriptor'] == descriptor['uid']]
seq_nums = [ev['seq_num'] for ev in all_events]
times = [ev['time'] for ev in all_events]
keys = list(descriptor['data_keys'])
data = transpose(all_events, keys, 'data')
# timestamps = transpose(all_events, keys, 'timestamps')
df = pandas.DataFrame(index=seq_nums)
# if converting to datetime64 (in utc or 'local' tz)
if convert_times or localize_times:
times = pandas.to_datetime(times, unit='s')
# make sure this is a series
times = pandas.Series(times, index=seq_nums)
# if localizing to 'local' time
if localize_times:
times = (times
.dt.tz_localize('UTC') # first make tz aware
# .dt.tz_convert(timezone) # convert to 'local'
.dt.tz_localize(None) # make naive again
)
df['time'] = times
for field, values in data.items():
if field in discard_fields:
continue
df[field] = values
if list(df.columns) == ['time']:
# no content
continue
for field, v in all_extra_data.items():
df[field] = v
dfs.append(df)
if dfs:
result = pandas.concat(dfs)
else:
# edge case: no data
result = pandas.DataFrame()
result.index.name = 'seq_num'
return result
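# Illustrative usage (editor's sketch, same assumed `db` and header `h` as above):
#
#     df = db.get_table(h, stream_name='primary', fields=['intensity'])
#     df.head()
#
# 'intensity' is a hypothetical field name; any data key present in the run works.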
def get_images(self, headers, name,
stream_name='primary',
handler_registry=None,):
"""
This method is deprecated. Use Broker.get_documents instead.
Load image data from one or more runs into a lazy array-like object.
Parameters
----------
headers : Header or list of Headers
name : string
field name (data key) of a detector
handler_registry : dict, optional
mapping spec names (strings) to handlers (callable classes)
Examples
--------
>>> header = db[-1]
>>> images = Images(header, 'my_detector_lightfield')
>>> for image in images:
# do something
"""
# Defer this import so that pims is an optional dependency.
from ._legacy_images import Images
headers = _ensure_list(headers)
datasets = [header.xarray_dask(stream_name=stream_name)
for header in headers]
if handler_registry is not None:
raise NotImplementedError(
"The handler_registry parameter is no longer supported "
"and must be None.")
dataset = xarray.merge(datasets)
data_array = dataset[name]
return Images(data_array=data_array)
def alias(self, key, **query):
"""
Create an alias for a query.
Parameters
----------
key : string
must be a valid Python identifier
query :
keyword argument comprising a query
Examples
--------
Define an alias that searches for headers with purpose='calibration'.
>>> db.alias('cal', purpose='calibration')
Use it.
>>> headers = db.cal # -> db(purpose='calibration')
Review defined aliases.
>>> db.aliases
{'cal': {'purpose': 'calibration'}}
"""
if hasattr(self, key) and key not in self.aliases:
raise ValueError("'%s' is not a legal alias." % key)
self.aliases[key] = query
def dynamic_alias(self, key, func):
"""
Create an alias for a "dynamic" query, a function that returns a query.
Parameters
----------
key : string
must be a valid Python identifier
func : callable
When called with no arguments, must return a dict that is a valid
query.
Examples
--------
Define an alias to get headers from the last 24 hours.
>>> import time
>>> db.dynamic_alias('today',
... lambda: {'since': time.time() - 24*60*60})
Use it.
>>> headers = db.today
Define an alias to get headers whose 'user' field in the metadata
matches the current logged-in user.
>>> import getpass
>>> db.dynamic_alias('mine', lambda: {'user': getpass.getuser()})
Use it
>>> headers = db.mine
"""
if hasattr(self, key) and key not in self.aliases:
raise ValueError("'%s' is not a legal alias." % key)
self.aliases[key] = func
def add_filter(self, **kwargs):
"""
Add query to the list of 'filter' queries.
Any query passed to ``db.add_filter()`` is stashed and "AND-ed" with
all future queries.
``db.add_filter(**kwargs)`` is just a convenient way to spell
``db.filters.update(**kwargs)``.
Examples
--------
Filter all searches to restrict results to a specific user and to runs
after March 2017.
>>> db.add_filter(user='Dan')
>>> db.add_filter(since='2017-3')
The following query is equivalent to
``db(user='Dan', since='2017-3', plan_name='scan')``.
>>> db(plan_name='scan')
Review current filters.
>>> db.filters
{'user': 'Dan', 'since': '2017-3'}
Clear filters.
>>> db.clear_filters()
See Also
--------
:meth:`Broker.clear_filters`
"""
self.filters.update(**kwargs)
def clear_filters(self, **kwargs):
"""
Clear all 'filter' queries.
Filter queries are combined with every given query using '$and',
acting as a filter to restrict the results.
``Broker.clear_filters()`` is just a convenient way to spell
``Broker.filters.clear()``.
See Also
--------
:meth:`Broker.add_filter`
"""
self.filters.clear()
def __getattr__(self, key):
try:
query = self.aliases[key]
except KeyError:
raise AttributeError(key)
if callable(query):
query = query()
return self(**query)
def restream(self, headers, fields=None, fill=False):
"""
Get all Documents from given run(s).
This output can be used as a drop-in replacement for the output of the
bluesky Run Engine.
Parameters
----------
headers : Header or iterable of Headers
header or headers to fetch the documents for
fields : list, optional
whitelist of field names of interest; if None, all are returned
fill : bool, optional
Whether externally-stored data should be filled in. Defaults to
False.
Yields
------
name, doc : tuple
string name of the Document type and the Document itself.
Example: ('start', {'time': ..., ...})
Examples
--------
>>> def f(name, doc):
... # do something
...
>>> h = db[-1] # most recent header
>>> for name, doc in restream(h):
... f(name, doc)
See Also
--------
:meth:`Broker.process`
"""
for payload in self.get_documents(headers, fields=fields, fill=fill):
yield payload
stream = restream # compat
def process(self, headers, func, fields=None, fill=False):
"""
Pass all the documents from one or more runs into a callback.
This output can be used as a drop-in replacement for the output of the
bluesky Run Engine.
Parameters
----------
headers : Header or iterable of Headers
header or headers to process documents from
func : callable
function with the signature `f(name, doc)`
where `name` is a string and `doc` is a dict
fields : list, optional
whitelist of field names of interest; if None, all are returned
fill : bool, optional
Whether externally-stored data should be filled in. Defaults to
False.
Examples
--------
>>> def f(name, doc):
... # do something
...
>>> h = db[-1] # most recent header
>>> process(h, f)
See Also
--------
:meth:`Broker.restream`
"""
for name, doc in self.get_documents(headers, fields=fields, fill=fill):
func(name, doc)
def export(self, headers, db, new_root=None, copy_kwargs=None):
"""
Serialize a list of runs.
If a new_root is passed files associated with the run will be moved to
this new location, and the corresponding resource document will be
updated with the new_root.
Parameters
----------
headers : databroker.header
one or more run headers that are going to be exported
db : databroker.Broker
an instance of databroker.Broker class that will be the target to
export info
new_root : str
optional. root directory of files that are going to
be exported
copy_kwargs : dict or None
passed through to the ``copy_files`` method on Registry;
None by default
Returns
-------
file_pairs : list
list of (old_file_path, new_file_path) pairs generated by
``copy_files`` method on Registry.
"""
if copy_kwargs is None:
copy_kwargs = {}
if isinstance(headers, Header):
headers = [headers]
file_pairs = []
for header in headers:
for name, doc in self._catalog[header.start['uid']].canonical(fill='no'):
if name == 'event_page':
for event in event_model.unpack_event_page(doc):
db.insert('event', event)
elif name == 'resource' and new_root:
copy_kwargs.setdefault('run_start_uid', header.start['uid'])
file_pairs.extend(self.reg.copy_files(doc, new_root, **copy_kwargs))
new_resource = doc.to_dict()
new_resource['root'] = new_root
db.insert(name, new_resource)
else:
db.insert(name, doc)
return file_pairs
def export_size(self, headers):
"""
Get the size of files associated with a list of headers.
Parameters
----------
headers : :class:databroker.Header:
one or more headers that are going to be exported
Returns
-------
total_size : float
total size of all the files associated with the ``headers`` in GB
"""
headers = _ensure_list(headers)
total_size = 0
for header in headers:
run = self._catalog[header.start['uid']]
for name, doc in self._catalog[header.start['uid']].canonical(fill='no'):
if name == 'resource':
for filepath in run.get_file_list(doc):
total_size += os.path.getsize(filepath)
return total_size * 1e-9
def insert(self, name, doc):
if self._serializer is None:
raise RuntimeError("No Serializer was configured for this.")
warnings.warn(
"The method Broker.insert may be removed in a future release of "
"databroker.", PendingDeprecationWarning)
self._serializer(name, doc)
# Make a reasonable effort to keep the Catalog in sync with new data.
if name == 'stop':
self._catalog.force_reload()
def fill_event(*args, **kwargs):
raise NotImplementedError("This method is no longer supported. If you "
"need this please contact the developers by "
"opening an issue here: "
"https://github.com/bluesky/databroker/issues/new ")
def fill_events(*args, **kwargs):
raise NotImplementedError("This method is no longer supported. If you "
"need this please contact the developers by "
"opening an issue here: "
"https://github.com/bluesky/databroker/issues/new ")
def stats(self):
"Access MongoDB storage statistics for this database."
return self.v2.stats()
class Header:
"""
This supports the original Header API but is implemented on intake's Entry.
"""
def __init__(self, datasource):
self.__data_source = datasource
self.db = datasource.catalog_object.v1
self.ext = None # TODO
md = datasource.describe()['metadata']
self._start = md['start']
self._stop = md['stop']
self.ext = SimpleNamespace(
**self.db.fetch_external(self.start, self.stop))
@property
def _data_source(self):
return self.__data_source
@property
def start(self):
return self.db.prepare_hook('start', self._start)
@property
def uid(self):
return self._start['uid']
@property
def stop(self):
if self._stop is None:
self._stop = self.__data_source.describe()['metadata']['stop'] or {}
return self.db.prepare_hook('stop', self._stop)
def __eq__(self, other):
return self.start == other.start
@property
def descriptors(self):
descriptors = self._data_source._descriptors
return sorted([self.db.prepare_hook('descriptor', doc) for doc in descriptors],
key=lambda d: d['time'], reverse=True)
@property
def stream_names(self):
return list(self.__data_source)
# These methods mock part of the dict interface. It has been proposed that
# we might remove them for 1.0.
def __getitem__(self, k):
if k in ('start', 'descriptors', 'stop', 'ext'):
return getattr(self, k)
else:
raise KeyError(k)
def get(self, *args, **kwargs):
return getattr(self, *args, **kwargs)
def items(self):
for k in self.keys():
yield k, getattr(self, k)
def values(self):
for k in self.keys():
yield getattr(self, k)
def keys(self):
for k in ('start', 'descriptors', 'stop', 'ext'):
yield k
def __iter__(self):
return self.keys()
def xarray(self, stream_name='primary'):
return self._data_source[stream_name].read()
def xarray_dask(self, stream_name='primary'):
return self._data_source[stream_name].to_dask()
def table(self, stream_name='primary', fields=None, fill=False,
timezone=None, convert_times=True, localize_times=True):
'''
Load the data from one event stream as a table (``pandas.DataFrame``).
Parameters
----------
stream_name : str, optional
Get events from only "event stream" with this name.
Default is 'primary'
fields : List[str], optional
whitelist of field names of interest; if None, all are returned
Default is None
fill : bool or Iterable[str], optional
Which fields to fill. If `True`, fill all
possible fields.
Each event will have the data filled for the intersection
of its external keys and the fields requested.
Default is False
handler_registry : dict, optional
mapping filestore specs (strings) to handlers (callable classes)
convert_times : bool, optional
Whether to convert times from float (seconds since 1970) to
numpy datetime64, using pandas. True by default.
timezone : str, optional
e.g., 'US/Eastern'; if None, use metadatastore configuration in
`self.mds.config['timezone']`
localize_times : bool, optional
If the times should be localized to the 'local' time zone. If
True (the default) the time stamps are converted to the local time
zone (as configured in mds).
This is problematic for several reasons:
- apparent gaps or duplicate times around DST transitions
- incompatibility with every other time stamp (which is in UTC)
however, this makes the dataframe repr look nicer
This implies convert_times.
Defaults to True to preserve back-compatibility.
Returns
-------
table : pandas.DataFrame
Examples
--------
Load the 'primary' data stream from the most recent run into a table.
>>> h = db[-1]
>>> h.table()
This is equivalent. (The default stream_name is 'primary'.)
>>> h.table(stream_name='primary')
time intensity
0 2017-07-16 12:12:37.239582345 102
1 2017-07-16 12:12:39.958385283 103
Load the 'baseline' data stream.
>>> h.table(stream_name='baseline')
time temperature
0 2017-07-16 12:12:35.128515999 273
1 2017-07-16 12:12:40.128515999 274
'''
return self.db.get_table(self, fields=fields,
stream_name=stream_name, fill=fill,
timezone=timezone,
convert_times=convert_times,
localize_times=localize_times)
def documents(self, stream_name=ALL, fields=None, fill=False):
"""
Load all documents from the run.
This is a generator that yields ``(name, doc)``.
Parameters
----------
stream_name : string or ``ALL``, optional
Filter results by stream name (e.g., 'primary', 'baseline'). The
default, ``ALL``, combines results from all streams.
fill : bool, optional
Whether externally-stored data should be filled in. False by
default.
Yields
------
name, doc : (string, dict)
Examples
--------
Loop through the documents from a run.
>>> h = db[-1]
>>> for name, doc in h.documents():
... # do something
"""
gen = self.db.get_documents(self, fields=fields,
stream_name=stream_name,
fill=fill)
for payload in gen:
yield payload
def data(self, field, stream_name='primary', fill=True):
"""
Extract data for one field. This is convenient for loading image data.
Parameters
----------
field : string
such as 'image' or 'intensity'
stream_name : string, optional
Get data from a single "event stream." Default is 'primary'
fill : bool, optional
If the data should be filled.
Yields
------
data
"""
for event in self.events(stream_name=stream_name,
fields=[field],
fill=fill):
yield event['data'][field]
def stream(self, *args, **kwargs):
warnings.warn(
"The 'stream' method been renamed to 'documents'. The old name "
"will be removed in the future.")
for payload in self.documents(*args, **kwargs):
yield payload
def fields(self, stream_name=ALL):
"""
Return the names of the fields ('data keys') in this run.
Parameters
----------
stream_name : string or ``ALL``, optional
Filter results by stream name (e.g., 'primary', 'baseline'). The
default, ``ALL``, combines results from all streams.
Returns
-------
fields : set
Examples
--------
Load the most recent run and list its fields.
>>> h = db[-1]
>>> h.fields()
{'eiger_stats1_total', 'eiger_image'}
See Also
--------
:meth:`Header.devices`
"""
fields = set()
for descriptor in self.descriptors:
if stream_name is ALL or descriptor.get('name') == stream_name:
fields.update(descriptor['data_keys'])
return fields
def devices(self, stream_name=ALL):
"""
Return the names of the devices in this run.
Parameters
----------
stream_name : string or ``ALL``, optional
Filter results by stream name (e.g., 'primary', 'baseline'). The
default, ``ALL``, combines results from all streams.
Returns
-------
devices : set
Examples
--------
Load the most recent run and list its devices.
>>> h = db[-1]
>>> h.devices()
{'eiger'}
See Also
--------
:meth:`Header.fields`
"""
result = set()
for d in self.descriptors:
if stream_name is ALL or stream_name == d.get('name', 'primary'):
result.update(d['object_keys'])
return result
def config_data(self, device_name):
"""
Extract device configuration data from Event Descriptors.
This refers to the data obtained from ``device.read_configuration()``.
See example below. The result is structured as a [...deep breath...]
dictionary of lists of dictionaries because:
* The device might have been read in multiple event streams
('primary', 'baseline', etc.). Each stream name is a key in the
outer dictionary.
* The configuration is typically read once per event stream, but in
general may be read multiple times if the configuration is changed
mid-stream. Thus, a list is needed.
* Each device typically produces multiple configuration fields
('exposure_time', 'period', etc.). These are the keys of the inner
dictionary.
Parameters
----------
device_name : string
device name (originally obtained from the ``name`` attribute of
some readable Device)
Returns
-------
result : dict
mapping each stream name (such as 'primary' or 'baseline') to a
list of data dictionaries
Examples
--------
Get the device configuration recorded for the device named 'eiger'.
>>> h.config_data('eiger')
{'primary': [{'exposure_time': 1.0}]}
Assign the exposure time to a variable.
>>> exp_time = h.config_data('eiger')['primary'][0]['exposure_time']
How did we know that ``'eiger'`` was a valid argument? We can query for
the complete list of device names:
>>> h.devices()
{'eiger', 'cs700'}
"""
result = defaultdict(list)
for d in sorted(self.descriptors, key=lambda d: d['time']):
config = d['configuration'].get(device_name)
if config:
result[d.get('name')].append(config['data'])
return dict(result) # strip off defaultdict behavior
def events(self, stream_name='primary', fields=None, fill=False):
"""
Load all Event documents from one event stream.
This is a generator that yields Event documents.
Parameters
----------
stream_name : str, optional
Get events from only "event stream" with this name.
Default is 'primary'
fields : List[str], optional
whitelist of field names of interest; if None, all are returned
Default is None
fill : bool or Iterable[str], optional
Which fields to fill. If `True`, fill all
possible fields.
Each event will have the data filled for the intersection
of its external keys and the fields requested.
Default is False
Yields
------
doc : dict
Examples
--------
Loop through the Event documents from a run. This is 'lazy', meaning
that only one Event at a time is loaded into memory.
>>> h = db[-1]
>>> for event in h.events():
... # do something
List the Events documents from a run, loading them all into memory at
once.
>>> events = list(h.events())
"""
ev_gen = self.db.get_events([self], stream_name=stream_name,
fields=fields, fill=fill)
for ev in ev_gen:
yield ev
def _repr_html_(self):
env = jinja2.Environment()
env.filters['human_time'] = _pretty_print_time
template = env.from_string(_HTML_TEMPLATE)
return template.render(document=self)
class Results:
"""
Iterable object encapsulating a results set of Headers
Parameters
----------
catalog : Catalog
search results
data_key : string or None
Special query parameter that filters results
"""
def __init__(self, broker, catalog, data_key):
self._broker = broker
self._catalog = catalog
self._data_key = data_key
def __iter__(self):
# TODO Catalog.walk() fails. We should probably support Catalog.items().
for uid, entry in self._catalog._entries.items():
header = Header(entry())
if self._data_key is None:
yield header
else:
# Only include this header in the result if `data_key` is found
# in one of its descriptors' data_keys.
for descriptor in header.descriptors:
if self._data_key in descriptor['data_keys']:
yield header
break
def _ensure_list(headers):
try:
headers.items()
except AttributeError:
return headers
else:
return [headers]
def _compile_re(fields=[]):
"""
Return a regular expression object based on a list of regular expressions.
Parameters
----------
fields : list, optional
List of regular expressions. If fields is empty returns a general RE.
Returns
-------
comp_re : regular expression object
"""
if len(fields) == 0:
fields = ['.*']
f = ["(?:" + regex + r")\Z" for regex in fields]
comp_re = re.compile('|'.join(f))
return comp_re
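# Behavior of the compiled pattern (editor's note, illustrative only): each entry
# is wrapped as "(?:...)\Z" and used with re.match, so a whitelist entry must match
# the whole field name from its start, e.g.
#
#     comp_re = _compile_re(['eiger.*'])
#     bool(comp_re.match('eiger_image'))  # True
#     bool(comp_re.match('my_eiger'))     # False (no match at the start)
#
# With an empty list, the fallback pattern '.*' matches every field name.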
def _extract_extra_data(start, stop, d, fields, comp_re,
no_fields_filter):
def _project_header_data(source_data, source_ts,
selected_fields, comp_re):
"""Extract values from a header for merging into events
Parameters
----------
source : dict
selected_fields : set
comp_re : SRE_Pattern
Returns
-------
data_keys : dict
data : dict
timestamps : dict
"""
fields = (set(filter(comp_re.match, source_data)) - selected_fields)
data = {k: source_data[k] for k in fields}
timestamps = {k: source_ts[k] for k in fields}
return {}, data, timestamps
if fields:
event_fields = set(d['data_keys'])
selected_fields = set(filter(comp_re.match, event_fields))
discard_fields = event_fields - selected_fields
else:
discard_fields = set()
selected_fields = set(d['data_keys'])
objs_config = d.get('configuration', {}).values()
config_data = merge(obj_conf['data'] for obj_conf in objs_config)
config_ts = merge(obj_conf['timestamps']
for obj_conf in objs_config)
all_extra_data = {}
all_extra_ts = {}
all_extra_dk = {}
if not no_fields_filter:
for dt, ts in [(config_data, config_ts),
(start, defaultdict(lambda: start['time'])),
(stop, defaultdict(lambda: stop['time']))]:
# Look in the descriptor, then start, then stop.
l_dk, l_data, l_ts = _project_header_data(
dt, ts, selected_fields, comp_re)
all_extra_data.update(l_data)
all_extra_ts.update(l_ts)
selected_fields.update(l_data)
all_extra_dk.update(l_dk)
return (all_extra_dk, all_extra_data, all_extra_ts,
discard_fields)
_HTML_TEMPLATE = """
{% macro rtable(doc, cap) -%}
<table>
<caption> {{ cap }} </caption>
{%- for key, value in doc | dictsort recursive -%}
<tr>
<th> {{ key }} </th>
<td>
{%- if value.items -%}
<table>
{{ loop(value | dictsort) }}
</table>
{%- elif value is iterable and value is not string -%}
<table>
{%- set outer_loop = loop -%}
{%- for stuff in value -%}
{%- if stuff.items -%}
{{ outer_loop(stuff | dictsort) }}
{%- else -%}
<tr><td>{{ stuff }}</td></tr>
{%- endif -%}
{%- endfor -%}
</table>
{%- else -%}
{%- if key == 'time' -%}
{{ value | human_time }}
{%- else -%}
{{ value }}
{%- endif -%}
{%- endif -%}
</td>
</tr>
{%- endfor -%}
</table>
{%- endmacro %}
<table>
<tr>
<td>{{ rtable(document.start, 'Start') }}</td>
</tr>
<tr>
<td>{{ rtable(document.stop, 'Stop') }}</td>
</tr>
<tr>
<td>
<table>
<caption>Descriptors</caption>
{%- for d in document.descriptors -%}
<tr>
<td> {{ rtable(d, d.get('name')) }} </td>
</tr>
{%- endfor -%}
</table>
</td>
</tr>
</table>
"""
def _pretty_print_time(timestamp):
# timestamp needs to be a float or fromtimestamp() will barf
timestamp = float(timestamp)
dt = datetime.fromtimestamp(timestamp).isoformat()
ago = humanize.naturaltime(time.time() - timestamp)
return '{ago} ({date})'.format(ago=ago, date=dt)
class InvalidConfig(Exception):
"""Raised when the configuration file is invalid."""
...
def from_config(config, auto_register=True, name=None):
"""
Build (some version of) a Broker instance from a v0 configuration dict.
This method accepts v1 config files.
This can return a ``v0.Broker``, ``v1.Broker``, or ``v2.Broker`` depending
on the contents of ``config``.
If config contains the key 'api_version', it should be set to a value of
0, 1, or 2. That setting will be respected until there is an error, in which
case a warning will be issued and we will fall back to v0. If no
'api_version' is explicitly set by the configuration file, version 1 will
be used.
"""
forced_version = config.get('api_version')
if forced_version == 0:
from . import v0
return v0.Broker.from_config(config, auto_register, name)
try:
catalog = _from_v0_config(config, auto_register, name)
except InvalidConfig:
raise
except Exception as exc:
warnings.warn(
f"Failed to load config. Falling back to v0."
f"Exception was: {exc}")
from . import v0
return v0.Broker.from_config(config, auto_register, name)
if forced_version == 2:
return catalog
elif forced_version is None or forced_version == 1:
broker = Broker(catalog)
broker._config = config # HACK to support Broker.get_config()
return broker
else:
raise ValueError(f"Cannot handle api_version {forced_version}")
def _from_v0_config(config, auto_register, name):
mds_module = config['metadatastore']['module']
if mds_module != 'databroker.headersource.mongo':
raise NotImplementedError(
f"Unable to handle metadatastore.module {mds_module!r}")
mds_class = config['metadatastore']['class']
if mds_class not in ('MDS', 'MDSRO'):
raise NotImplementedError(
f"Unable to handle metadatastore.class {mds_class!r}")
assets_module = config['assets']['module']
if assets_module != 'databroker.assets.mongo':
raise NotImplementedError(
f"Unable to handle assets.module {assets_module!r}")
assets_class = config['assets']['class']
if assets_class not in ('Registry', 'RegistryRO'):
raise NotImplementedError(
f"Unable to handle assets.class {assets_class!r}")
# Get the mongo databases.
metadatastore_db = _get_mongo_database(config['metadatastore']['config'])
asset_registry_db = _get_mongo_database(config['assets']['config'])
from ._drivers.mongo_normalized import BlueskyMongoCatalog
from .core import discover_handlers
# Update the handler registry.
handler_registry = {}
if auto_register:
handler_registry.update(discover_handlers())
# In v0, config-specified handlers are *added* to any default ones.
for spec, contents in config.get('handlers', {}).items():
dotted_object = '.'.join((contents['module'], contents['class']))
handler_registry[spec] = dotted_object
root_map = config.get('root_map')
transforms = config.get('transforms')
return BlueskyMongoCatalog(metadatastore_db, asset_registry_db,
handler_registry=handler_registry,
root_map=root_map,
name=name,
transforms=transforms)
_mongo_clients = {} # cache of pymongo.MongoClient instances
def _get_mongo_database(config):
"""
Return a MongoClient.database. Use a cache in order to reuse the
MongoClient.
"""
# Check that config contains either uri, or host/port, but not both.
if {'uri', 'host'} <= set(config) or {'uri', 'port'} <= set(config):
raise InvalidConfig(
"The config file must define either uri, or host/port, but not both.")
uri = config.get('uri')
database = config['database']
# If this statement is True then uri does not exist in the config.
# If the config has username and password, turn it into a uri.
# This is only here for backward compatibility.
if {'mongo_user', 'mongo_pwd', 'host', 'port'} <= set(config):
uri = (f"mongodb://{config['mongo_user']}:{config['mongo_pwd']}@"
"f{config['host']}:{config['port']}/")
if uri:
try:
client = _mongo_clients[uri]
except KeyError:
client = pymongo.MongoClient(uri)
_mongo_clients[uri] = client
else:
host = config.get('host')
port = config.get('port')
try:
client = _mongo_clients[(host, port)]
except KeyError:
client = pymongo.MongoClient(host, port)
_mongo_clients[(host, port)] = client
return client[database]
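# Editor's note with illustrative inputs: the helper accepts either form, e.g.
#
#     _get_mongo_database({'uri': 'mongodb://localhost:27017/', 'database': 'md'})
#     _get_mongo_database({'host': 'localhost', 'port': 27017, 'database': 'md'})
#
# but raises InvalidConfig if 'uri' is combined with 'host' or 'port'.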
class _GetDocumentsRouter:
"""
This is used by Broker.get_documents.
It employs a pattern similar to event_model.DocumentRouter, but the methods
are generators instead of functions.
"""
def __init__(self, prepare_hook, merge_config_into_event, stream_name):
self.prepare_hook = prepare_hook
self.merge_config_into_event = merge_config_into_event
self.stream_name = stream_name
self._descriptors = set()
def __call__(self, name, doc):
# Special case when there is no Run Stop doc.
# In v0, we returned an empty dict here. We now think better of it.
if name == 'stop' and doc is None:
doc = {}
for new_name, new_doc in getattr(self, name)(doc):
yield new_name, self.prepare_hook(new_name, new_doc)
def descriptor(self, doc):
"Cache descriptor uid and pass it through if it is stream of interest."
if self.stream_name is ALL or doc.get('name', 'primary') == self.stream_name:
self._descriptors.add(doc['uid'])
yield 'descriptor', doc
def event_page(self, doc):
"Unpack into events and pass them to event method for more processing."
if doc['descriptor'] in self._descriptors:
for event in event_model.unpack_event_page(doc):
yield from self.event(event)
def event(self, doc):
"Apply merge_config_into_event."
if doc['descriptor'] in self._descriptors:
# Mutate event in place, merging in content from other documents
# and discarding fields excluded by the user.
self.merge_config_into_event(doc)
# If the mutation above leaves event['data'] empty, omit it.
if doc['data']:
yield 'event', doc
def datum_page(self, doc):
"Unpack into datum."
for datum in event_model.unpack_datum_page(doc):
yield 'datum', datum
def datum(self, doc):
yield 'datum', doc
def start(self, doc):
yield 'start', doc
def stop(self, doc):
yield 'stop', doc
def resource(self, doc):
yield 'resource', doc
``` |
{
"source": "jklynch/diffrascape",
"score": 3
} |
#### File: diffrascape/env/karm.py
```python
import gym
import numpy as np
class KArm(gym.Env):
"""
"""
k = 10
action_space = gym.spaces.Discrete(n=k)
observation_space = gym.spaces.Box(low=0.0, high=1.0, shape=(k, 1), dtype=np.float)
def __init__(self, width, height):
self.observation_space = gym.spaces.Box(
low=-10.0, high=5.0, shape=(width, height, 1), dtype=np.float
)
self.width = width
self.height = height
self.grid_world = None
self.start_point = None
self.current_point = None
self.action_table = np.array([[-1, 0], [1, 0], [0, -1], [0, 1]])
def reset(self):
self.grid_world = np.random.choice(
a=[-5.0, 0.0, 5.0], size=(self.width, self.height, 1),
)
self.start_point = (self.width // 2, self.height // 2)
self.grid_world[self.start_point[0], self.start_point[1], 0] = -10.0
self.current_point = self.start_point
return self.grid_world
def step(self, action):
"""
Parameters
----------
action: int
0 - current_point + (-1, 0)
1 - current_point + ( 1, 0)
2 - current_point + ( 0, -1)
3 - current_point + ( 0, 1)
"""
assert 0 <= action <= 3
self.current_point += self.action_table[action]
if (
0 < self.current_point[0] < self.width
and 0 < self.current_point[1] < self.height
):
reward = (
-1.0 + self.grid_world[self.current_point[0], self.current_point[1], 0]
)
done = False
self.grid_world[self.current_point[0], self.current_point[1], 0] = -10.0
else:
reward = 1.0
done = True
return self.grid_world, reward, done, {}
def get_current_grid_value(self, w, h):
return self.grid_world[w, h]
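# Illustrative rollout (editor's sketch, not part of the original source):
#
#     env = KArm(width=5, height=5)
#     obs = env.reset()
#     done = False
#     while not done:
#         action = np.random.randint(0, 4)  # the four movement actions used by step()
#         obs, reward, done, info = env.step(action)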
```
#### File: diffrascape/tests/test_bad_seeds.py
```python
from diffrascape.env import BadSeeds
def test_construct():
bad_seeds = BadSeeds()
bad_seeds_states = bad_seeds.states()
print(f"### states: {bad_seeds_states}")
assert bad_seeds_states["shape"][0] == 6
bad_seeds_actions = bad_seeds.actions()
print(f"### actions: {bad_seeds_actions}")
assert bad_seeds_actions["num_values"] == 3
``` |
{
"source": "jklynch/iqsq",
"score": 3
} |
#### File: iqsq/iqsq/__init__.py
```python
import pandas as pd
from scipy.spatial.distance import cdist
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
def read_aff_elements(path, *args, **kwargs):
return pd.read_csv(filepath_or_buffer=path, *args, **kwargs)
def read_aff_parameters(path, *args, **kwargs):
return pd.read_csv(filepath_or_buffer=path, *args, **kwargs)
def read_atom_positions(atom_positions_path):
"""Load data from .xyz file.
no header
"""
atom_positions_df = pd.read_table(
filepath_or_buffer=atom_positions_path,
header=None,
names=["x", "y", "z"],
index_col=0,
delim_whitespace=True,
)
# atom_positions_df.columns = ["x", "y", "z"]
return atom_positions_df
def build_atom_distance_matrix(atom_positions_df):
"""Construct a distance matrix from atom positions.
Parameters
----------
atom_positions_df: pandas.DataFrame
an Nx3 dataframe with one row per atom (atom names as the index) and columns x, y, z
for example:
x y z
Na 0 0 1
Cl 0 1 0
Na 1 0 0
Returns
-------
pandas.DataFrame NxN distance matrix with atom names on index and columns
for example:
Na Cl Na
Na 0.0 1.414 1.414
Cl 1.414 0.0 1.414
Na 1.414 1.414 0.0
"""
atom_distance_matrix = cdist(
atom_positions_df.to_numpy(), atom_positions_df.to_numpy()
)
atom_distance_matrix_df = pd.DataFrame(
data=atom_distance_matrix,
columns=atom_positions_df.index,
index=atom_positions_df.index,
)
# set index name to None, otherwise it is "0" and that looks
# odd when the dataframe is printed
atom_distance_matrix_df.index.name = None
return atom_distance_matrix_df
```
#### File: iqsq/tests/test_build_atom_distance_matrix.py
```python
from pathlib import Path
import numpy as np
from iqsq import build_atom_distance_matrix, read_atom_positions
def test_build_atom_distance_matrix(tmp_path):
atom_positions_path = tmp_path / Path("atom_positions.txt")
with open(atom_positions_path, "wt") as f:
f.write(
"""\
Na 0 0 1
Cl 0 1 0
"""
)
atom_positions_df = read_atom_positions(atom_positions_path=atom_positions_path)
atom_distance_matrix_df = build_atom_distance_matrix(
atom_positions_df=atom_positions_df
)
assert atom_distance_matrix_df.shape == (2, 2)
assert atom_distance_matrix_df.index[0] == "Na"
assert atom_distance_matrix_df.index[1] == "Cl"
assert atom_distance_matrix_df.columns[0] == "Na"
assert atom_distance_matrix_df.columns[1] == "Cl"
assert atom_distance_matrix_df.iloc[0, 0] == 0.0
assert atom_distance_matrix_df.iloc[1, 0] == np.sqrt(2)
assert atom_distance_matrix_df.iloc[0, 1] == np.sqrt(2)
assert atom_distance_matrix_df.iloc[1, 1] == 0.0
```
#### File: iqsq/tests/test_read_aff.py
```python
from pathlib import Path
from iqsq import read_aff_elements
def test_read_aff_elements(tmp_path):
test_aff_elements_path = tmp_path / Path("aff_elements.txt")
with open(test_aff_elements_path, "wt") as f:
f.write(
"""\
H
H1-
He
Li
Li1+
"""
)
aff_elements_df = read_aff_elements(path=test_aff_elements_path, header=None)
assert aff_elements_df.shape == (5, 1)
assert aff_elements_df.iloc[0, 0] == "H"
assert aff_elements_df.iloc[-1, 0] == "Li1+"
def test_read_aff_parameters(tmp_path):
test_aff_parameters_path = tmp_path / Path("aff_parameters.txt")
with open(test_aff_parameters_path, "wt") as f:
f.write(
"""\
0.489918 20.659300 0.262003 7.740390 0.196767 49.551900 0.049879 2.201590 0.001305
0.897661 53.136800 0.565616 15.187000 0.415815 186.576000 0.116973 3.567090 0.002389
0.873400 9.103700 0.630900 3.356800 0.311200 22.927600 0.178000 0.982100 0.006400
1.128200 3.954600 0.750800 1.052400 0.617500 85.390500 0.465300 168.261000 0.037700
0.696800 4.623700 0.788800 1.955700 0.341400 0.631600 0.156300 10.095300 0.016700
"""
)
aff_parameters_df = read_aff_elements(
path=test_aff_parameters_path, header=None, delim_whitespace=True
)
assert aff_parameters_df.shape == (5, 9)
assert aff_parameters_df.iloc[0, 0] == 0.489918
assert aff_parameters_df.iloc[-1, -1] == 0.016700
``` |
{
"source": "jklynch/lix-workers",
"score": 3
} |
#### File: lix/export/util.py
```python
from collections.abc import Mapping
import json
def _safe_attrs_assignment(h5_group, a_mapping):
a_mapping = _clean_dict(a_mapping)
for key, value in a_mapping.items():
# Special-case None, which fails too late to catch below.
if value is None:
value = "None"
# Try storing natively.
try:
h5_group.attrs[key] = value
# Fallback: Save the repr, which in many cases can be used to
# recreate the object.
except TypeError:
h5_group.attrs[key] = json.dumps(value)
def _clean_dict(a_mapping):
a_mapping = dict(a_mapping)
for k, v in list(a_mapping.items()):
# Store dictionaries as JSON strings.
if isinstance(v, Mapping):
a_mapping[k] = _clean_dict(a_mapping[k])
continue
try:
json.dumps(v)
except TypeError:
a_mapping[k] = str(v)
return a_mapping
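# Illustrative usage (editor's sketch, assuming h5py): metadata dictionaries can be
# written as HDF5 attributes, with values that h5py cannot store natively falling
# back to their JSON/repr form:
#
#     import h5py
#     with h5py.File("export.h5", "w") as f:
#         group = f.create_group("start")
#         _safe_attrs_assignment(group, {"plan_name": "scan",
#                                        "md": {"sample": "NaCl"}})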
``` |
{
"source": "jklynch/mr-fitty",
"score": 2
} |
#### File: mr-fitty/mrfitty/combination_fit.py
```python
import collections
from concurrent.futures import ProcessPoolExecutor, as_completed
import itertools
import logging
from operator import attrgetter
import os.path
import time
import traceback
import warnings
import matplotlib
matplotlib.use("pdf", force=True)
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import pandas as pd
import scipy.cluster.hierarchy as hc
from scipy.spatial.distance import pdist
from sklearn.utils import shuffle
from mrfitty.base import (
InterpolatedSpectrumSet,
InterpolatedReferenceSpectraSet,
SpectrumFit,
)
from mrfitty.plot import (
add_date_time_footer,
plot_fit,
plot_reference_tree,
# plot_prediction_errors,
plot_stacked_fit,
)
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue)
class FitFailed(Exception):
pass
class CombinationFitResults:
"""CombinationFitResults"""
def __init__(self, spectrum, best_fit, component_count_fit_table):
self.spectrum = spectrum
self.best_fit = best_fit
self.component_count_fit_table = component_count_fit_table
class AllCombinationFitTask:
def __init__(
self,
ls,
reference_spectrum_list,
unknown_spectrum_list,
energy_range_builder,
best_fits_plot_limit,
component_count_range=range(4),
):
self.ls = ls
self.reference_spectrum_list = reference_spectrum_list
self.unknown_spectrum_list = unknown_spectrum_list
self.energy_range_builder = energy_range_builder
self.best_fits_plot_limit = best_fits_plot_limit
self.component_count_range = component_count_range
self.fit_table = None
def fit_all(self, plots_pdf_dp):
"""
using self.fit_table here seems to be causing this intermittent error:
concurrent.futures.process._RemoteTraceback:
Traceback (most recent call last):
File "/home/jlynch/miniconda3/envs/mrf/lib/python3.7/multiprocessing/queues.py", line 236, in _feed
obj = _ForkingPickler.dumps(obj)
File "/home/jlynch/miniconda3/envs/mrf/lib/python3.7/multiprocessing/reduction.py", line 51, in dumps
cls(buf, protocol).dump(obj)
RuntimeError: OrderedDict mutated during iteration
Parameters
----------
plots_pdf_dp
Returns
-------
"""
log = logging.getLogger(name="fit_all")
os.makedirs(plots_pdf_dp, exist_ok=True)
futures = dict()
failed_fits = list()
_fit_table = collections.OrderedDict()
with ProcessPoolExecutor(max_workers=4) as executor:
for unknown_spectrum in sorted(
self.unknown_spectrum_list, key=lambda s: s.file_name
):
future = executor.submit(
self.fit_and_plot_exc, unknown_spectrum, plots_pdf_dp
)
futures[future] = unknown_spectrum
for future in as_completed(futures):
unknown_spectrum = futures[future]
log.info("completed %s fit", unknown_spectrum.file_name)
try:
fit_results = future.result()
_fit_table[unknown_spectrum] = fit_results
except (Exception, BaseException):
log.exception("trouble in paradise")
traceback.print_exc()
failed_fits.append(unknown_spectrum)
if len(failed_fits) > 0:
print("failed fits:")
print("\n".join(failed_fits))
self.fit_table = _fit_table
return self.fit_table
def fit_and_plot_exc(self, unknown_spectrum, plots_pdf_dp):
log = logging.getLogger(name=f"fit_and_plot_exc:{unknown_spectrum.file_name}")
try:
return self.fit_and_plot(
unknown_spectrum=unknown_spectrum, plots_pdf_dp=plots_pdf_dp
)
except (BaseException, Exception):
log.exception("trouble in fit_and_plot_exc")
traceback.print_exc()
raise
def fit_and_plot(self, unknown_spectrum, plots_pdf_dp):
log = logging.getLogger(name=f"fit_and_plot:{unknown_spectrum.file_name}")
log.debug("fitting %s", unknown_spectrum.file_name)
t0 = time.time()
best_fit, fit_table = self.fit(unknown_spectrum)
t1 = time.time()
log.info("fit %s in %5.3fs", unknown_spectrum.file_name, t1 - t0)
fit_results = CombinationFitResults(
spectrum=unknown_spectrum,
best_fit=best_fit,
component_count_fit_table=fit_table,
)
file_base_name, _ = os.path.splitext(
os.path.basename(unknown_spectrum.file_name)
)
plots_pdf_fp = os.path.join(plots_pdf_dp, file_base_name + "_fit.pdf")
with PdfPages(plots_pdf_fp) as plot_file, warnings.catch_warnings():
warnings.simplefilter("ignore")
log.info("writing plots file {}".format(plots_pdf_dp))
# create plot
log.info("plotting fit for %s", unknown_spectrum.file_name)
f_list = self.plot_top_fits(
spectrum=unknown_spectrum, fit_results=fit_results
)
for f in f_list:
plot_file.savefig(f)
plt.close(f)
f = plot_fit(
spectrum=unknown_spectrum,
any_given_fit=fit_results.best_fit,
title="Best Fit",
fit_quality_labels=self.get_fit_quality_score_text(
any_given_fit=fit_results.best_fit
),
)
plot_file.savefig(f)
plt.close(f)
f = plot_stacked_fit(
spectrum=unknown_spectrum,
any_given_fit=fit_results.best_fit,
title="Best Fit",
fit_quality_labels=self.get_fit_quality_score_text(
any_given_fit=fit_results.best_fit
),
)
plot_file.savefig(f)
plt.close(f)
clustering_parameters = {
"linkage_method": "complete",
"pdist_metric": "correlation",
}
# use these for reference tree plots
interpolation_energy_range, _ = self.energy_range_builder.build_range(
unknown_spectrum=unknown_spectrum,
reference_spectrum_seq=self.reference_spectrum_list,
)
interpolated_reference_set_df = (
InterpolatedSpectrumSet.get_interpolated_spectrum_set_df(
energy_range=interpolation_energy_range,
spectrum_set=set(self.reference_spectrum_list),
)
)
reference_spectra_linkage, cutoff_distance = self.cluster_reference_spectra(
interpolated_reference_set_df, **clustering_parameters
)
h = plot_reference_tree(
linkage_distance_variable_by_sample=reference_spectra_linkage,
reference_df=interpolated_reference_set_df,
cutoff_distance=cutoff_distance,
title="Best Fit\n{}".format(unknown_spectrum.file_name),
reference_spectra_names=[
r.file_name for r in fit_results.best_fit.reference_spectra_seq
],
**clustering_parameters,
)
plot_file.savefig(h)
plt.close(h)
ordinal_list = (
"1st",
"2nd",
"3rd",
"4th",
"5th",
"6th",
"7th",
"8th",
"9th",
"10th",
)
# plot the best n-component fit
for n in sorted(fit_table.keys()):
log.info(
"plotting %d-component fit for %s", n, unknown_spectrum.file_name
)
n_component_fit_results = fit_table[n]
# here only plot the best fit for each component count
for i, fit in enumerate(n_component_fit_results):
if i < self.best_fits_plot_limit:
title = "{} Best {}-Component Fit".format(ordinal_list[i], n)
f = plot_fit(
spectrum=unknown_spectrum,
any_given_fit=fit,
title=title,
fit_quality_labels=self.get_fit_quality_score_text(
any_given_fit=fit
),
)
plot_file.savefig(f)
plt.close(f)
# if hasattr(fit, "prediction_errors"):
# g = plot_prediction_errors(
# spectrum=unknown_spectrum,
# fit=fit,
# title=title,
# )
# plot_file.savefig(g)
# plt.close(g)
h = plot_reference_tree(
linkage_distance_variable_by_sample=reference_spectra_linkage,
reference_df=interpolated_reference_set_df,
cutoff_distance=cutoff_distance,
title=title + "\n" + unknown_spectrum.file_name,
reference_spectra_names=[
r.file_name for r in fit.reference_spectra_seq
],
**clustering_parameters,
)
plot_file.savefig(h)
plt.close(h)
else:
break
return fit_results
# tried to speed up mrfitty by distributing the work in this function
# there was no speedup
# apparently this is not where a lot of time is spent
def fit(self, unknown_spectrum):
log = logging.getLogger(name=unknown_spectrum.file_name)
log.info("fitting unknown spectrum %s", unknown_spectrum.file_name)
interpolated_reference_spectra = InterpolatedReferenceSpectraSet(
unknown_spectrum=unknown_spectrum,
reference_set=self.reference_spectrum_list,
)
# fit all combinations of reference_spectra
# all_counts_spectrum_fit_table looks like this:
# { 1: [...list of 1-component fits sorted by NSS...],
# 2: [...list of 2-component fits sorted by NSS...],
# ...
# }
all_counts_spectrum_fit_table = collections.defaultdict(list)
reference_combination_grouper = grouper(
self.reference_combination_iter(self.component_count_range), n=1000
)
for reference_combination_group in reference_combination_grouper:
log.debug(
"fitting group of %d reference combinations",
len(reference_combination_group),
)
fits, failed_fits = self.do_some_fits(
unknown_spectrum=unknown_spectrum,
interpolated_reference_spectra=interpolated_reference_spectra,
reference_spectra_combinations=reference_combination_group,
)
log.debug("%d successful fits", len(fits))
# append new fits to the appropriate lists
# but do not sort yet
for fit in fits:
reference_count = len(fit.reference_spectra_seq)
spectrum_fit_list = all_counts_spectrum_fit_table[reference_count]
spectrum_fit_list.append(fit)
# now sort and trim each list to the best 100 fits
for (
reference_count,
spectrum_fit_list,
) in all_counts_spectrum_fit_table.items():
log.debug(
"sorting %d-component fit list with %d fits",
reference_count,
len(spectrum_fit_list),
)
spectrum_fit_list.sort(key=attrgetter("nss"))
# when there are many reference spectra the list of fits can get extremely long
# and eat up all of memory
# so keep only the top 100 fits for each component count
if len(spectrum_fit_list) > 100:
log.debug(
"trimming %d-component fit list with %d fits",
reference_count,
len(spectrum_fit_list),
)
all_counts_spectrum_fit_table[reference_count] = spectrum_fit_list[
:100
]
log.debug("%d failed fits", len(failed_fits))
best_fit = self.choose_best_component_count(all_counts_spectrum_fit_table)
return best_fit, all_counts_spectrum_fit_table
def do_some_fits(
self,
unknown_spectrum,
interpolated_reference_spectra,
reference_spectra_combinations,
):
log = logging.getLogger(name=unknown_spectrum.file_name)
fits = []
failed_fits = []
log.debug(
"do_some_fits for %d reference combinations",
len(reference_spectra_combinations),
)
for reference_spectra_combination in reference_spectra_combinations:
log.debug("fitting to reference_spectra %s", reference_spectra_combination)
if reference_spectra_combination is None:
pass
else:
try:
spectrum_fit = self.fit_references_to_unknown(
interpolated_reference_spectra=interpolated_reference_spectra,
reference_spectra_subset=reference_spectra_combination,
)
fits.append(spectrum_fit)
except FitFailed:
# this is a common occurrence when using ordinary linear regression
# it is not an 'error' just something that happens and needs to be handled
msg = 'failed to fit unknown "{}" to references\n\t{}'.format(
unknown_spectrum.file_name,
"\n\t".join(
[r.file_name for r in reference_spectra_combination]
),
)
failed_fits.append(msg)
log.debug("returning %d fits, %d failed fits", len(fits), len(failed_fits))
return fits, failed_fits
def reference_combination_iter(self, component_count_range):
for component_count in component_count_range:
for reference_spectra_combination in itertools.combinations(
self.reference_spectrum_list, component_count
):
yield reference_spectra_combination
def fit_references_to_unknown(
self, interpolated_reference_spectra, reference_spectra_subset
):
interpolated_data = (
interpolated_reference_spectra.get_reference_subset_and_unknown_df(
reference_list=reference_spectra_subset,
energy_range_builder=self.energy_range_builder,
)
)
interpolated_reference_spectra_subset_df = interpolated_data[
"reference_subset_df"
]
unknown_spectrum_df = interpolated_data["unknown_subset_df"]
lm = self.ls()
lm.fit(
interpolated_reference_spectra_subset_df.values,
unknown_spectrum_df.norm.values,
)
if any(lm.coef_ < 0.0):
msg = "negative coefficients while fitting:\n{}".format(lm.coef_)
raise FitFailed(msg)
else:
reference_spectra_coef_x = lm.coef_
spectrum_fit = SpectrumFit(
interpolant_incident_energy=interpolated_reference_spectra_subset_df.index,
reference_spectra_A_df=interpolated_reference_spectra_subset_df,
unknown_spectrum=interpolated_data["unknown_subset_spectrum"],
reference_spectra_seq=reference_spectra_subset,
reference_spectra_coef_x=reference_spectra_coef_x,
)
return spectrum_fit
def choose_best_component_count(self, all_counts_spectrum_fit_table):
"""
Choose the best fit from the best fits for each component count.
:param all_counts_spectrum_fit_table:
dictionary with component count keys and values list of spectrum fits in sorted order
:return: instance of SpectrumFit
"""
log = logging.getLogger(name=self.__class__.__name__)
best_fit = None
previous_nss = 1.0
for component_count in sorted(all_counts_spectrum_fit_table.keys()):
best_fit_for_component_count = all_counts_spectrum_fit_table[
component_count
][0]
improvement = (
previous_nss - best_fit_for_component_count.nss
) / previous_nss
log.debug(
"improvement: {:5.3f} for {}".format(
improvement, best_fit_for_component_count
)
)
if improvement < 0.10:
break
else:
best_fit = best_fit_for_component_count
previous_nss = best_fit.nss
log.debug("best fit: {}".format(best_fit))
return best_fit
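# Worked example of the 10% rule above (editor's note): if the best 1-, 2- and
# 3-component fits had NSS 0.20, 0.05 and 0.047, the 1- and 2-component fits are
# accepted (improvements 0.80 and 0.75) but the 3-component fit is rejected
# because (0.05 - 0.047) / 0.05 = 0.06 < 0.10, so the 2-component fit is returned.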
def write_table(self, table_file_path):
"""
sample name, residual, reference 1, fraction 1, reference 2, fraction 2, ...
:param table_file_path:
:return:
"""
# log = logging.getLogger(name=self.__class__.__name__)
table_file_dir_path, _ = os.path.split(table_file_path)
os.makedirs(table_file_dir_path, exist_ok=True)
with open(table_file_path, "wt") as table_file:
table_file.write(
"spectrum\tNSS\tresidual percent\treference 1\tpercent 1\treference 2\tpercent 2\treference 3\tpercent 3\n" # noqa
)
for spectrum, fit_results in self.fit_table.items():
table_file.write(spectrum.file_name)
table_file.write("\t")
table_file.write("{:8.5f}\t".format(fit_results.best_fit.nss))
table_file.write(
"{:5.3f}".format(fit_results.best_fit.residuals_contribution)
)
for (
ref_name,
ref_pct,
) in fit_results.best_fit.reference_contribution_percent_sr.sort_values(
ascending=False
).items():
table_file.write("\t")
table_file.write(ref_name)
table_file.write("\t{:5.3f}".format(ref_pct))
table_file.write("\n")
def plot_top_fits(self, spectrum, fit_results):
# log = logging.getLogger(name=self.__class__.__name__)
figure_list = []
for i, component_count in enumerate(
fit_results.component_count_fit_table.keys()
):
f, ax = plt.subplots()
f.suptitle(spectrum.file_name + "\n" + "Fit Path")
sorted_fits = fit_results.component_count_fit_table[component_count][:10]
ax.scatter(
y=range(len(sorted_fits)),
x=[spectrum_fit.nss for spectrum_fit in sorted_fits],
)
ax.set_title("{} component(s)".format(component_count))
ax.set_xlabel("NSS")
ax.set_ylabel("order")
add_date_time_footer(ax)
f.tight_layout()
figure_list.append(f)
return figure_list
def get_fit_quality_score_text(self, any_given_fit):
return ["MSE: {:8.5f}".format(any_given_fit.nss)]
@staticmethod
def permute_row_elements(df):
for i in range(df.shape[0]):
df.values[i, :] = shuffle(df.values[i, :])
return df
def cluster_reference_spectra(
self, reference_df, pdist_metric="correlation", linkage_method="complete"
):
# log = logging.getLogger(name=self.__class__.__name__)
distance_for_sample_pairs = pdist(
X=np.transpose(reference_df.values), metric=pdist_metric
)
# plt.figure()
# plt.title(title)
# plt.hist(distance_for_sample_pairs)
# plt.xlabel('{} distance'.format(pdist_metric))
# plt.ylabel('{} pairs'.format(variable_by_sample_df.shape))
# plt.show()
resample_count = 1000
expected_distance_list = []
for i in range(resample_count):
# permute the elements of each row of variable_by_sample_df
p_variable_by_sample_df = self.permute_row_elements(reference_df.copy())
p_distance_for_sample_pairs = pdist(
X=np.transpose(p_variable_by_sample_df.values), metric=pdist_metric
)
p_linkage_distance_variable_by_sample = hc.linkage(
y=p_distance_for_sample_pairs, method=linkage_method
)
p_dendrogram = hc.dendrogram(
Z=p_linkage_distance_variable_by_sample, no_plot=True
)
expected_distance_list.extend(
[d for (_, _, d, _) in p_dendrogram["dcoord"]]
)
p = 95.0
# alpha = 1.0 - p / 100.0
cutoff_distance = np.percentile(expected_distance_list, q=p)
# print('cutoff distance is {}'.format(cutoff_distance))
# plt.figure()
# plt.hist(expected_distance_list)
# plt.title('dendrogram distance null distribution')
# plt.show()
linkage_distance_variable_by_sample = hc.linkage(
y=distance_for_sample_pairs, method=linkage_method
)
return linkage_distance_variable_by_sample, cutoff_distance
def write_best_fit_arrays(self, best_fit_dir_path):
log = logging.getLogger(name=self.__class__.__name__)
for spectrum, fit_results in self.fit_table.items():
file_base_name, file_name_ext = os.path.splitext(spectrum.file_name)
fit_file_path = os.path.abspath(os.path.join(best_fit_dir_path, file_base_name + "_fit.txt"))
log.info("writing best fit to {}".format(fit_file_path))
fit_df = pd.DataFrame(
{
"energy": fit_results.best_fit.interpolant_incident_energy,
"spectrum": fit_results.best_fit.unknown_spectrum_b,
"fit": fit_results.best_fit.fit_spectrum_b,
"residual": fit_results.best_fit.residuals,
}
)
fit_df.to_csv(fit_file_path, sep="\t", float_format="%8.4f", index=False)
```
#### File: mr-fitty/mrfitty/database.py
```python
from contextlib import contextmanager
from hashlib import sha256
from sqlalchemy import create_engine
from sqlalchemy import Column, Float, Integer, String
from sqlalchemy import ForeignKey, Table
from sqlalchemy import UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
FitBase = declarative_base()
class DBFile(FitBase):
__tablename__ = "file"
id = Column(Integer, primary_key=True)
path = Column(String, unique=True)
digest = Column(String, unique=True)
fit_reference_spectrum = Table(
"fit_reference_spectrum",
FitBase.metadata,
Column("fit_id", ForeignKey("fit.id"), primary_key=True),
Column(
"reference_spectrum_id", ForeignKey("reference_spectrum.id"), primary_key=True
),
)
class DBReferenceSpectrum(FitBase):
__tablename__ = "reference_spectrum"
__table_args__ = (
UniqueConstraint(
"spectrum_file_id", "start_energy", "end_energy", name="ref_1"
),
)
id = Column(Integer, primary_key=True)
start_energy = Column(Float)
end_energy = Column(Float)
spectrum_file_id = Column(Integer, ForeignKey("file.id"))
spectrum_file = relationship("DBFile")
fits = relationship(
"DBFit",
secondary="fit_reference_spectrum",
back_populates="reference_spectra",
lazy="dynamic",
)
class DBUnknownSpectrum(FitBase):
__tablename__ = "unknown_spectrum"
__table_args__ = (
UniqueConstraint(
"spectrum_file_id", "start_energy", "end_energy", name="unk_1"
),
)
id = Column(Integer, primary_key=True)
start_energy = Column(Float)
end_energy = Column(Float)
spectrum_file_id = Column(Integer, ForeignKey("file.id"))
spectrum_file = relationship("DBFile")
fits = relationship("DBFit", back_populates="unknown_spectrum", lazy="dynamic")
class DBFit(FitBase):
__tablename__ = "fit"
id = Column(Integer, primary_key=True)
start_energy = Column(Float)
end_energy = Column(Float)
sse = Column(Float)
unknown_spectrum_id = Column(Integer, ForeignKey("unknown_spectrum.id"))
unknown_spectrum = relationship("DBUnknownSpectrum", back_populates="fits")
reference_spectra = relationship(
"DBReferenceSpectrum", secondary="fit_reference_spectrum", back_populates="fits"
)
@contextmanager
def session_scope(Session_):
"""Provide a transactional scope around a series of operations."""
session = Session_()
try:
yield session
session.commit()
    except BaseException:
session.rollback()
raise
finally:
session.close()
class FitDatabase:
def __init__(self, url, **kwargs):
self.url = url
self.engine = create_engine(self.url, **kwargs)
self.Session = sessionmaker(bind=self.engine)
def create_tables(self):
FitBase.metadata.create_all(self.engine)
def get_session_ctx_mgr(self):
return session_scope(self.Session)
@staticmethod
def get_file_digest(path):
h = sha256()
with open(path, mode="rb") as b:
h.update(b.read())
digest = h.hexdigest()
return digest
def insert_file(self, session, path):
session.add(DBFile(path=path, digest=self.get_file_digest(path=path)))
def insert_reference_spectrum(self, session, reference_spectrum):
dbfile = (
session.query(DBFile)
.filter(DBFile.path == reference_spectrum.file_path)
.one_or_none()
)
if dbfile is None:
self.insert_file(session, path=reference_spectrum.file_path)
dbfile = (
session.query(DBFile)
.filter(DBFile.path == reference_spectrum.file_path)
.one()
)
else:
pass
dbspectrum = DBReferenceSpectrum(
spectrum_file=dbfile,
start_energy=reference_spectrum.data_df.index[0],
end_energy=reference_spectrum.data_df.index[-1],
)
session.add(dbspectrum)
return dbspectrum
@staticmethod
def query_reference_spectra(session, path):
return (
session.query(DBReferenceSpectrum)
.join(DBFile)
.filter(DBFile.path == path)
.one()
)
def insert_unknown_spectrum(self, session, unknown_spectrum):
dbfile = (
session.query(DBFile)
.filter(DBFile.path == unknown_spectrum.file_path)
.one_or_none()
)
if dbfile is None:
self.insert_file(session, path=unknown_spectrum.file_path)
dbfile = (
session.query(DBFile)
.filter(DBFile.path == unknown_spectrum.file_path)
.one()
)
else:
pass
dbspectrum = DBUnknownSpectrum(
spectrum_file=dbfile,
start_energy=unknown_spectrum.data_df.index[0],
end_energy=unknown_spectrum.data_df.index[-1],
)
session.add(dbspectrum)
return dbspectrum
@staticmethod
def query_unknown_spectra(session, path):
"""
Unknown spectrum database records are unique by path.
The start and end energies for these records are not necessarily the same as for fits.
Parameters
----------
session database session
path path of unknown spectrum file
Returns
-------
One instance of DBUnknownSpectrum
"""
return (
session.query(DBUnknownSpectrum)
.join(DBFile)
.filter(DBFile.path == path)
.one()
)
def insert_fit(self, session, fit):
"""
The associated reference and unknown spectra must be already in the database.
Parameters
----------
session database session
fit instance of SpectrumFit
"""
dbfit = DBFit(
start_energy=fit.get_start_energy(),
end_energy=fit.get_end_energy(),
sse=fit.nss,
)
db_unknown_spectrum = self.query_unknown_spectra(
session=session, path=fit.unknown_spectrum.file_path
)
dbfit.unknown_spectrum = db_unknown_spectrum
for r in fit.reference_spectra_seq:
db_ref = self.query_reference_spectra(session=session, path=r.file_path)
dbfit.reference_spectra.append(db_ref)
session.add(dbfit)
def query_fits(self, session, unknown_spectrum):
db_unknown_spectrum = self.query_unknown_spectra(
session=session, path=unknown_spectrum.file_path
)
return (
session.query(DBFit)
.filter(DBFit.unknown_spectrum_id == db_unknown_spectrum.id)
.order_by(DBFit.sse)
.limit(10)
.all()
)
```
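A minimal usage sketch of FitDatabase follows. It assumes the mrfitty package is importable; the temporary spectrum file and its two-line contents are invented only so that insert_file has something to hash.
```python
# Minimal FitDatabase sketch: in-memory SQLite, one registered file.
# Assumes mrfitty is importable; the spectrum file content is a placeholder.
import tempfile

from mrfitty.database import DBFile, FitDatabase

fit_db = FitDatabase(url="sqlite:///:memory:")
fit_db.create_tables()

# insert_file() reads and hashes the file, so create a real temporary file
with tempfile.NamedTemporaryFile(mode="w", suffix=".e", delete=False) as tf:
    tf.write("11825.550 0.0063\n11830.550 0.0030\n")
    spectrum_path = tf.name

with fit_db.get_session_ctx_mgr() as session:
    fit_db.insert_file(session, path=spectrum_path)

with fit_db.get_session_ctx_mgr() as session:
    db_file = session.query(DBFile).filter(DBFile.path == spectrum_path).one()
    print(db_file.path, db_file.digest)
```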
#### File: mr-fitty/mrfitty/fit_task_builder.py
```python
import configparser
from glob import glob
import logging
import os
import re
from sklearn.linear_model import LinearRegression
from mrfitty.base import (
AdaptiveEnergyRangeBuilder,
FixedEnergyRangeBuilder,
PRM,
ReferenceSpectrum,
Spectrum,
)
from mrfitty.prediction_error_fit import PredictionErrorFitTask
from mrfitty.combination_fit import AllCombinationFitTask
from mrfitty.linear_model import NonNegativeLinearRegression
class ConfigurationFileError(ValueError):
pass
def get_config_parser():
cp = configparser.ConfigParser(allow_no_value=True, delimiters=("=",))
cp.optionxform = lambda option: option
return cp
def build_reference_spectrum_list_from_prm_file(prm_file_path):
"""
    Read a PRM file to create a list of ReferenceSpectrum
    instances, a maximum component count, and a minimum
    component count.
:param prm_file_path:
:return:
list of ReferenceSpectrum instances
maximum component count
minimum component count
"""
log = logging.getLogger(name=__file__)
reference_spectrum_list = []
log.info("reading PRM file {}".format(prm_file_path))
prm = PRM.read_prm(prm_file_path)
# read reference files
for i, fp in enumerate(prm.reference_file_path_list):
log.info("reading reference file {}: {}".format(i, fp))
reference_spectrum = ReferenceSpectrum.read_file(fp)
reference_spectrum_list.append(reference_spectrum)
return reference_spectrum_list, prm.nb_component_max, prm.nb_component_min
def _get_required_config_value(config, section, option):
if not config.has_option(section=section, option=option):
raise ConfigurationFileError(
'section [{}] missing required option "{}"'.format(section, option)
)
else:
return config.get(section=section, option=option)
def _get_required_config_value_list(config, section):
if not config.has_section(section=section):
raise ConfigurationFileError("required section [{}] missing".format(section))
else:
return config.items(section=section)
def build_reference_spectrum_list_from_config_prm_section(config):
log = logging.getLogger(name=__name__)
max_component_count = int(_get_required_config_value(config, "prm", "NBCompoMax"))
min_component_count = int(_get_required_config_value(config, "prm", "NBCompoMin"))
reference_spectrum_list = [
ReferenceSpectrum.read_file(file_path_or_buffer=option_name)
for option_name, option_value in _get_required_config_value_list(config, "prm")
if len(option_value) == 0
]
log.debug("NBCompoMax: %d", max_component_count)
log.debug("NBCompoMin: %d", min_component_count)
log.debug("Reference list length:\n %d", len(reference_spectrum_list))
if min_component_count <= 0:
raise ConfigurationFileError(
'NBCompoMin must be greater than zero, not "{}"'.format(min_component_count)
)
elif max_component_count <= 0:
raise ConfigurationFileError(
'NBCompoMax must be greater than zero, not "{}"'.format(max_component_count)
)
elif min_component_count > max_component_count:
raise ConfigurationFileError(
'NBCompoMin "{}" is greater than NBCompoMax "{}"'.format(
min_component_count, max_component_count
)
)
else:
return max_component_count, min_component_count, reference_spectrum_list
def build_reference_spectrum_list_from_config_file(config):
"""
Read reference spectrum file glob(s) from configuration file to create
and return a list of ReferenceSpectrum instances.
:param config: configparser instance
:return: list of ReferenceSpectrum instances
"""
log = logging.getLogger(name=__name__)
references = config.items("references")
log.debug(references)
reference_spectrum_list, _ = ReferenceSpectrum.read_all(
[
os.path.expanduser(reference_file_glob)
for reference_file_glob, _ in references
]
)
if len(reference_spectrum_list) == 0:
raise ConfigurationFileError(
'no reference spectrum files were found using globs "{}"'.format(references)
)
else:
return reference_spectrum_list
def build_unknown_spectrum_list_from_config_file(config):
log = logging.getLogger(name=__name__)
unknown_spectrum_file_path_list = []
for j, (unknown_spectrum_glob, _) in enumerate(config.items("data")):
log.info("unknown spectrum glob: {}".format(unknown_spectrum_glob))
glob_pattern_expanded = os.path.expanduser(unknown_spectrum_glob)
unknown_spectrum_file_path_list.extend(glob(glob_pattern_expanded))
log.info("found {} data files".format(len(unknown_spectrum_file_path_list)))
unknown_spectrum_list = []
for unknown_spectrum_file_path in unknown_spectrum_file_path_list:
log.info("reading data file {}".format(unknown_spectrum_file_path))
unknown_spectrum = Spectrum.read_file(unknown_spectrum_file_path)
unknown_spectrum_list.append(unknown_spectrum)
if len(unknown_spectrum_list) == 0:
raise ConfigurationFileError(
'no spectrum files were found using globs "{}"'.format(config.items("data"))
)
else:
return unknown_spectrum_list
def get_fit_parameters_from_config_file(config, prm_max_cmp, prm_min_cmp):
# these are the options specified in the [fit] section:
# maximum_component_count
# minimum_component_count
# fit_method: lsq or nnlsq
# component_count_method: combination_fit or prediction_error
# bootstrap_count: a positive integer, 1000 by default
#
log = logging.getLogger(name=__name__)
if not config.has_section("fit"):
raise ConfigurationFileError(
"required section [fit] is missing from configuration file"
)
else:
if (prm_max_cmp is None) and (
not config.has_option("fit", "maximum_component_count")
):
raise ConfigurationFileError(
"required parameter maximum_component_count is missing "
'from section [fit] in configuration file "{}"'.format(config)
)
elif (prm_min_cmp is None) and (
not config.has_option("fit", "minimum_component_count")
):
raise ConfigurationFileError(
"required parameter minimum_component_count is missing "
"from section [fit] in configuration file {}".format(config)
)
else:
max_cmp = config.getint("fit", "maximum_component_count", fallback=2)
if prm_max_cmp is not None:
log.warning(
"MaxCompo={} from PRM will be used instead of"
"maximum_component_count={} from [fit] section.".format(
prm_max_cmp, max_cmp
)
)
max_cmp = prm_max_cmp
min_cmp = config.getint("fit", "minimum_component_count", fallback=1)
if prm_min_cmp is not None:
log.warning(
"MinCompo={} from PRM will be used instead of"
"minimum_component_count={} from [fit] section.".format(
prm_min_cmp, min_cmp
)
)
min_cmp = prm_min_cmp
config_fit_method = config.get("fit", "fit_method", fallback="lsq")
if config_fit_method == "lsq":
fit_method_class = LinearRegression
elif config_fit_method == "nnlsq":
fit_method_class = NonNegativeLinearRegression
else:
raise ConfigurationFileError(
'Unrecognized fit_method "{}" in section [fit]. '
"Use lsq for least-squares or nnlsq for non-negative least squares.".format(
config_fit_method
)
)
config_component_count_method = config.get(
"fit", "component_count_method", fallback="combination_fit"
)
if config_component_count_method == "combination_fit":
fit_task_class = AllCombinationFitTask
elif config_component_count_method == "prediction_error":
fit_task_class = PredictionErrorFitTask
else:
raise ConfigurationFileError(
'unrecognized component_count_method "{}" in section [fit]'.format(
config_component_count_method
)
)
config_fit_bootstrap_count = config.get(
"fit", "bootstrap_count", fallback="1000"
)
if re.match(r"\d+", config_fit_bootstrap_count) is not None:
bootstrap_count = int(config_fit_bootstrap_count)
else:
raise ConfigurationFileError(
'"bootstrap_count={}" in section [fit] must be an integer greater than 0'.format(
config_fit_bootstrap_count
)
)
return max_cmp, min_cmp, fit_method_class, fit_task_class, bootstrap_count
def get_plotting_parameters_from_config_file(config):
if not config.has_section("plots"):
raise ConfigurationFileError(
"required section [plots] is missing from configuration file"
)
else:
best_fits_plot_limit = config.getint(
"plots", "best_fits_plot_limit", fallback=3
)
return best_fits_plot_limit
def build_fit_task(config):
log = logging.getLogger(name=__name__)
# read section [references]
# support a PRM file such as
# prm = path/to/one.prm
# or
# a list of one or more file globs such as
# arsenic_2_reference_spectra/*.e
# arsenic_3_reference_spectra/*.e
prm_max_cmp = None
prm_min_cmp = None
if config.has_section("references"):
if config.has_option("references", "prm"):
prm_file_path = os.path.expanduser(config.get("references", "prm"))
(
reference_spectrum_list,
prm_max_cmp,
prm_min_cmp,
) = build_reference_spectrum_list_from_prm_file(prm_file_path)
else:
reference_spectrum_list = build_reference_spectrum_list_from_config_file(
config
)
elif config.has_section("reference_spectra"):
if config.has_option("reference_spectra", "prm"):
prm_file_path = os.path.expanduser(config.get("reference_spectra", "prm"))
(
reference_spectrum_list,
prm_max_cmp,
prm_min_cmp,
) = build_reference_spectrum_list_from_prm_file(prm_file_path)
else:
raise ConfigurationFileError(
"section [reference_spectra] is missing required parameter prm"
)
else:
raise ConfigurationFileError(
"configuration file is missing required section [references]"
)
energy_range = get_energy_range_from_config(config)
unknown_spectrum_list = build_unknown_spectrum_list_from_config_file(config)
(
max_cmp,
min_cmp,
fit_method_class,
fit_task_class,
bootstrap_count,
) = get_fit_parameters_from_config_file(config, prm_max_cmp, prm_min_cmp)
best_fits_plot_limit = get_plotting_parameters_from_config_file(config)
if 0 < min_cmp <= max_cmp:
component_count_range = range(min_cmp, max_cmp + 1)
log.debug("component count range: {}".format(component_count_range))
else:
raise ConfigurationFileError(
f"minimum and maximum component counts are not valid:\n\tminimum: {min_cmp}\n\tmaximum: {max_cmp}"
)
fit_task = fit_task_class(
ls=fit_method_class,
reference_spectrum_list=reference_spectrum_list,
unknown_spectrum_list=unknown_spectrum_list,
energy_range_builder=energy_range,
component_count_range=component_count_range,
best_fits_plot_limit=best_fits_plot_limit,
bootstrap_count=bootstrap_count,
)
return fit_task
def get_energy_range_from_config(config):
log = logging.getLogger(name=__name__)
if config.has_option("parameters", "fit_energy_start") and config.has_option(
"parameters", "fit_energy_stop"
):
fit_energy_start = config.getfloat("parameters", "fit_energy_start")
fit_energy_stop = config.getfloat("parameters", "fit_energy_stop")
energy_range = FixedEnergyRangeBuilder(fit_energy_start, fit_energy_stop)
log.info(
"fitting with fixed energy range %d to %d",
fit_energy_start,
fit_energy_stop,
)
elif not (config.has_option("parameters", "fit_energy_start")) and not (
config.has_option("parameters", "fit_energy_stop")
):
energy_range = AdaptiveEnergyRangeBuilder()
log.info("fitting with adaptive energy ranges")
else:
raise Exception(
"only one of fit_energy_start and fit_energy_stop was specified in the configuration"
)
return energy_range
```
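A hedged sketch of the configuration these builders consume is shown below. It exercises only the pieces that never touch the filesystem (get_fit_parameters_from_config_file and get_energy_range_from_config), assumes the mrfitty package is importable, and uses illustrative option values.
```python
# Sketch of a [fit]/[parameters] configuration and the objects built from it.
# Assumes mrfitty is importable; all values are illustrative.
from mrfitty.fit_task_builder import (
    get_config_parser,
    get_energy_range_from_config,
    get_fit_parameters_from_config_file,
)

config = get_config_parser()
config.read_string(
    """\
[fit]
maximum_component_count = 3
minimum_component_count = 1
fit_method = nnlsq
component_count_method = prediction_error
bootstrap_count = 500

[parameters]
fit_energy_start = 11850.0
fit_energy_stop = 12090.0
"""
)

(
    max_cmp,
    min_cmp,
    fit_method_class,
    fit_task_class,
    bootstrap_count,
) = get_fit_parameters_from_config_file(config, prm_max_cmp=None, prm_min_cmp=None)
energy_range_builder = get_energy_range_from_config(config)

# expected: 3 1 NonNegativeLinearRegression PredictionErrorFitTask 500
print(max_cmp, min_cmp, fit_method_class.__name__, fit_task_class.__name__, bootstrap_count)
print(energy_range_builder)  # a FixedEnergyRangeBuilder
```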
#### File: mr-fitty/mrfitty/prediction_error_fit.py
```python
from collections import defaultdict
import logging
import matplotlib.pyplot as plt
import numpy as np
import scikits.bootstrap
import sklearn.model_selection
from mrfitty.base import AdaptiveEnergyRangeBuilder
from mrfitty.combination_fit import AllCombinationFitTask
from mrfitty.linear_model import NonNegativeLinearRegression
from mrfitty.plot import (
prediction_error_box_plots,
prediction_error_confidence_interval_plot,
best_fit_for_component_count_box_plots,
)
class PredictionErrorFitTask(AllCombinationFitTask):
def __init__(
self,
reference_spectrum_list,
unknown_spectrum_list,
ls=NonNegativeLinearRegression,
energy_range_builder=AdaptiveEnergyRangeBuilder(),
component_count_range=range(1, 4),
best_fits_plot_limit=3,
bootstrap_count=1000,
):
super().__init__(
ls=ls,
reference_spectrum_list=reference_spectrum_list,
unknown_spectrum_list=unknown_spectrum_list,
energy_range_builder=energy_range_builder,
best_fits_plot_limit=best_fits_plot_limit,
component_count_range=component_count_range,
)
self.bootstrap_count = bootstrap_count
def get_fit_quality_score_text(self, any_given_fit):
return [
"MSPE 95% ci of median: {:.5f} <-- {:.5f} --> {:.5f}".format(
any_given_fit.median_C_p_ci_lo,
any_given_fit.median_C_p,
any_given_fit.median_C_p_ci_hi,
),
"MSE: {:<8.5f}".format(any_given_fit.nss),
]
def choose_best_component_count(self, all_counts_spectrum_fit_table):
"""
Calculate the prediction error statistics for top fits.
Parameters
----------
all_counts_spectrum_fit_table (dict)
dictionary with component count keys, sorted list of spectrum fit list values
Returns
-------
best_fit (SpectrumFit)
the fit having lowest 95% confidence interval of median prediction error AND lowest number
of reference components
"""
component_counts = list(all_counts_spectrum_fit_table)
a_fit = all_counts_spectrum_fit_table[component_counts[0]][0]
log = logging.getLogger(__name__ + ":" + a_fit.unknown_spectrum.file_name)
log.debug(
"choosing best component count from %s", all_counts_spectrum_fit_table
)
component_count_to_median_cp = {
component_count: np.Inf
for component_count in all_counts_spectrum_fit_table.keys()
}
component_count_to_median_cp_ci_lo_hi = {
component_count: (np.Inf, np.Inf)
for component_count in all_counts_spectrum_fit_table.keys()
}
all_counts_spectrum_fit_pe_table = defaultdict(list)
for component_count_i in sorted(all_counts_spectrum_fit_table.keys()):
# calculate C_p for the first ? fits with component_count_i
log.debug(
"calculating CI of median C_p for %d component(s)", component_count_i
)
sorted_fits_for_i_components = sorted(
all_counts_spectrum_fit_table[component_count_i],
key=lambda fit: fit.nss,
)
for fit_j in sorted_fits_for_i_components[:20]:
prediction_errors, _ = self.calculate_prediction_error_list(
fit_j, n_splits=self.bootstrap_count
)
fit_j.mean_C_p = np.mean(prediction_errors)
mean_ci_lo, mean_ci_hi = scikits.bootstrap.ci(
data=prediction_errors, statfunction=np.mean
)
fit_j.mean_C_p_ci_lo = mean_ci_lo
fit_j.mean_C_p_ci_hi = mean_ci_hi
fit_j.median_C_p = np.median(prediction_errors)
median_ci_lo, median_ci_hi = scikits.bootstrap.ci(
data=prediction_errors, statfunction=np.median
)
fit_j.median_C_p_ci_lo = median_ci_lo
fit_j.median_C_p_ci_hi = median_ci_hi
fit_j.prediction_errors = prediction_errors
all_counts_spectrum_fit_pe_table[component_count_i].append(fit_j)
all_counts_spectrum_fit_pe_table[component_count_i] = sorted(
all_counts_spectrum_fit_pe_table[component_count_i],
key=lambda fit: (
fit.median_C_p,
fit.median_C_p_ci_lo,
fit.median_C_p_ci_hi,
),
)
# for each fit find all fits with overlapping ci
# 4 cases:
#
# <-- k --> j.lo > k.hi (keep checking)
# <-- j -->
#
# <-- k --> j.lo <= k.hi <= j.hi
# <-- j -->
#
# <-- k --> j.lo > k.lo and j.hi < k.hi
# <-- j -->
#
# <- k -> j.lo <= k.lo and j.hi > k.hi
# <-- j -->
#
# <-- k --> j.lo <= k.lo <= j.hi
# <-- j -->
#
# <-- k --> j.hi < k.lo (stop checking)
# <-- j -->
for fit_j in all_counts_spectrum_fit_pe_table[component_count_i]:
for fit_k in all_counts_spectrum_fit_pe_table[component_count_i]:
if fit_j == fit_k:
log.debug(
"* component count %d: %8.5f <-- %8.5f --> %8.5f",
component_count_i,
fit_j.median_C_p_ci_lo,
fit_j.median_C_p,
fit_j.median_C_p_ci_hi,
)
elif fit_j.median_C_p_ci_hi < fit_k.median_C_p_ci_lo:
# assuming later fits will not overlap with fit_j
break
elif fit_j.median_C_p_ci_lo > fit_k.median_C_p_ci_hi:
# assuming later fits could overlap with fit_j
pass
elif (
fit_j.median_C_p_ci_lo
<= fit_k.median_C_p_ci_lo
<= fit_j.median_C_p_ci_hi
):
log.debug(
" component count %d: %8.5f <-- %8.5f --> %8.5f",
component_count_i,
fit_k.median_C_p_ci_lo,
fit_k.median_C_p,
fit_k.median_C_p_ci_hi,
)
elif (
fit_j.median_C_p_ci_lo
<= fit_k.median_C_p_ci_hi
<= fit_j.median_C_p_ci_hi
):
log.debug(
" component count %d: %8.5f <-- %8.5f --> %8.5f",
component_count_i,
fit_k.median_C_p_ci_lo,
fit_k.median_C_p,
fit_k.median_C_p_ci_hi,
)
else:
log.debug(
" component count %d: %8.5f <-- %8.5f --> %8.5f",
component_count_i,
fit_k.median_C_p_ci_lo,
fit_k.median_C_p,
fit_k.median_C_p_ci_hi,
)
log.debug("***")
best_fit_for_component_count = all_counts_spectrum_fit_pe_table[
component_count_i
][0]
component_count_to_median_cp[
component_count_i
] = best_fit_for_component_count.median_C_p
component_count_to_median_cp_ci_lo_hi[component_count_i] = (
best_fit_for_component_count.median_C_p_ci_lo,
best_fit_for_component_count.median_C_p_ci_hi,
)
log.debug(
"component count to median cp: {}".format(component_count_to_median_cp)
)
log.debug(
"component count to median cp confidence interval: {}".format(
component_count_to_median_cp_ci_lo_hi
)
)
best_component_count, C_p_lo, C_p_hi = self.get_best_ci_component_count(
component_count_to_median_cp, component_count_to_median_cp_ci_lo_hi
)
best_fit = all_counts_spectrum_fit_table[best_component_count][0]
log.info("best fit: {}".format(best_fit))
return best_fit
@staticmethod
def get_best_ci_component_count(
component_count_to_median_cp,
component_count_to_median_cp_ci_lo_hi,
logger_name_suffix="",
):
"""
Use the 'best subset selection' criterion to choose the 'best' component count using
confidence intervals for median C_p (prediction error). Choose the component count with
the smallest median C_p. If two or more C_p confidence intervals overlap choose the lower
component count.
component count lo hi
1 0.3 0.4
2 0.1 0.2
:param component_count_to_median_cp:
:param component_count_to_median_cp_ci_lo_hi:
:param logger_name_suffix: str
:return: (int) best component count
"""
log = logging.getLogger(name=__name__ + ":" + logger_name_suffix)
best_component_count = max(component_count_to_median_cp_ci_lo_hi.keys())
n_lo, n_hi = component_count_to_median_cp_ci_lo_hi[best_component_count]
for n in sorted(component_count_to_median_cp_ci_lo_hi.keys())[:-1]:
n_lo, n_hi = component_count_to_median_cp_ci_lo_hi[n]
n_plus_1_lo, n_plus_1_hi = component_count_to_median_cp_ci_lo_hi[n + 1]
log.info("comparing C_p ci for component counts %d and %d", n, n + 1)
log.info(
" component count %d: %8.5f <-- %8.5f --> %8.5f",
n,
n_lo,
component_count_to_median_cp[n],
n_hi,
)
log.info(
" component count %d: %8.5f <-- %8.5f --> %8.5f",
n + 1,
n_plus_1_lo,
component_count_to_median_cp[n + 1],
n_plus_1_hi,
)
# must handle two cases:
# n_plus_1_hi >= n_lo -> choose n
# n_plus_1_hi < n_lo -> try n+1
if n_plus_1_hi >= n_lo:
best_component_count = n
break
return best_component_count, n_lo, n_hi
def calculate_prediction_error_list(self, fit, n_splits, test_size=0.2):
"""
Given a fit calculate normalized prediction error on n_splits models with randomly withheld data.
Parameters
----------
fit - instance of SpectrumFit
n_splits - number of times to calculate prediction error, 1000 is recommended
test_size - fraction of data to withhold from training, 0.2 is recommended
Returns
-------
normalized_C_p_list - list of normalized prediction errors, one per model
model_residuals - (fit.reference_spectra_A_df.shape[0] x n_splits) numpy array of residuals
for each model with NaNs at training indices
"""
normalized_C_p_list = []
model_residuals = np.full(
shape=(fit.reference_spectra_A_df.shape[0], n_splits),
fill_value=np.nan,
dtype=np.double,
)
for i, (predicted_b, train_index, test_index) in enumerate(
self.fit_and_predict(fit, n_splits=n_splits, test_size=test_size)
):
model_residuals[test_index, i] = (
fit.unknown_spectrum_b.values[test_index] - predicted_b
)
cp = np.sqrt(np.nansum(np.square(model_residuals[test_index, i])))
normalized_cp = cp / len(test_index)
normalized_C_p_list.append(normalized_cp)
return normalized_C_p_list, model_residuals
def fit_and_predict(self, fit, n_splits=1000, test_size=0.2):
cv = sklearn.model_selection.ShuffleSplit(
n_splits=n_splits, test_size=test_size
)
for i, (train_index, test_index) in enumerate(
cv.split(fit.reference_spectra_A_df.values)
):
lm = self.ls()
lm.fit(
fit.reference_spectra_A_df.values[train_index],
fit.unknown_spectrum_b.values[train_index],
)
predicted_b = lm.predict(fit.reference_spectra_A_df.values[test_index])
yield predicted_b, train_index, test_index
def plot_top_fits(self, spectrum, fit_results):
# log = logging.getLogger(name=self.__class__.__name__ + ":" + spectrum.file_name)
figure_list = []
top_fit_per_component_count = {}
for i, component_count in enumerate(
fit_results.component_count_fit_table.keys()
):
pe_fits = [
fit
for fit in fit_results.component_count_fit_table[component_count]
if hasattr(fit, "median_C_p")
]
sorted_fits = sorted(pe_fits, key=lambda fit: fit.median_C_p)[:10]
top_fit_per_component_count[component_count] = sorted_fits[0]
f, ax = plt.subplots()
prediction_error_box_plots(
ax=ax,
title=f"Best {component_count}-component Fits\n{spectrum.file_name}",
sorted_fits=sorted_fits,
)
f.tight_layout()
figure_list.append(f)
g, ax = plt.subplots()
prediction_error_confidence_interval_plot(
ax=ax,
title=f"Best {component_count}-component Fits\n{spectrum.file_name}",
sorted_fits=sorted_fits,
)
g.tight_layout()
figure_list.append(g)
f, ax = plt.subplots()
best_fit_for_component_count_box_plots(
ax=ax,
title=f"Best Fits\n{spectrum.file_name}",
top_fit_per_component_count=top_fit_per_component_count,
)
f.tight_layout()
figure_list.append(f)
return figure_list
```
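Because get_best_ci_component_count is a static method, the interval-overlap rule can be exercised directly. The sketch below uses made-up medians and confidence intervals and assumes mrfitty is importable.
```python
# Worked example of the 'best subset selection' rule with made-up numbers.
# The 2-component interval (0.28, 0.33) overlaps the 1-component interval
# (0.30, 0.40), so the lower component count wins.
from mrfitty.prediction_error_fit import PredictionErrorFitTask

component_count_to_median_cp = {1: 0.35, 2: 0.30}
component_count_to_median_cp_ci_lo_hi = {1: (0.30, 0.40), 2: (0.28, 0.33)}

best_count, ci_lo, ci_hi = PredictionErrorFitTask.get_best_ci_component_count(
    component_count_to_median_cp,
    component_count_to_median_cp_ci_lo_hi,
)
print(best_count, ci_lo, ci_hi)  # expected: 1 0.3 0.4
```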
#### File: mrfitty/tests/test_fit_task_builder.py
```python
import pytest
from mrfitty.fit_task_builder import (
build_reference_spectrum_list_from_config_file,
build_reference_spectrum_list_from_config_prm_section,
build_unknown_spectrum_list_from_config_file,
ConfigurationFileError,
get_config_parser,
get_fit_parameters_from_config_file,
_get_required_config_value,
)
_spectrum_file_content = """\
11825.550 0.62757215E-02 0.62429776E-02 -0.58947170E-03
11830.550 0.30263933E-02 0.29936416E-02 -0.55479576E-03
11835.550 0.15935143E-03 0.12659210E-03 -0.45673882E-03
11840.550 -0.20089439E-02 -0.20417109E-02 -0.31491527E-03
"""
def test__get_required_config_value():
config = get_config_parser()
test_section = "blah"
test_option = "bleh"
test_value = "blih"
config.add_section(section=test_section)
config.set(section=test_section, option=test_option, value=test_value)
assert test_value == _get_required_config_value(
config=config, section=test_section, option=test_option
)
with pytest.raises(ConfigurationFileError):
_get_required_config_value(config=config, section="missing", option="missing")
def test_build_reference_spectrum_list_from_prm_section(fs):
reference_config = get_config_parser()
reference_config.read_string(
"""\
[prm]
NBCompoMax = 4
NBCompoMin = 1
arsenate_aqueous_avg_als_cal.e
arsenate_sorbed_anth_avg_als_cal.e
"""
)
fs.create_file(
file_path="arsenate_aqueous_avg_als_cal.e", contents=_spectrum_file_content
)
fs.create_file(
file_path="arsenate_sorbed_anth_avg_als_cal.e", contents=_spectrum_file_content
)
(
max_count,
min_count,
reference_list,
) = build_reference_spectrum_list_from_config_prm_section(reference_config)
assert max_count == 4
assert min_count == 1
assert len(reference_list) == 2
def test_build_reference_spectrum_list_from_prm_section__bad_component_counts(fs):
reference_config = get_config_parser()
reference_config.read_string(
"""\
[prm]
NBCompoMax = 1
NBCompoMin = 4
arsenate_aqueous_avg_als_cal.e
arsenate_sorbed_anth_avg_als_cal.e
"""
)
fs.create_file(
file_path="arsenate_aqueous_avg_als_cal.e", contents=_spectrum_file_content
)
fs.create_file(
file_path="arsenate_sorbed_anth_avg_als_cal.e", contents=_spectrum_file_content
)
with pytest.raises(ConfigurationFileError):
build_reference_spectrum_list_from_config_prm_section(reference_config)
def test_build_reference_spectrum_list_from_config_file(fs):
reference_config = get_config_parser()
reference_config.read_string(
"""\
[references]
references/*.e
"""
)
fs.create_dir(directory_path="references")
fs.create_file(
file_path="references/arsenate_aqueous_avg_als_cal.e",
contents=_spectrum_file_content,
)
fs.create_file(
file_path="references/arsenate_sorbed_anth_avg_als_cal.e",
contents=_spectrum_file_content,
)
reference_list = build_reference_spectrum_list_from_config_file(reference_config)
assert len(reference_list) == 2
def test_build_unknown_spectrum_list_from_config_file(fs):
data_config = get_config_parser()
data_config.read_string(
"""\
[data]
data/*.e
"""
)
fs.create_dir(directory_path="data")
fs.create_file(file_path="data/data_0.e", contents=_spectrum_file_content)
fs.create_file(file_path="data/data_1.e", contents=_spectrum_file_content)
data_list = build_unknown_spectrum_list_from_config_file(data_config)
assert len(data_list) == 2
def test_get_fit_parameters_from_config_file():
fit_config = get_config_parser()
fit_config.read_string(
"""\
[fit]
max_component_count = 3
min_component_count = 1
"""
)
(
max_cmp,
min_cmp,
fit_method_class,
fit_task_class,
bootstrap_count,
) = get_fit_parameters_from_config_file(fit_config, prm_max_cmp=3, prm_min_cmp=1)
assert max_cmp == 3
assert min_cmp == 1
assert bootstrap_count == 1000
def test_get_good_fit_parameter_bootstrap_count_from_config_file():
fit_config = get_config_parser()
fit_config.read_string(
"""\
[fit]
max_component_count = 3
min_component_count = 1
bootstrap_count = 2000
"""
)
(
max_cmp,
min_cmp,
fit_method_class,
fit_task_class,
bootstrap_count,
) = get_fit_parameters_from_config_file(fit_config, prm_max_cmp=3, prm_min_cmp=1)
assert max_cmp == 3
assert min_cmp == 1
assert bootstrap_count == 2000
def test_get_bad_fit_parameter_bootstrap_count_from_config_file():
fit_config = get_config_parser()
fit_config.read_string(
"""\
[fit]
max_component_count = 3
min_component_count = 1
bootstrap_count = haha
"""
)
with pytest.raises(ConfigurationFileError):
(
max_cmp,
min_cmp,
fit_method_class,
fit_task_class,
bootstrap_count,
) = get_fit_parameters_from_config_file(
fit_config, prm_max_cmp=3, prm_min_cmp=1
)
```
#### File: mrfitty/tests/test_mrfitty.py
```python
import logging
import os.path
import pytest
from mrfitty.__main__ import cli
from mrfitty.fit_task_builder import get_config_parser
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(name=__name__)
@pytest.mark.skip()
def test_main(fs):
# write the necessary input files:
# configuration file
# reference files
# sample files
# prm file
# then check for the expected output files
# plot pdf
# table txt
# sample fit files
fs.CreateFile(
"ref_1.e",
"""\
1000.0\t0.01
1000.1\t0.02
1000.2\t0.03
1000.3\t0.04
1000.4\t0.05
""",
)
fs.CreateFile(
"ref_2.e",
"""\
1000.0\t0.05
1000.1\t0.04
1000.2\t0.03
1000.3\t0.02
1000.4\t0.01
""",
)
fs.CreateFile(
"ref_3.e",
"""\
1000.0\t0.01
1000.1\t0.01
1000.2\t0.01
1000.3\t0.01
1000.4\t0.01
""",
)
fs.CreateFile(
"test_main.prm",
"""\
NbCompoMax=3
NbCompoMin=1
ref=ref_1.e
ref=ref_2.e
ref=ref_3.e
""",
)
# sample 1 is twice ref_1
fs.CreateFile(
"sample_1.e",
"""\
1000.1\t0.02
1000.2\t0.04
1000.3\t0.06
""",
)
# sample 2 is half ref_2
fs.CreateFile(
"sample_2.e",
"""\
1000.1\t0.015
1000.2\t0.010
1000.3\t0.005
""",
)
# sample 3 is ref_1 plus ref_3
fs.CreateFile(
"sample_3.e",
"""\
1000.1\t0.03
1000.2\t0.04
1000.3\t0.05
""",
)
get_config_parser()
fs.CreateFile(
"test_main.cfg",
"""\
[reference_spectra]
prm = test_main.prm
[data]
sample*.e
[output]
best_fit_files_dir = .
plots_pdf_fp = test_main_plots.pdf
table_fp = test_main_table.txt
reference_plots_pdf = test_main_reference_plots.pdf
""",
)
result = cli(["test_main.cfg"])
assert result.exit_code == 0
assert os.path.exists("test_main_plots.pdf")
assert os.path.exists("test_main_table.txt")
assert os.path.exists("sample_1_fit.txt")
``` |
{
"source": "jklynch/ophyd-addon",
"score": 2
} |
#### File: ophyd-addon/ophyd_addon/ioc_util.py
```python
import contextvars
import functools
internal_process = contextvars.ContextVar("internal_process", default=False)
def no_reentry(func):
@functools.wraps(func)
async def inner(*args, **kwargs):
if internal_process.get():
return
try:
internal_process.set(True)
return await func(*args, **kwargs)
finally:
internal_process.set(False)
return inner
```
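A small sketch of how the guard behaves follows; it assumes ophyd_addon is importable, and the handle_put coroutine is a hypothetical stand-in for an IOC callback. The nested call returns immediately because internal_process is already set inside the outer call.
```python
# Sketch: the nested call is skipped because internal_process is already True.
# Assumes ophyd_addon is importable; handle_put is a hypothetical example.
import asyncio

from ophyd_addon.ioc_util import no_reentry


@no_reentry
async def handle_put(depth=0):
    print(f"handling at depth {depth}")
    if depth == 0:
        await handle_put(depth=1)  # skipped by the guard


asyncio.run(handle_put())  # prints only "handling at depth 0"
```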
#### File: ophyd_addon/tests/test_tiffsimdetector.py
```python
import pytest
from bluesky.plans import count
from ophyd import _caproto_shim
from ophyd_addon.areadetector.document_builders import NewPerkinElmerDetector
@pytest.mark.skip
def test_tiffsimdetector(RE, databroker):
tiff_sim_detector = NewPerkinElmerDetector(name="sim")
with databroker() as db:
RE.subscribe(db.insert)
RE(count([tiff_sim_detector]))
``` |
{
"source": "jklynch/pizza-box",
"score": 2
} |
#### File: pizza-box/pizza_box/handlers.py
```python
import os
import numpy as np
import pandas as pd
from databroker.assets.handlers_base import HandlerBase
class APBBinFileHandler(HandlerBase):
"Read electrometer *.bin files"
def __init__(self, fpath):
# It's a text config file, which we don't store in the resources yet, parsing for now
fpath_txt = f"{os.path.splitext(fpath)[0]}.txt"
with open(fpath_txt, "r") as fp:
content = fp.readlines()
content = [x.strip() for x in content]
_ = int(content[0].split(":")[1])
# Gains = [int(x) for x in content[1].split(":")[1].split(",")]
# Offsets = [int(x) for x in content[2].split(":")[1].split(",")]
# FAdiv = float(content[3].split(":")[1])
# FArate = float(content[4].split(":")[1])
# trigger_timestamp = float(content[5].split(":")[1].strip().replace(",", "."))
raw_data = np.fromfile(fpath, dtype=np.int32)
columns = ["timestamp", "i0", "it", "ir", "iff", "aux1", "aux2", "aux3", "aux4"]
num_columns = len(columns) + 1 # TODO: figure out why 1
raw_data = raw_data.reshape((raw_data.size // num_columns, num_columns))
derived_data = np.zeros((raw_data.shape[0], raw_data.shape[1] - 1))
derived_data[:, 0] = (
raw_data[:, -2] + raw_data[:, -1] * 8.0051232 * 1e-9
) # Unix timestamp with nanoseconds
for i in range(num_columns - 2):
derived_data[:, i + 1] = raw_data[:, i] # ((raw_data[:, i] ) - Offsets[i]) / Gains[i]
self.df = pd.DataFrame(data=derived_data, columns=columns)
self.raw_data = raw_data
def __call__(self):
return self.df
``` |
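Below is a hedged sketch of how a handler like this is typically registered with a databroker registry so that filled documents come back as the DataFrame returned by __call__; the "APB" spec name and the temporary catalog are assumptions, not taken from this repository.
```python
# Hedged registration sketch; the "APB" spec name is an assumption.
from databroker import Broker

from pizza_box.handlers import APBBinFileHandler

db = Broker.named("temp")
db.reg.register_handler("APB", APBBinFileHandler, overwrite=True)
# Once registered, filling a resource whose spec is "APB" constructs
# APBBinFileHandler(fpath) and returns the DataFrame from __call__().
```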
{
"source": "jklynch/profile_collection_srx",
"score": 2
} |
#### File: profile_collection_srx/startup/61-xrf.py
```python
print(f'Loading {__file__}...')
import epics
import os
import collections
import numpy as np
import time as ttime
import matplotlib.pyplot as plt
import bluesky.plans as bp
from bluesky.plans import outer_product_scan, scan
from bluesky.callbacks import LiveGrid
from bluesky.callbacks.fitting import PeakStats
from bluesky.preprocessors import subs_wrapper
import bluesky.plan_stubs as bps
from bluesky.plan_stubs import mv, abs_set
from bluesky.simulators import plot_raster_path
def hf2dxrf(*, xstart, xnumstep, xstepsize,
ystart, ynumstep, ystepsize, acqtime,
shutter=True, align=False, xmotor=hf_stage.x, ymotor=hf_stage.y,
numrois=1, extra_dets=[],
setenergy=None, u_detune=None, echange_waittime=10, samplename=None, snake=True):
'''input:
xstart, xnumstep, xstepsize : float
ystart, ynumstep, ystepsize : float
acqtime : float
            acquisition time to be set for both xspress3 and F460
numrois : integer
number of ROIs set to display in the live raster scans.
            This is for display ONLY. The actual number of ROIs
            saved depends on how many are enabled and set in the
            read_attr. However, normally one cares only about the raw XRF
spectra which are all saved and will be used for fitting.
energy (float): set energy, use with caution, hdcm might
become misaligned
u_detune (float): amount of undulator to
detune in the unit of keV
'''
# Record relevant metadata in the Start document, defined in 90-usersetup.py
scan_md = {}
get_stock_md(scan_md)
scan_md['sample'] = {'name': samplename}
scan_md['scan_input'] = str([xstart, xnumstep, xstepsize, ystart, ynumstep, ystepsize, acqtime])
scan_md['scaninfo'] = {'type': 'XRF',
'raster' : True}
# Setup detectors
dets = [sclr1, xs]
dets = dets + extra_dets
dets_by_name = {d.name : d
for d in dets}
# Scaler
if (acqtime < 0.001):
acqtime = 0.001
sclr1.preset_time.put(acqtime)
# XS3
xs.external_trig.put(False)
xs.cam.acquire_time.put(acqtime)
xs.total_points.put((xnumstep + 1) * (ynumstep + 1))
if ('merlin' in dets_by_name):
dpc = dets_by_name['merlin']
# Setup Merlin
dpc.cam.trigger_mode.put(0)
dpc.cam.acquire_time.put(acqtime)
dpc.cam.acquire_period.put(acqtime + 0.005)
dpc.cam.num_images.put(1)
dpc.hdf5.stage_sigs['num_capture'] = (xnumstep + 1) * (ynumstep + 1)
dpc._mode = SRXMode.step
dpc.total_points.put((xnumstep + 1) * (ynumstep + 1))
if ('xs2' in dets_by_name):
xs2 = dets_by_name['xs2']
xs2.external_trig.put(False)
xs2.settings.acquire_time.put(acqtime)
xs2.total_points.put((xnumstep + 1) * (ynumstep + 1))
# Setup the live callbacks
livecallbacks = []
# Setup scanbroker to update time remaining
def time_per_point(name, doc, st=ttime.time()):
if ('seq_num' in doc.keys()):
scanrecord.scan0.tpp.put((doc['time'] - st) / doc['seq_num'])
scanrecord.scan0.curpt.put(int(doc['seq_num']))
scanrecord.time_remaining.put((doc['time'] - st) / doc['seq_num'] *
((xnumstep + 1) * (ynumstep + 1) - doc['seq_num']) / 3600)
livecallbacks.append(time_per_point)
# Setup LiveTable
livetableitem = [xmotor.name, ymotor.name, i0.name]
xstop = xstart + xnumstep * xstepsize
ystop = ystart + ynumstep * ystepsize
for roi_idx in range(numrois):
roi_name = 'roi{:02}'.format(roi_idx+1)
# roi_key = getattr(xs.channel1.rois, roi_name).value.name
roi_key = xs.channels.channel01.get_mcaroi(mcaroi_number=roi_idx).total_rbv.name
livetableitem.append(roi_key)
roimap = LiveGrid((ynumstep+1, xnumstep+1), roi_key,
clim=None, cmap='viridis',
xlabel='x (mm)', ylabel='y (mm)',
extent=[xstart, xstop, ystart, ystop],
x_positive='right', y_positive='down')
livecallbacks.append(roimap)
if ('xs2' in dets_by_name):
for roi_idx in range(numrois):
            roi_name = 'roi{:02}'.format(roi_idx+1)
            roi_key = getattr(xs2.channel1.rois, roi_name).value.name
livetableitem.append(roi_key)
fig = plt.figure('xs2_ROI{:02}'.format(roi_idx+1))
fig.clf()
roimap = LiveGrid((ynumstep+1, xnumstep+1), roi_key,
clim=None, cmap='viridis',
xlabel='x (mm)', ylabel='y (mm)',
extent=[xstart, xstop, ystart, ystop],
x_positive='right', y_positive='down',
ax=fig.gca())
livecallbacks.append(roimap)
if ('merlin' in dets_by_name) and (hasattr(dpc, 'stats1')):
fig = plt.figure('DPC')
fig.clf()
dpc_tmap = LiveGrid((ynumstep+1, xnumstep+1),
dpc.stats1.total.name, clim=None, cmap='viridis',
xlabel='x (mm)', ylabel='y (mm)',
x_positive='right', y_positive='down',
extent=[xstart, xstop, ystart, ystop],
ax=fig.gca())
livecallbacks.append(dpc_tmap)
# Change energy (if provided)
if (setenergy is not None):
if (u_detune is not None):
energy.detune.put(u_detune)
print('Changing energy to ', setenergy)
yield from mv(energy, setenergy)
print('Waiting time (s) ', echange_waittime)
yield from bps.sleep(echange_waittime)
def at_scan(name, doc):
scanrecord.current_scan.put(doc['uid'][:6])
scanrecord.current_scan_id.put(str(doc['scan_id']))
scanrecord.current_type.put(scan_md['scaninfo']['type'])
scanrecord.scanning.put(True)
def finalize_scan(name, doc):
scanrecord.scanning.put(False)
# Setup the scan
hf2dxrf_scanplan = outer_product_scan(dets,
ymotor, ystart, ystop, ynumstep+1,
xmotor, xstart, xstop, xnumstep+1, snake,
md=scan_md)
hf2dxrf_scanplan = subs_wrapper(hf2dxrf_scanplan,
{'all': livecallbacks,
'start': at_scan,
'stop': finalize_scan})
# Move to starting position
yield from mv(xmotor, xstart,
ymotor, ystart)
# Peak up monochromator at this energy
if (align):
yield from peakup_fine(shutter=shutter)
# Open shutter
if (shutter):
yield from mv(shut_b,'Open')
# Run the scan
scaninfo = yield from hf2dxrf_scanplan
#TO-DO: implement fast shutter control (close)
if (shutter):
yield from mv(shut_b, 'Close')
# Write to scan log
if ('merlin' in dets_by_name):
logscan_event0info('2dxrf_withdpc')
# Should this be here?
merlin.hdf5.stage_sigs['num_capture'] = 0
else:
logscan_detailed('2dxrf')
return scaninfo
# I'm not sure how to use this function
def multi_region_h(regions, energy_list=None, **kwargs):
ret = []
for r in regions:
inp = {}
inp.update(kwargs)
inp.update(r)
rs_uid = yield from hf2dxrf(**inp)
ret.extend(rs_uid)
return ret
# Not sure how often this will be used....but at least it's updated
def hf2dxrf_repeat(num_scans=None, waittime=10,
xstart=None, xnumstep=None, xstepsize=None,
ystart=None, ynumstep=None, ystepsize=None,
acqtime=None, numrois=0, i0map_show=False, itmap_show = False
):
'''
    This function repeats the 2D XRF scan on the same spots for the specified number of scans.
    input:
        num_scans (integer): number of scans to be repeated on the same position.
        waittime (float): wait time in sec. between scans. A few seconds is recommended so the HDF5 file can finish closing.
        Other inputs are described as in hf2dxrf.
'''
if num_scans is None:
raise Exception('Please specify "num_scans" as the number of scans to be run. E.g. num_scans = 3.')
for i in range(num_scans):
yield from hf2dxrf(xstart=xstart, xnumstep=xnumstep, xstepsize=xstepsize,
ystart=ystart, ynumstep=ynumstep, ystepsize=ystepsize,
acqtime=acqtime, numrois=numrois)
if (i != num_scans-1):
print(f'Waiting {waittime} seconds between scans...')
yield from bps.sleep(waittime)
def hf2dxrf_ioc(samplename=None, align=False, numrois=1, shutter=True, waittime=10):
'''
invokes hf2dxrf repeatedly with parameters provided separately.
waittime [sec] time to wait between scans
shutter [bool] scan controls shutter
'''
scanlist = [ scanrecord.scan15, scanrecord.scan14, scanrecord.scan13,
scanrecord.scan12, scanrecord.scan11, scanrecord.scan10,
scanrecord.scan9, scanrecord.scan8, scanrecord.scan7,
scanrecord.scan6, scanrecord.scan5, scanrecord.scan4,
scanrecord.scan3, scanrecord.scan2, scanrecord.scan1,
scanrecord.scan0 ]
Nscan = 0
for scannum in range(len(scanlist)):
thisscan = scanlist.pop()
Nscan = Nscan + 1
if thisscan.ena.get() == 1:
scanrecord.current_scan.put('Scan {}'.format(Nscan))
xstart = thisscan.p1s.get()
xnumstep = int(thisscan.p1stp.get())
xstepsize = thisscan.p1i.get()
ystart = thisscan.p2s.get()
ynumstep = int(thisscan.p2stp.get())
ystepsize = thisscan.p2i.get()
acqtime = thisscan.acq.get()
hf2dxrf_gen = yield from hf2dxrf(xstart=xstart, xnumstep=xnumstep, xstepsize=xstepsize,
ystart=ystart, ynumstep=ynumstep, ystepsize=ystepsize,
acqtime=acqtime, samplename=None, align=False, numrois=1,
shutter=True)
            if (len(scanlist) != 0):
yield from bps.sleep(waittime)
scanrecord.current_scan.put('')
# def fermat_plan(x_range, y_range, dr, factor, exp_time=0.2):
def fermat_plan(*args, **kwargs):
x_range = args[0]
y_range = args[1]
dr = args[2]
factor = args[3]
# print(f'{x_range}\t{y_range}')
# print(args)
kwargs.setdefault('exp_time', 0.2)
# Setup motors
x_motor = nano_stage.sx
y_motor = nano_stage.sy
# Setup detectors
dets = [sclr1, xs, xbpm2, merlin, bpm4, temp_nanoKB]
# print("ready to call fermat_master...")
yield from fermat_master_plan(dets, x_motor, y_motor, *args, **kwargs)
def fermat_master_plan(*args, exp_time=None, **kwargs):
# Synchronize exposure times
sclr1.preset_time.put(exp_time)
xs.external_trig.put(False)
xs.cam.acquire_time.put(exp_time)
merlin.cam.acquire_time.put(exp_time)
merlin.cam.acquire_period.put(exp_time + 0.005)
scan_md = {}
get_stock_md(scan_md)
scan_md['scan']['merlin'] = {'merlin_exp_time' : exp_time,
'merlin_exp_period' : exp_time + 0.005}
plan = bp.rel_spiral_fermat(*args, **kwargs)
d = plot_raster_path(plan, args[1].name, args[2].name, probe_size=.001, lw=0.5)
num_points = d['path'].get_path().vertices.shape[0]
print(f"Number of points: {num_points}")
xs.total_points.put(num_points)
yield from bps.mv(merlin.total_points, num_points,
merlin.hdf5.num_capture, num_points)
merlin.hdf5.stage_sigs['num_capture'] = num_points
yield from rel_spiral_fermat(*args, **kwargs, md=scan_md)
# Check the fermat spiral points
# This does not run within the run engine
# plot_raster_path(rel_spiral_fermat([], nano_stage.sx, nano_stage.sy, 2, 2,
# 0.5, 1), nano_stage.sx.name, nano_stage.sy.name)
def check_fermat_plan(xrange, yrange, dr, factor):
xmotor = nano_stage.sx
ymotor = nano_stage.sy
plot_raster_path(rel_spiral_fermat([], xmotor, ymotor, xrange, yrange, dr, factor), xmotor.name, ymotor.name)
ax = plt.gca()
line = ax.lines[0]
print(f'The scan will have {len(line.get_xdata())} points.')
def export_flying_merlin2tiff(scanid=-1, wd=None):
if wd is None:
wd = '/home/xf05id1/current_user_data/'
print('Loading data...')
h = db[int(scanid)]
d = h.data('merlin_image', stream_name='stream0', fill=True)
d = np.array(list(d))
d = np.squeeze(d)
d = np.array(d, dtype='float32')
x = np.array(list(h.data('enc1', stream_name='stream0', fill=True)))
y = np.array(list(h.data('enc2', stream_name='stream0', fill=True)))
I0= np.array(list(h.data('i0', stream_name='stream0', fill=True)))
# Flatten arrays
(N, M) = x.shape
x_flat = np.reshape(x, (N*M, ))
y_flat = np.reshape(y, (N*M, ))
I0_flat = np.reshape(I0, (N*M, ))
# Get scanid
if (scanid < 0):
scanid = h.start['scan_id']
print('Writing data...')
fn = 'scan%d.tif' % scanid
fn_txt = 'scan%d.txt' % scanid
io.imsave(wd + fn, d)
np.savetxt(wd + fn_txt, np.array((x_flat, y_flat, I0_flat)))
def export_merlin2tiff(scanid=-1, wd=None):
if wd is None:
wd = '/home/xf05id1/current_user_data/'
print('Loading data...')
h = db[int(scanid)]
d = h.data('merlin_image', fill=True)
d = np.array(list(d))
d = np.squeeze(d)
d = np.array(d, dtype='float32')
x = np.array(list(h.data('nano_stage_sx', fill=True)))
y = np.array(list(h.data('nano_stage_sy', fill=True)))
I0= np.array(list(h.data('sclr_i0', fill=True)))
# Get scanid
if (scanid < 0):
scanid = h.start['scan_id']
print('Writing data...')
fn = 'scan%d.tif' % scanid
fn_txt = 'scan%d.txt' % scanid
io.imsave(wd + fn, d)
np.savetxt(wd + fn_txt, np.array((x, y, I0)))
def nano_xrf(xstart, xstop, xstep,
ystart, ystop, ystep, dwell,
shutter=True, extra_dets=None,
xmotor=nano_stage.sx, ymotor=nano_stage.sy, flag_snake=True):
# calculate number of points
    xnum = int(np.abs(np.round((xstop - xstart)/xstep)) + 1)
    ynum = int(np.abs(np.round((ystop - ystart)/ystep)) + 1)
# Setup detectors
if extra_dets is None:
extra_dets = []
dets = [sclr1, xs, xbpm2, xmotor, ymotor] + extra_dets
# Record relevant metadata in the Start document, defined in 90-usersetup.py
scan_md = {}
get_stock_md(scan_md)
# scan_md['scan_input'] = str([xstart, xstop, xstep, ystart, ystop, ystep, dwell])
# scan_md['scaninfo'] = {'type': 'XRF',
# 'raster' : True}
scan_md['scan']['type'] = 'XRF_STEP'
scan_md['scan']['scan_input'] = [xstart, xstop, xstep, ystart, ystop, ystep, dwell]
scan_md['scan']['detectors'] = [d.name for d in dets]
scan_md['scan']['fast_axis'] = {'motor_name' : xmotor.name,
'units' : xmotor.motor_egu.get()}
scan_md['scan']['slow_axis'] = {'motor_name' : ymotor.name,
'units' : ymotor.motor_egu.get()}
scan_md['scan']['theta'] = {'val' : nano_stage.th.user_readback.get(),
'units' : nano_stage.th.motor_egu.get()}
scan_md['scan']['delta'] = {'val' : 0,
'units' : xmotor.motor_egu.get()}
scan_md['scan']['snake'] = 1 if flag_snake else 0
scan_md['scan']['shape'] = (xnum, ynum)
# Set counting time
sclr1.preset_time.put(dwell)
xs.external_trig.put(False)
xs.cam.acquire_time.put(dwell)
xs.total_points.put(xnum * ynum)
if (merlin in dets):
merlin.cam.acquire_time.put(dwell)
merlin.cam.acquire_period.put(dwell + 0.005)
merlin.hdf5.stage_sigs['num_capture'] = xnum * ynum
scan_md['scan']['merlin'] = {'merlin_exp_time' : dwell,
'merlin_exp_period' : dwell + 0.005}
# LiveTable
livecallbacks = []
# roi_key = getattr(xs.channel1.rois, roi_name).value.name
roi_key = xs.channels.channel01.get_mcaroi(mcaroi_number=1).total_rbv.name
livecallbacks.append(LiveTable([xmotor.name, ymotor.name, roi_key]))
# livetableitem.append(roi_key)
# roi_name = 'roi{:02}'.format(1)
livecallbacks.append(LiveGrid((ynum, xnum), roi_key,
clim=None, cmap='viridis',
xlabel='x [um]', ylabel='y [um]',
extent=[xstart, xstop, ystart, ystop],
x_positive='right', y_positive='down'))
myplan = grid_scan(dets,
ymotor, ystart, ystop, ynum,
xmotor, xstart, xstop, xnum, flag_snake,
md=scan_md)
myplan = subs_wrapper(myplan,
{'all': livecallbacks})
# Open shutter
# if (shutter):
# yield from mv(shut_b,'Open')
yield from check_shutters(shutter, 'Open')
# grid scan
uid = yield from myplan
# Close shutter
# if (shutter):
# yield from mv(shut_b,'Close')
yield from check_shutters(shutter, 'Close')
return uid
``` |
{
"source": "jklynch/suitcase-nxsas",
"score": 3
} |
#### File: suitcase/nxsas/utils.py
```python
from collections import Mapping, Sequence
import copy
import json
import logging
import re
import h5py
import numpy as np
def _copy_nexus_md_to_nexus_h5(nexus_md, h5_group_or_dataset):
"""
Read a metadata dictionary with nexus-ish keys and create a corresponding nexus structure in an H5 file.
Allowed structures:
a group with _attributes:
"entry" : {
"NXAttributes": {"NX_Class": "NXEntry", "default": "data"}
}
will look like
.../
<group entry>
<attr "NX_Class": "NXEntry">
<attr "default: "data">
a group with a dataset:
"entry": {
"_attributes": {"NX_Class": "NXEntry", "default": "data"},
"program_name": "EPICS areaDetector",
}
will look like
.../
<group "entry">
<attr "NX_Class": "NXEntry">
<attr "default: "data">
<dataset "program_name": "EPICS areaDetector">
a dataset with attributes:
"entry": {
"_attributes": {"NX_Class": "NXEntry", "default": "data"},
"program_name": {
"_attributes": {
"NDAttrDescription": "program name",
"NDAttrName": "ProgramName",
"NDAttrSource": "91dcLAX:GUPNumber",
"NDAttrSourceType": "NDAttrSourceEPICSPV"
},
"_dataset": "EPICS areaDetector",
}
}
will look like
.../
<group "entry">
<attr "NX_Class": "NXEntry">
<attr "default: "data">
<dataset "program_name": "EPICS areaDetector">
<attr "NDAttrDescription": "program name">
<attr "NDAttrName: "ProgramName">
<attr "NDAttrSource": "91dcLAX:GUPNumber">
<attr "NDAttrSourceType": "NDAttrSourceEPICSPV">
a group with a link to part of the bluesky structure
"entry": {
"_attributes": {"NX_Class": "NXEntry", "default": "data"},
"GUPNumber": "#bluesky/start/gup_number"
}
will look like
.../
<group "entry">
<attr "NX_Class": "NXEntry">
<attr "default: "data">
<link "GUPNumber" to <dataset /bluesky/start/gup_number>>
a group with a link with attributes to part of the bluesky structure
note: the "NDAttr..."s are not NeXus
"entry": {
"_attributes": {"NX_Class": "NXEntry", "default": "data"},
"GUPNumber": {
"_attributes": {
"NDAttrDescription": "GUP proposal number",
"NDAttrName": "GUPNumber",
"NDAttrSource": "91dcLAX:GUPNumber",
"NDAttrSourceType": "NDAttrSourceEPICSPV"
},
"_link": "#bluesky/start/gup_number"
}
will look like
.../
<group "entry">
<attr "NX_Class": "NXEntry">
<attr "default: "data">
<link "GUPNumber" to <dataset /bluesky/start/gup_number>>
<attr "NDAttrDescription": "GUP proposal number">
<attr "NDAttrName": "GUPNumber">
<attr "NDAttrSource": "91dcLAX:GUPNumber">
<attr "NDAttrSourceType": "NDAttrSourceEPICSPV">
a group with subgroups:
"entry": {
"_attributes": {"NX_Class": "NXEntry", "default": "data"}
"instrument": {
"_attributes": {"NX_Class": "NXInstrument",},
"name_1": "#bluesky/start/beamline_id",
"name_2": {
"_attributes": {"NX_This": "NXThat"},
"_link": "#bluesky/start/beamline_id",
},
}
For example:
"entry": {
"_attributes": {"NX_Class": "NXEntry", "default": "data"},
"GUPNumber": {
"_attributes": {
"NDAttrDescription": "GUP proposal number",
"NDAttrName": "GUPNumber",
"NDAttrSource": "91dcLAX:GUPNumber",
"NDAttrSourceType": "NDAttrSourceEPICSPV"
},
"_link": "#bluesky/start/gup_number"
},
"title": {
"_attributes": {
"NDAttrDescription": "sample name",
"NDAttrName": "SampleTitle",
"NDAttrSource": "91dcLAX:sampleTitle",
"NDAttrSourceType": "NDAttrSourceEPICSPV"
},
"_link": "#bluesky/start/gup_number"
},
"program_name": "EPICS areaDetector",
"instrument": {
"_attributes": {"NX_Class": "NXInstrument",},
"name_1": "#bluesky/start/beamline_id",
"name_2": {
"_attributes": {"NX_This": "NXThat"},
"_link": "#bluesky/start/beamline_id",
},
"aperture": {
"_attributes": {"NX_Class": "NXAperture",},
"vcenter": 1.0,
"vsize": 2.0,
"description": "USAXSslit",
},
},
},
},
Parameters
----------
    nexus_md: dict-like
        nexus-ish metadata dictionary to copy into the h5 file
    h5_group_or_dataset: h5py.Group or h5py.Dataset
        h5 group or dataset under which the NeXus structure is created
"""
for nexus_key, nexus_value in nexus_md.items():
if nexus_key in ("_data", "_link"):
# this key/value has already been processed
continue
elif nexus_key == "_attributes":
for attr_name, attr_value in nexus_value.items():
h5_group_or_dataset.attrs[attr_name] = attr_value
elif isinstance(nexus_value, Mapping):
# we arrive here in a case such as:
# "program_name": {
# "_attributes": {"attr_1": "abc", "attr_2": "def"},
# "_link": "#bluesky/start/program_name"
# }
# where nexus_key is "program_name" and
# nexus_value is the associated dictionary
if "_link" in nexus_value:
h5_group_or_dataset[nexus_key] = _get_h5_group_or_dataset(
bluesky_document_path=_parse_bluesky_document_path(
nexus_value["_link"]
),
h5_file=h5_group_or_dataset.file,
)
_copy_nexus_md_to_nexus_h5(
nexus_md=nexus_value, h5_group_or_dataset=h5_group_or_dataset[nexus_key]
)
elif "_data" in nexus_value:
# we arrive here in a case such as:
# "program_name": {
# "_attributes": {"attr_1": "abc", "attr_2": "def"},
# "_data": "the name of the program"
# }
# where nexus_key is "program_name" and
# nexus_value is the associated dictionary
h5_group_or_dataset.create_dataset(name=nexus_key, data=nexus_value["_data"])
_copy_nexus_md_to_nexus_h5(
nexus_md=nexus_value, h5_group_or_dataset=h5_group_or_dataset[nexus_key]
)
else:
# otherwise create a group
_copy_nexus_md_to_nexus_h5(
nexus_md=nexus_value, h5_group_or_dataset=h5_group_or_dataset.create_group(nexus_key),
)
elif isinstance(nexus_value, str) and nexus_value.startswith("#bluesky"):
# create a link
bluesky_document_path = _parse_bluesky_document_path(nexus_value)
h5_group_or_dataset[nexus_key] = _get_h5_group_or_dataset(
bluesky_document_path, h5_group_or_dataset.file
)
else:
h5_group_or_dataset.create_dataset(name=nexus_key, data=nexus_value)
_bluesky_doc_query_re = re.compile(
r"^#bluesky/"
r"(?P<doc>(start|stop|desc/(?P<stream>\w+)))"
r"(?P<all_keys>(/\w+)*)"
r"(@(?P<attribute>\w+))?"
)
def _parse_bluesky_document_path(bluesky_document_path):
"""
regex101.com
#bluesky/start/blah/bleh@blih :
doc: start
keys: ("blah", "bleh")
attribute: blih
#bluesky/desc/primary/blah/bleh
doc: desc
stream: primary
keys: /blah/bleh
"""
m = _bluesky_doc_query_re.match(bluesky_document_path)
if m is None:
raise Exception(f"failed to parse '{bluesky_document_path}'")
else:
path_info = copy.copy(m.groupdict())
if path_info["doc"].startswith("desc"):
# path_info["doc"] is "desc/stream_name"
# but I want just "desc" so split off "/stream_name
path_info["doc"] = path_info["doc"].split("/")[0]
# path_info["all_keys"] is something like <KEY>"
# but I want a tuple like ("abc", "def") so split on "/"
# the first element of the split list is an empty string
# leave it out with [1:]
path_info["keys"] = tuple(path_info["all_keys"].split("/"))[1:]
return path_info
def _get_h5_group_or_dataset(bluesky_document_path, h5_file):
# look up the h5 group corresponding to the bluesky document path
doc = bluesky_document_path["doc"]
h5_target_group = h5_file["bluesky"][doc]
for key in bluesky_document_path["keys"]:
h5_target_group = h5_target_group[key]
return h5_target_group
def _copy_metadata_to_h5_attrs(a_mapping, h5_group):
"""
Recursively reproduce a python "mapping" (typically a dict)
as h5 nested groups and attributes.
"""
for key, value in a_mapping.items():
if isinstance(value, Mapping):
# found a dict-like value
# create a new h5 group for it
# and recursively copy its keys and values to h5 groups and attributes
_copy_metadata_to_h5_attrs(
a_mapping=value, h5_group=h5_group.create_group(key)
)
else:
# a special case
if value is None:
value = "None"
try:
# this is where an h5 attribute is assigned
h5_group.attrs[key] = value
except TypeError:
# `value` is too complicated to be a h5 attribute
# an example of a key-value pair that will cause TypeError is
# {'dimensions': [[['time'], 'primary']]}
# instead we will store it as JSON
h5_group.attrs[key] = json.dumps(value)
def _copy_metadata_to_h5_datasets(a_mapping, h5_group):
"""
Recursively reproduce a python "mapping" (typically a dict)
as h5 nested groups and datasets.
"""
log = logging.Logger("suitcase.nxsas", level="DEBUG")
for key, value in a_mapping.items():
if isinstance(value, Mapping):
# found a dict-like value
# create a new h5 group for it
# and recursively copy its keys and values to h5 groups and datasets
group = h5_group.create_group(key)
log.debug("created h5 group %s", group)
_copy_metadata_to_h5_datasets(a_mapping=value, h5_group=group)
else:
# a special case
if value is None:
value = "None"
elif value == b"\x00":
# for example:
# "en_monoen_grating_clr_enc_lss": {
# "source": "PV:XF:07ID1-OP{Mono:PGM1-Ax:GrtP}Mtr_ENC_LSS_CLR_CMD.PROC",
# "dtype": "integer",
# "shape": [],
# "units": "",
# "lower_ctrl_limit": b"\x00",
# "upper_ctrl_limit": b"\x00",
# "object_name": "en",
#
# },
# will cause a ValueError: VLEN strings do not support embedded NULLs
value = ""
# this is where an h5 dataset is assigned
# string datasets are special because they must have dtype=h5py.string_dtype()
try:
# use Sequence to handle list and tuple
if isinstance(value, str) or (
isinstance(value, Sequence)
and all([isinstance(x, str) for x in value])
):
d = h5_group.create_dataset(
name=key, data=np.array(value, dtype=h5py.string_dtype())
)
else:
d = h5_group.create_dataset(name=key, data=value)
except TypeError as err:
log.warning(
"failed to create dataset for key '%s' and value '%s'", key, value
)
log.warning("exception: %s", err)
log.warning("storing JSON-encoded value instead")
d = h5_group.create_dataset(
name=key,
data=np.array(json.dumps(value), dtype=h5py.string_dtype()),
)
except BaseException as ex:
log.warning(
"failed to create dataset on group '%s' for key '%s'", h5_group, key
)
log.exception(ex)
raise ex
log.debug("created dataset %s", d)
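# Minimal usage sketch (added for illustration, not in the original module):
# writing a nested dict into an in-memory HDF5 file with the helper above,
# using h5py's "core" driver. The group/dataset layout mirrors the dict nesting.
if __name__ == "__main__":
    example_md = {"plan_name": "count", "hints": {"dimensions": "primary"}}
    with h5py.File("example.h5", "w", driver="core", backing_store=False) as f:
        _copy_metadata_to_h5_datasets(
            a_mapping=example_md, h5_group=f.create_group("bluesky")
        )
        # "plan_name" becomes a string dataset, "hints" becomes a nested group
        print(list(f["bluesky"]))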
``` |
{
"source": "jklynch/suitcase-sas",
"score": 2
} |
#### File: nxsas/tests/test_sst_nexus_metadata.py
```python
import logging
import h5py
import event_model
from suitcase import nxsas
from suitcase.nxsas.tests.rsoxs_run_documents import (
rsoxs_start_doc,
rsoxs_descriptor_en_doc,
rsoxs_event_page_en_doc,
)
techniques_md = {
"md": {
"techniques": [
# SAXS technique
{
"version": 1,
"technique": "SAXS",
"nxsas": {
"entry": {
"_attributes": {"NX_Class": "NXEntry", "default": "data"},
"end_time": {
"_attributes": {
"NDAttrDescription": "image ending time",
"NDAttrName": "EndTime",
"NDAttrSource": "91dcLAX:SAXS:EndExposureTime",
"NDAttrSourceType": "NDAttrSourceEPICSPV",
},
"_link": "#bluesky/stop/time",
},
"title": {
"_attributes": {
"NDAttrDescription": "sample name",
"NDAttrName": "SampleTitle",
"NDAttrSource": "91dcLAX:sampleTitle",
"NDAttrSourceType": "NDAttrSourceEPICSPV",
},
"_link": "#bluesky/start/sample_name",
},
"program_name": "EPICS areaDetector",
"instrument": {
"_attributes": {"NX_Class": "NXInstrument"},
"name_1": "#bluesky/start/beamline_id", # create a link
"name_2": { # create a link with attributes?
"_attributes": {"NX_This": "NXThat"},
"_link": "#bluesky/start/beamline_id",
},
"aperture": {
"_attributes": {"NX_Class": "NXAperture"},
"vcenter": 1.0,
"vsize": 2.0,
"description": "USAXSslit",
},
},
},
},
},
# more techniques ...
]
}
}
def test_start_nexus_metadata(caplog, tmp_path):
caplog.set_level(logging.DEBUG, logger="suitcase.nxsas")
start_doc_md = {}
start_doc_md.update(rsoxs_start_doc)
start_doc_md.update(techniques_md)
    # compose_run will raise an exception if "time" or "uid" are in the metadata
start_doc_md.pop("time")
start_doc_md.pop("uid")
documents = []
(
start_doc,
compose_descriptor,
compose_resource,
compose_stop,
) = event_model.compose_run(
# 'run start' document
metadata=start_doc_md
)
documents.append(("start", start_doc))
stop_doc = compose_stop()
documents.append(("stop", stop_doc))
artifacts = nxsas.export(documents, tmp_path)
assert len(artifacts["stream_data"]) == 1
output_filepath = artifacts["stream_data"][0]
assert output_filepath.exists()
with h5py.File(output_filepath, "r") as h5f:
assert "bluesky" in h5f
print(list(h5f["bluesky"]))
assert "start" in h5f["bluesky"]
assert len(h5f["bluesky"]["start"]) == 42
assert len(h5f["bluesky"].attrs) == 0
assert all(h5f["bluesky"]["start"]["detectors"][()] == ["Synced", "en_energy"])
assert all(
h5f["bluesky"]["start"]["motors"][()]
== ["WAXS Exposure", "SAXS Exposure", "en"]
)
assert h5f["bluesky"]["start"]["num_intervals"][()] == 127
assert h5f["bluesky"]["start"]["num_points"][()] == 128
assert h5f["bluesky"]["start"]["plan_name"][()] == "full_carbon_scan_nd"
assert h5f["bluesky"]["start"]["plan_type"][()] == "generator"
assert h5f["bluesky"]["start"]["scan_id"][()] == 6852
assert h5f["bluesky"]["start"]["time"][()] == start_doc["time"]
assert h5f["bluesky"]["start"]["uid"][()] == start_doc["uid"]
assert len(h5f["bluesky"]) == 4
assert "hints" in h5f["bluesky"]["start"]
assert "dimensions" in h5f["bluesky"]["start"]["hints"]
# the "dimensions" attribute has been jsonified because it is complicated
# assert (
# h5f["bluesky"]["hints"].attrs["dimensions"]
# == '[[["random_walk:dt"], "primary"]]'
# )
# assert json.loads(h5f["bluesky"]["hints"].attrs["dimensions"]) == [
# [["random_walk:dt"], "primary"]
# ]
assert "md" in h5f["bluesky"]["start"]
assert "plan_args" in h5f["bluesky"]["start"]
assert "detectors" in h5f["bluesky"]["start"]["plan_args"]
# assert h5f["bluesky"]["start"]["plan_args"][()] == start_doc["plan_args"]
def test_descriptor_nexus_metadata(caplog, tmp_path):
caplog.set_level(logging.DEBUG, logger="suitcase.nxsas")
start_doc_md = {}
start_doc_md.update(rsoxs_start_doc)
start_doc_md.update(techniques_md)
start_doc_md.pop("time")
start_doc_md.pop("uid")
documents = []
(
start_doc,
compose_descriptor,
compose_resource,
compose_stop,
) = event_model.compose_run(
# 'run start' document
metadata=start_doc_md
)
documents.append(("start", start_doc))
descriptor_doc_md = dict()
descriptor_doc_md.update(rsoxs_descriptor_en_doc)
# compose_descriptor will raise an exception if "run_start" is in the metadata
descriptor_doc_md.pop("run_start")
descriptor_doc, _, _ = compose_descriptor(**descriptor_doc_md)
documents.append(("descriptor", descriptor_doc))
stop_doc = compose_stop()
documents.append(("stop", stop_doc))
artifacts = nxsas.export(documents, tmp_path)
assert len(artifacts["stream_data"]) == 1
output_filepath = artifacts["stream_data"][0]
assert output_filepath.exists()
with h5py.File(output_filepath, "r") as h5f:
assert "bluesky" in h5f
print(list(h5f["bluesky"]))
assert "primary" in h5f["bluesky"]["descriptors"]
assert "data_keys" in h5f["bluesky"]["descriptors"]["primary"]
assert "en_energy" in h5f["bluesky"]["descriptors"]["primary"]["data_keys"]
def test_event_page_nexus_metadata(tmp_path):
start_doc_md = {}
start_doc_md.update(rsoxs_start_doc)
start_doc_md.update(techniques_md)
# compose_run will throw an exception if "time" and "uid" are in the metadata
start_doc_md.pop("time")
start_doc_md.pop("uid")
documents = []
(
start_doc,
compose_descriptor,
compose_resource,
compose_stop,
) = event_model.compose_run(
# 'run start' document
metadata=start_doc_md
)
documents.append(("start", start_doc))
descriptor_doc_md = dict()
descriptor_doc_md.update(rsoxs_descriptor_en_doc)
# compose_descriptor will raise an exception if "run_start" is in the metadata
descriptor_doc_md.pop("run_start")
descriptor_doc, compose_event, compose_event_page = compose_descriptor(
**descriptor_doc_md
)
documents.append(("descriptor", descriptor_doc))
event_md = dict()
event_md.update(rsoxs_event_page_en_doc)
# event_md["seq_num"] = [1]
# the descriptor uid will interfere with compose_event
event_md.pop("descriptor")
event_doc = compose_event(**event_md)
documents.append(("event", event_doc))
stop_doc = compose_stop()
documents.append(("stop", stop_doc))
artifacts = nxsas.export(documents, tmp_path)
assert len(artifacts["stream_data"]) == 1
output_filepath = artifacts["stream_data"][0]
assert output_filepath.exists()
with h5py.File(output_filepath, "r") as h5f:
assert "bluesky" in h5f
assert "primary" in h5f["bluesky"]["events"]
assert "en_energy" in h5f["bluesky"]["events"]["primary"]["data"]
assert h5f["bluesky"]["events"]["primary"]["data"]["en_energy"].shape == (1,)
assert h5f["bluesky"]["events"]["primary"]["data"]["en_energy"][()] == [
270.0012299
]
# now test the NeXus structure
assert "entry" in h5f
assert len(h5f["entry"].attrs) == 2
assert h5f["entry"].attrs["NX_Class"] == "NXEntry"
assert h5f["entry"].attrs["default"] == "data"
assert "end_time" in h5f["entry"]
assert isinstance(h5f["entry"]["end_time"], h5py.Dataset)
print(f"end_time: {h5f['entry']['end_time']}")
assert h5f["entry"]["end_time"][()] == stop_doc["time"]
assert len(h5f["entry"]["end_time"].attrs) == 4
```
#### File: nxsas/tests/test_with_example_data.py
```python
from suitcase import nxsas
def do_not_test_export(tmp_path, example_data):
# Exercise the exporter on the myriad cases parametrized in example_data.
documents = example_data()
nxsas.export(documents, tmp_path)
# For extra credit, capture the return value from export in a variable...
# artifacts = export(documents, tmp_path)
# ... and read back the data to check that it looks right.
``` |
{
"source": "jklynch/suitcase-utils",
"score": 2
} |
#### File: utils/tests/conftest.py
```python
import bluesky
from bluesky.tests.conftest import RE # noqa
from bluesky.plans import count
from bluesky.plan_stubs import trigger_and_read, configure
from ophyd.sim import SynGauss, SynAxis
import numpy as np
try:
from ophyd.sim import DirectImage
except ImportError:
from ophyd import Device, Component as Cpt
from ophyd.sim import SynSignal
class DirectImage(Device):
img = Cpt(SynSignal, kind="hinted")
def __init__(self, *args, func=None, **kwargs):
super().__init__(*args, **kwargs)
if func is not None:
self.img._func = func
def trigger(self):
return self.img.trigger()
import event_model
import pytest
from .. import UnknownEventType
import warnings
if not hasattr(SynGauss, "configure"):
class SynGauss(SynGauss):
def configure(self, d):
if d:
raise ValueError
return {}, {}
# This line is used to ignore the deprecation warning for bulk_events in tests
warnings.filterwarnings("ignore", message="The document type 'bulk_events'*")
_md = {"reason": "test", "user": "temp user", "beamline": "test_beamline"}
# Some useful plans for use in testing
def simple_plan(dets):
    """A simple plan which runs count with num=5"""
md = {**_md, **{"test_plan_name": "simple_plan"}}
yield from count(dets, num=5, md=md)
def multi_stream_one_descriptor_plan(dets):
    """A plan that has two streams but one descriptor per stream."""
md = {**_md, **{"test_plan_name": "multi_stream_one_descriptor_plan"}}
@bluesky.preprocessors.baseline_decorator(dets)
def _plan(dets):
yield from count(dets, md=md)
yield from _plan(dets)
def one_stream_multi_descriptors_plan(dets):
    '''A plan that has one stream but two descriptors per stream.'''
md = {**_md, **{'test_plan_name': 'simple_plan'}}
@bluesky.preprocessors.run_decorator(md=md)
def _internal_plan(dets):
yield from trigger_and_read(dets)
for det in dets:
yield from configure(det, {})
yield from trigger_and_read(dets)
yield from _internal_plan(dets)
def _make_single(ignore):
if ignore:
pytest.skip()
motor = SynAxis(name="motor", labels={"motors"})
det = SynGauss(
"det", motor, "motor", center=0, Imax=1, sigma=1, labels={"detectors"}
)
return [det]
def _make_image(ignore):
if ignore:
pytest.skip()
direct_img = DirectImage(
func=lambda: np.array(np.ones((10, 10))), name="direct", labels={"detectors"}
)
return [direct_img]
def _make_image_list(ignore):
if ignore:
pytest.skip()
direct_img_list = DirectImage(
func=lambda: [[1] * 10] * 10, name="direct", labels={"detectors"}
)
direct_img_list.img.name = "direct_img_list"
return [direct_img_list]
@pytest.fixture(
params=[
_make_single,
_make_image,
_make_image_list,
lambda ignore: _make_image(ignore) + _make_image_list(ignore),
],
scope="function",
)
def detector_list(request): # noqa
return request.param
@pytest.fixture(params=["event", "bulk_events", "event_page"], scope="function")
def event_type(request):
def _event_type_func(ignore):
if request.param in ignore:
pytest.skip()
return request.param
return _event_type_func
@pytest.fixture(params=[simple_plan, multi_stream_one_descriptor_plan,
one_stream_multi_descriptors_plan],
scope='function')
def plan_type(request):
'''Returns a function that provides plan_types for testing.'''
def _plan_type_func(skip_tests_with=None):
'''Skips the current test or returns the plan_type in request.param for
a number of test cases.
skip_tests_with : list optional
pytest.skip() any test with request.param in this list
'''
if skip_tests_with is None:
skip_tests_with = []
if request.param in skip_tests_with:
pytest.skip()
return request.param
return _plan_type_func
@pytest.fixture(params=['test-', 'scan_{start[uid]}-'],
scope='function')
def file_prefix_list(request): # noqa
'''Returns a function that provides file_prefixes for testing.
'''
def _file_prefix_list_func(skip_tests_with=None):
'''Skips the current test or returns the file prefix in request.param for
a number of test cases.
skip_tests_with : list optional
pytest.skip() any test with request.param in this list
'''
if skip_tests_with is None:
skip_tests_with = []
if request.param in skip_tests_with:
pytest.skip()
return request.param
return _file_prefix_list_func
@pytest.fixture()
def generate_data(RE, detector_list, event_type): # noqa
'''A fixture that returns event data for a number of test cases.
Returns a list of (name, doc) tuples for the plan passed in as an arg.
Parameters
----------
RE : object
pytest fixture object imported from `bluesky.test.conftest`
detector_list : list
pytest fixture defined in `suitcase.utils.conftest` which returns a
list of detectors
event_type : list
pytest fixture defined in `suitcase.utils.conftest` which returns a
list of 'event_types'.
'''
def _generate_data_func(plan, skip_tests_with=None, md=None):
'''Generates data to be used for testing of suitcase.*.export(..)
functions
Parameters
----------
plan : the plan to use to generate the test data
Returns
-------
collector : list
A list of (name, doc) tuple pairs generated by the run engine.
skip_tests_with : list, optional
any test having request.param in this list will be skipped
md : dict, optional
metadata to be passed to the RunEngine
'''
if skip_tests_with is None:
skip_tests_with = []
if md is None:
md = {}
# define the output lists and an internal list.
collector = []
event_list = []
# define the collector function depending on the event_type
if event_type(skip_tests_with) == 'event':
def collect(name, doc):
collector.append((name, doc))
if name == 'event':
event_list.append(doc)
elif event_type(skip_tests_with) == 'event_page':
def collect(name, doc):
if name == 'event':
event_list.append(doc)
elif name == 'stop':
collector.append(('event_page',
event_model.pack_event_page(
*event_list)))
collector.append((name, doc))
else:
collector.append((name, doc))
elif event_type(skip_tests_with) == 'bulk_events':
def collect(name, doc):
if name == 'event':
event_list.append(doc)
elif name == 'stop':
collector.append(('bulk_events', {'primary': event_list}))
collector.append((name, doc))
else:
collector.append((name, doc))
else:
raise UnknownEventType('Unknown event_type kwarg passed to '
'suitcase.utils.events_data')
# collect the documents
RE(plan(detector_list(skip_tests_with)), collect, md=md)
return collector
return _generate_data_func
@pytest.fixture
def example_data(generate_data, plan_type):
'''A fixture that returns event data for a number of test cases.
Returns a function that returns a list of (name, doc) tuples for each of
the plans in plan_type.
.. note::
It is recommended that you use this fixture for testing of
``suitcase-*`` export functions, for an example see
``suitcase-tiff.tests``. This will mean that future additions to the
test suite here will be automatically applied to all ``suitcase-*``
repos. Some important implementation notes:
1. These fixtures are imported into other suitcase libraries via those
libraries' ``conftest.py`` file. This is automatically set up by
suitcases-cookiecutter, and no additional action is required.
2. If any of the fixture parameters above are not valid for
the suitcase you are designing and cause testing issues please skip
them internally by adding them to the ``skip_tests_with`` kwarg list
via the line
``collector = example_data(skip_tests_with=[param_to_ignore, ...])``.
Take note ``param_to_ignore`` is the exact parameter, i.e. in case you
want to ignore the tests against ``simple_plan`` in ``plan_type``
``param_to_ignore`` must actually be the function, not a string
reference, which will need to be imported using:
``from suitcase.utils.tests.conftest import simple_plan``
Parameters
----------
generate_data : list
pytest fixture defined in `suitcase.utils.conftest` which returns a
function that accepts a plan as an argument and returns name, doc pairs
plan_type : list
pytest fixture defined in `suitcase.utils.conftest` which returns a
list of 'plans' to test against.
'''
def _example_data_func(skip_tests_with=None, md=None):
'''returns a list of (name, doc) tuples for a number of test cases
skip_tests_with : list optional
any test having request.param in this list will be skipped
md : dict optional
dict or dict-like object containing metadata to be added to the
RunEngine
'''
return generate_data(
plan_type(skip_tests_with), skip_tests_with=skip_tests_with, md=md)
return _example_data_func
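# Illustrative sketch (not part of this conftest): how a downstream
# ``suitcase-*`` test is expected to consume the ``example_data`` fixture.
# ``my_exporter`` is a hypothetical exporter module used only for illustration.
#
# def test_export_roundtrip(tmp_path, example_data):
#     documents = example_data() # list of (name, doc) tuples
#     artifacts = my_exporter.export(documents, tmp_path)
#     assert artifacts # artifact keys depend on the exporter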
``` |
{
"source": "jklynch/suitcase-xdi",
"score": 2
} |
#### File: suitcase-xdi/scripts/demonstration.py
```python
from pprint import pprint
from bluesky import RunEngine
from bluesky.plans import count
from bluesky.preprocessors import SupplementalData
from event_model import RunRouter
from suitcase.xdi import Serializer
from ophyd.sim import det1, det2, det3, motor1, motor2
def pretty_print(name, doc):
pprint(name)
pprint(doc)
def serializer_factory(name, start_doc):
serializer = Serializer("xdi")
serializer("start", start_doc)
return [serializer], []
RE = RunEngine({})
sd = SupplementalData()
RE.preprocessors.append(sd)
sd.baseline = [det3, motor1, motor2]
RE.subscribe(pretty_print)
RE.subscribe(RunRouter([serializer_factory]))
suitcase_meta_data = {"config-file-path": "XDI.toml"}
xdi_meta_data = {"Element_symbol": "A", "Element_edge": "K", "Mono_d_spacing": 10.0}
nx_meta_data = {
"Source": {"name": "NSLS-II"},
"Instrument": {"name": "BMM"},
"Beam": {"incident_energy": 1000.0},
}
dets = [det1, det2]
RE(
count(dets, num=5),
md={"suitcase-xdi": suitcase_meta_data, "NX": nx_meta_data, "XDI": xdi_meta_data},
)
# look for file
``` |
{
"source": "jklypchak13/CFB_Scorigami",
"score": 2
} |
#### File: CFB_Scorigami/src/generate_pages.py
```python
from typing import ChainMap
from file_manger import get_template, write_file, read_cache, write_cache, check_directories
from scraper import get_games
from scorigami_table import ScorigamiTable
CACHE_FILE = 'data/scorgiami_cache.json'
def generate_index(score_table):
max_score = score_table.max_score()
template = get_template('index.html')
write_file(template.render(table=score_table,
max_score=max_score), 'index.html')
if __name__ == "__main__":
check_directories()
cached_games, cached_years = read_cache(CACHE_FILE)
new_games, all_years = get_games(cached_years)
all_games = cached_games
for year, games in new_games.items():
all_games.extend(games)
table = ScorigamiTable(all_games)
generate_index(table)
important_games = table.extract_games()
write_cache(CACHE_FILE, important_games, all_years)
```
#### File: CFB_Scorigami/test/test_file_manager.py
```python
import pytest
from . import context
from file_manger import read_cache, write_cache
from data_types import Game
from datetime import date as Date
@pytest.fixture
def sample_games():
games = [
Game("Rutgers", "Princeton", 6, 4, Date(1869, 11, 6)),
Game("Princeton", "Rutgers", 8, 0, Date(1869, 11, 13))
]
return games
@pytest.fixture
def sample_years():
years = [1869]
return years
def test_read_cache(sample_games, sample_years):
input_file = 'test/data/sample_cache.json'
games, years = read_cache(input_file)
assert games == sample_games
assert years == sample_years
def test_write_cache(sample_games, sample_years):
output_file = 'test/data/cache_output.json'
input_file = 'test/data/sample_cache.json'
write_cache(output_file, sample_games, sample_years)
expected = ''
with open(input_file, 'r') as fp:
expected = fp.read()
result = ''
with open(output_file, 'r') as fp:
result = fp.read()
assert result == expected
``` |
{
"source": "jkmartindale/androguard",
"score": 2
} |
#### File: androguard/tests/test_ns_apk.py
```python
import unittest
import sys
PATH_INSTALL = "./"
sys.path.append(PATH_INSTALL)
from androguard.core.bytecodes import apk
APKS = [
"examples/NowSecure/com.kaspersky.kes.apk",
"examples/NowSecure/com.king.bubblewitch3_119351.apk",
]
class APKTest(unittest.TestCase):
def testAPK(self):
for apk_path in APKS:
            with open(apk_path, "rb") as fd:
a = apk.APK(fd.read(), True)
self.assertTrue(a)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jkmathes/slack-machine",
"score": 2
} |
#### File: storage/backends/dynamodb.py
```python
import logging
import datetime
import calendar
import pickle
import codecs
import boto3
import botocore
from machine.storage.backends.base import MachineBaseStorage
logger = logging.getLogger(__name__)
class DynamoDBStorage(MachineBaseStorage):
"""
A storage plugin to allow native slack-machine storage
into AWS DynamoDB
Configuration of the connection to AWS itself is done via
standard environment variables or pre-written configuration
files, such as ~/.aws/{config}
For local testing, the endpoint URL can be modified using
slack-machine setting `DYNAMODB_ENDPOINT_URL`
If `DYNAMODB_CREATE_TABLE` is set within slack-machine
settings, this driver will create the table in AWS automatically
Additionally, if you need a DynamoDB client to be customized,
a custom client can be passed in with the `DYNAMODB_CLIENT`
slack-machine setting
Data in DynamoDB is stored as a pickled base64 string to
avoid complications in setting and fetching (bytes)
"""
def __init__(self, settings):
super().__init__(settings)
args = {}
if 'DYNAMODB_ENDPOINT_URL' in settings:
args['endpoint_url'] = settings['DYNAMODB_ENDPOINT_URL']
self._key_prefix = settings.get('DYNAMODB_KEY_PREFIX', 'SM')
self._table_name = settings.get('DYNAMODB_TABLE_NAME', 'slack-machine-state')
if 'DYNAMODB_CLIENT' in settings:
self._db = settings['DYNAMODB_CLIENT']
else:
db = boto3.resource('dynamodb', **args)
self._db = db
create_table = settings.get('DYNAMODB_CREATE_TABLE', False)
if create_table:
try:
self._table = self._db.create_table(
TableName=self._table_name,
KeySchema=[
{'AttributeName': 'sm-key', 'KeyType': 'HASH'}
],
AttributeDefinitions=[
{'AttributeName': 'sm-key', 'AttributeType': 'S'}
],
BillingMode='PAY_PER_REQUEST'
)
self._table.meta.client.get_waiter('table_exists').wait(TableName=self._table_name)
ttl = {'Enabled': True, 'AttributeName': 'sm-expire'}
self._table.meta.client.update_time_to_live(
TableName=self._table_name, TimeToLiveSpecification=ttl)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'ResourceInUseException':
logger.info(
'DynamoDB table[{}] exists, skipping creation'.format(self._table_name))
else:
raise e
self._table = self._db.Table(self._table_name)
def _prefix(self, key):
"""
Given a slack-machine lookup key, generate a prefixed-key
to be used in the DynamoDB table lookup
:param key: the SM key to prefix
"""
return '{}:{}'.format(self._key_prefix, key)
def has(self, key):
"""
Check if the key exists in DynamoDB
:param key: the SM key to check
:return: ``True/False`` whether the key exists in DynamoDB
:raises ClientError: if the client was unable to communicate with DynamoDB
"""
try:
r = self._table.get_item(Key={'sm-key': self._prefix(key)})
return True if 'Item' in r else False
except botocore.exceptions.ClientError as e:
logger.error('Unable to get item[{}]'.format(self._prefix(key)))
raise e
def get(self, key):
"""
Retrieve item data by key
:param key: the SM key to fetch against
:return: the raw data for the provided key, as (byte)string. Returns ``None`` when
the key is unknown or the data has expired
:raises ClientError: if the client was unable to communicate with DynamoDB
"""
try:
r = self._table.get_item(Key={'sm-key': self._prefix(key)})
if 'Item' in r:
v = r['Item']['sm-value']
return pickle.loads(codecs.decode(v.encode(), 'base64'))
else:
return None
except botocore.exceptions.ClientError as e:
logger.error('Unable to get item[{}]'.format(self._prefix(key)))
raise e
def set(self, key, value, expires=None):
"""
Store item data by key
:param key: the key under which to store the data
:param value: data as (byte)string
:param expires: optional expiration time in seconds, after which the
data should not be returned any more
:raises ClientError: if the client was unable to communicate with DynamoDB
"""
item = {
'sm-key': self._prefix(key),
'sm-value': codecs.encode(pickle.dumps(value), 'base64').decode()
}
if expires:
ttl = datetime.datetime.utcnow() + datetime.timedelta(seconds=expires)
item['sm-expire'] = calendar.timegm(ttl.timetuple())
try:
self._table.put_item(Item=item)
except botocore.exceptions.ClientError as e:
logger.error('Unable to set item[{}]'.format(self._prefix(key)))
raise e
def delete(self, key):
"""
Delete item data by key
:param key: key for which to delete the data
:raises ClientError: if the client was unable to communicate with DynamoDB
"""
try:
self._table.delete_item(Key={'sm-key': self._prefix(key)})
except botocore.exceptions.ClientError as e:
logger.error('Unable to delete item[{}]'.format(self._prefix(key)))
raise e
def size(self):
"""
Calculate the total size of the storage
:return: total size of storage in bytes (integer)
"""
t = self._table.meta.client.describe_table(TableName=self._table_name)
return t['Table']['TableSizeBytes']
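# Minimal usage sketch (illustrative, not part of the plugin). The settings
# keys are the ones documented in the class docstring; it assumes a local
# DynamoDB instance at the given endpoint and that slack-machine passes the
# settings mapping through to this class unchanged.
if __name__ == '__main__':
    example_settings = {
        'DYNAMODB_ENDPOINT_URL': 'http://localhost:8000',
        'DYNAMODB_CREATE_TABLE': True,
    }
    storage = DynamoDBStorage(example_settings)
    storage.set('greeting', {'text': 'hello'}, expires=3600)
    print(storage.get('greeting'))  # -> {'text': 'hello'}
    storage.delete('greeting')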
``` |
{
"source": "jkmathuriya/Video-Interpolation-Using-Different-OpticalFlow-Algorithms",
"score": 3
} |
#### File: Video-Interpolation-Using-Different-OpticalFlow-Algorithms/Functions/grdient.py
```python
import numpy as np
import scipy.ndimage
def grad_cal(img0, img2):
img0 = img0/ 255
img2 = img2 / 255
#kernels
kernel_x = np.array([[-1,1],[-1, 1]]) / 4
kernel_y = np.array([[-1, -1], [1, 1]]) / 4
kernel_t = np.array([[1, 1], [1, 1]]) / 4
## Calculating gradients by convolving kernels
fx = scipy.ndimage.convolve(input=img0, weights=kernel_x)+scipy.ndimage.convolve(input=img2, weights=kernel_x)
fy = scipy.ndimage.convolve(input=img0, weights=kernel_y)+scipy.ndimage.convolve(input=img2, weights=kernel_y)
ft = scipy.ndimage.convolve(input=img2, weights=kernel_t)+scipy.ndimage.convolve(input=img0, weights=-1*kernel_t)
return [fx,fy,ft]
```
#### File: Video-Interpolation-Using-Different-OpticalFlow-Algorithms/Multiscale_Horn_schunk/multiscale_horn_schunk.py
```python
from Horn_Schunk.Horn_schunk import *
from Functions.Pyramid import *
from matplotlib import pyplot as plt
from Functions.grdient import *
def iterative_horn_schunk_flow(img0, img2,old_flow,lambada,max_iter,epsilon):
a_old = old_flow[0]
b_old = old_flow[1]
fx, fy, ft = grad_cal(img0, img2)
# warping image with old flow
pred = np.round((img0 / 255 + fx * a_old + fy * b_old + ft) * 255)
pred[pred > 255] = 255
# Calculating a1~ and b1~
flow, grad = horn_schunk_flow(pred, img2,lambada,max_iter,epsilon)
# New flow
new_a = a_old + flow[0]
new_b = b_old + flow[1]
return [new_a, new_b]
def multiscale_horn_schunk_flow(img0, img2,lambada,max_iter,epsilon, levels):
pyr0, shapes0 = pyramid_down(img0, levels)
pyr2, shapes2 = pyramid_down(img2, levels)
# for i in range(levels-1,-1,-1):
# plt.figure()
# plt.imshow(pyr0[0:shapes0[i][0],0:shapes0[i][1],i],cmap="gray")
# plt.figure()
# plt.imshow(pyr2[0:shapes0[i][0],0:shapes0[i][1],i], cmap="gray")
# print(shapes0)
# plt.show()
# Calculate initial flow at lowest scale
[a, b], grad = horn_schunk_flow(pyr0[0:shapes0[levels - 1][0], 0:shapes0[levels - 1][1], levels - 1],
pyr2[0:shapes0[levels - 1][0], 0:shapes0[levels - 1][1], levels - 1],lambada,max_iter,epsilon)
# upsample flow for next level
a2 = cv2.pyrUp(a)
b2 = cv2.pyrUp(b)
for i in range(levels - 2, -1, -1):
[a, b] = iterative_horn_schunk_flow(pyr0[0:shapes0[i][0], 0:shapes0[i][1], i],
pyr2[0:shapes0[i][0], 0:shapes0[i][1], i],[a2, b2],lambada,max_iter,epsilon)
# upsample flow for next level
a2 = cv2.pyrUp(a)
b2 = cv2.pyrUp(b)
grad = grad_cal(img0, img2)
return [a, b], grad
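# Usage sketch (illustrative, not part of the original script): estimating
# flow between two consecutive grayscale frames. The file names and the
# lambada/max_iter/epsilon/levels values are assumptions, not project defaults.
if __name__ == "__main__":
    frame0 = cv2.imread("frame0.png", cv2.IMREAD_GRAYSCALE)
    frame2 = cv2.imread("frame2.png", cv2.IMREAD_GRAYSCALE)
    (u, v), (fx, fy, ft) = multiscale_horn_schunk_flow(
        frame0, frame2, lambada=1, max_iter=100, epsilon=1e-3, levels=3
    )
    print(u.shape, v.shape)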
``` |
{
"source": "jkmiao/captcha",
"score": 2
} |
#### File: captcha/decap/apicode.py
```python
import os
from models.chimath.cm_code import CMCode
from models.tggvcr.tggvcr import TGGVCR
from models.tgcodesp.tgcodesp import TGcodesp
import base64
from cStringIO import StringIO
from urllib import urlopen
class Apicode(object):
def __init__(self, path='model'):
"""
        Preload all recognition models
"""
self.models = {}
        self.models['GNE'] = TGGVCR() # general-purpose Chinese/English model
        self.models['CM'] = CMCode() # arithmetic captcha, e.g. the Jilin/Shandong AIC sites or the 17k novel site http://passport.17k.com/mcode.jpg?r=8417905093765
self.models['gne'] = TGcodesp('gne')
def predict(self, codetype, fname, code_len=None, detail=False):
if codetype not in self.models:
raise ValueError("input captcha type error: %s " % type)
# base64编码图片
if len(fname)>1000:
fname = StringIO(base64.b64decode(fname))
return self.models[codetype].predict(fname, code_len=code_len, detail=detail)
def predict_url(self, codetype, url, code_len=None, detail=False):
if codetype not in self.models:
raise ValueError("input captcha type error: %s " % type)
# 图片url
if url.startswith('http'):
fname = StringIO(urlopen(url).read())
return self.models[codetype].predict(fname, code_len=code_len, detail=detail)
if __name__ == '__main__':
test = Apicode()
ctype = 'gne'
path = 'img/test/%s/' % ctype.lower()
fnames = [os.path.join(path, fname) for fname in os.listdir(path)][:10]
for fname in fnames:
print fname
        # base64-encode the image bytes
fname = base64.b64encode(open(fname).read())
print test.predict(ctype, fname, detail=True)
```
#### File: decap/train/cm_code.py
```python
from keras.models import load_model
from keras import backend as K
from sklearn import metrics
import numpy as np
from PIL import Image
import string
import os
import time
class CharacterTable(object):
"""
    String encoding and decoding for labels
"""
def __init__(self):
self.chars = u'0123456789abcs'
self.char_indices = dict((c, i) for i, c in enumerate(self.chars))
self.indice_chars = dict((i, c) for i, c in enumerate(self.chars))
def encode(self, strs, maxlen=None):
maxlen = maxlen if maxlen else len(strs)
vec = np.zeros((maxlen, len(self.chars)))
for i, c in enumerate(strs):
vec[i, self.char_indices[c]] = 1
return vec.flatten()
def decode(self, vec, n_len=6):
vec = vec[:, 2:, :]
ctc_decode = K.ctc_decode(vec, input_length=np.ones(vec.shape[0])*vec.shape[1])[0][0]
y_out = K.get_value(ctc_decode)
res = ''.join([self.indice_chars[x] for x in y_out[0]])
return res.lower()
class CMCode(object):
def __init__(self, model_path='model/tgcode_ctc_cm.h5'):
self.ctable = CharacterTable()
self.image_h = 30
self.image_w = 200
base_path = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(base_path, model_path)
self.model = load_model(model_path)
def predict(self, fname):
img = Image.open(fname).convert('RGB').resize((self.image_w, self.image_h), Image.ANTIALIAS)
imgM = np.array(img, dtype=np.uint8).transpose(1, 0, 2)
imgM = np.expand_dims(imgM, 0)
y_pred = self.model.predict(imgM)
y_pred = self.ctable.decode(y_pred)
return y_pred
if __name__ == "__main__":
test = CMCode()
path = 'img/test_data/cm21'
fnames = [os.path.join(path, fname) for fname in os.listdir(path) if fname.endswith('jpg')][:50]
start_time = time.time()
cnt = 0
for fname in fnames:
y_pred = test.predict(fname)
y_test = fname.split('/')[-1].split('_')[0].lower()
if y_pred == y_test:
cnt += 1
print y_pred, y_test
print 'Accuracy: ', float(cnt)/len(fnames)
print 'Time: ', (time.time() - start_time) / len(fnames)
```
#### File: decap/train/gen_data_cm.py
```python
from PIL import Image, ImageDraw, ImageFont
import random
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
"""
Generate Chinese-numeral arithmetic captcha data
c: chinese
m: math
"""
class ImageCaptcha(object):
def __init__(self, size=(200, 60)):
self.bgColor = self.random_color(200, 255)
self.txt_num = u'零一二三四五六七八九十壹貳叄肆伍陸柒捌玖拾'
self.txt_op = list(u'加减乘+-x') + [u"加上", u"减去", "乘以"]
self.txt_eq = [u'等于', '等于几']
self.image = Image.new('RGB', size, self.bgColor)
def random_color(self, minc=0, maxc=255):
return (random.randint(minc, maxc),
random.randint(minc, maxc),
random.randint(minc, maxc))
def gen_text(self, cnt=6):
"""
        Randomly generate the captcha text
"""
a = random.choice(self.txt_num)
b = random.choice(self.txt_op)
c = random.choice(self.txt_num)
d = random.choice(self.txt_eq)
print a, b, c, d
text = a+b+c+d
return ''.join(text)
def draw_text(self, pos, txt):
draw = ImageDraw.Draw(self.image)
fontSize = random.choice([22, 26, 28, 30])
fontType = random.choice(['simsun.ttc']) #, 'Kaiti-SC-Bold.ttf', 'DrioidSans-Bold.ttf'])
font = ImageFont.truetype(fontType, fontSize)
fontColor = self.random_color(1, 180)
draw.text(pos, txt, font=font, fill=fontColor)
def rotate(self, angle):
self.image = self.image.rotate(random.randint(-1*angle, angle), expand=0)
def clear_bg(self):
width, height = self.image.size
for x in range(width):
for y in range(height):
pix = self.image.getpixel((x, y))
if pix == (0, 0, 0):
self.image.putpixel((x,y), self.bgColor)
def random_point(self):
width, height = self.image.size
x = random.randint(0, width)
y = random.randint(0, height)
return (x, y)
def add_noise(self):
start_point = self.random_point()
end_point = self.random_point()
draw = ImageDraw.Draw(self.image)
for i in range(random.randint(2, 5)):
draw.line((start_point, end_point), fill=self.random_color(), width=random.randint(0,2))
def gen_captcha_image(self, text):
for i, txt in enumerate(text):
x = 2 + i * 30 + random.randint(-2, 2)
y = random.randint(2, 10)
self.draw_text((x, y), txt)
self.rotate(3)
self.add_noise()
self.clear_bg()
return self.image
def label_convert(label, detail=True):
"""
    Convert a captcha label to its numeric answer string
"""
map_dict=dict((x, y) for x,y in zip(u'零一二三四五六七八九壹貳叄肆伍陸柒捌玖加减乘除+-x', u'0123456789123456789+-*/+-*'))
map_dict[u'十'] = u'10'
map_dict[u'拾'] = u'10'
res = ''
for c in label:
if c in map_dict:
res += map_dict[c]
if detail:
res = eval(res)
return str(res)
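# Example (added for illustration): converting a generated label into the
# numeric answer string. u'三加五' maps to '3+5', which eval() reduces to 8;
# the trailing u'等于' has no mapping and is simply dropped.
#
#   label_convert(u'三加五等于') # -> '8'
#   label_convert(u'三加五等于', detail=False) # -> '3+5'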
if __name__ == '__main__':
for i in range(10):
test = ImageCaptcha()
label = test.gen_text()
img = test.gen_captcha_image(label)
img.save('img/origin/%s_%d.jpg' % (label_convert(label), i))
if i%5==0:
print i, 'done'
```
#### File: decap/train/train_model.py
```python
from keras.models import Model, Input, load_model
from keras.layers import *
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras import backend as K
import numpy as np
from PIL import Image
import random
import os
from utils import CharacterTable
def img_preprocess(img, image_w=200, image_h=50):
"""
    Image preprocessing: normalize every image to a (200, 50) width/height.
    :type img: PIL.Image: input image
    :type image_w: int: output width
    :type image_h: int: output height
    :rtype: PIL.Image, the normalized image
"""
w, h = img.size
    new_width = w * image_h/h # scale proportionally
    img = img.resize((new_width, image_h), Image.ANTIALIAS) # fixed target height
extra_blank = (image_w-img.width)/2
img = img.crop((-extra_blank, 0, image_w - extra_blank, image_h))
return img
def load_data(path, ctable, width=200, height=50, code_len=5):
"""
    Load all images under a folder.
    :type ctable: CharacterTable: label encoder/decoder
    :type width: int: output width after preprocessing
    :type height: int: output height after preprocessing
    :type code_len: int: maximum answer length
:rtype: [data, input_label, input_len, label_len], oriLabel
"""
fnames = [os.path.join(path, fname) for fname in os.listdir(path) ]
if len(fnames)>30000:
fnames = random.sample(fnames, 30000)
    data = np.zeros((len(fnames), width, height, 3)) # image data array
    input_len = np.ones(len(fnames), dtype=np.uint8) * 19 # 21-2, the sequence length after the reshape
input_label = np.zeros((len(fnames), code_len), dtype=np.uint8)
label_len = np.zeros(len(fnames), dtype=np.uint8)
oriLabel = []
for idx, fname in enumerate(fnames):
try:
img = Image.open(fname).convert('RGB')
imgLabel = (fname.split('/')[-1].split('_')[0]).decode('utf-8')
tmp = ctable.encode(imgLabel.lower())
except Exception as e:
print e
os.system('mv %s ../../img/error/' % (fname))
continue
if len(imgLabel)<3:
print 'too short label', fname
os.system('mv %s ../../img/error/' % (fname))
continue
else:
img = img_preprocess(img, width, height)
input_label[idx] = ctable.encode(imgLabel.lower())
data[idx] = np.array(img).transpose(1, 0, 2)
label_len[idx] = len(imgLabel)
oriLabel.append(imgLabel)
return [data, input_label, input_len, label_len], oriLabel
def ctc_lambda_func(args):
"""
    Built-in CTC loss
"""
y_pred, labels, input_len, label_len = args
y_pred = y_pred[:, 2:, :]
return K.ctc_batch_cost(labels, y_pred, input_len, label_len)
def build_model(width, height, code_len, n_class):
"""
    Build the model
"""
input_tensor = Input((width, height, 3), name='input_tensor')
x = input_tensor
for i in range(3):
x = Conv2D(128, (3, 3), activation='relu')(x)
x = Conv2D(128, (3, 3), activation='relu')(x)
x = MaxPooling2D((2,2))(x)
x = BatchNormalization()(x)
conv_shape = x.get_shape()
print 'conv_shape', conv_shape
x = Reshape(target_shape=(int(conv_shape[1]), int(conv_shape[2]*conv_shape[3])))(x)
x = Bidirectional(GRU(128, return_sequences=True), name='BiGRU1')(x)
x = Dropout(0.25)(x)
x = Dense(n_class, activation='softmax')(x)
base_model = Model(inputs=input_tensor, outputs=x)
labels = Input(name='the_labels', shape=[code_len], dtype='float32')
input_len = Input(name='input_len', shape=[1], dtype='int64')
label_len = Input(name='label_len', shape=[1], dtype='int64')
loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc_loss')([x, labels, input_len, label_len])
ctc_model = Model(inputs=[input_tensor, labels, input_len, label_len], outputs=[loss_out])
    # compile the models
base_model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
ctc_model.compile(loss={'ctc_loss':lambda y_true, y_pred: y_pred}, optimizer='rmsprop')
return base_model, ctc_model
def train_model(path, ctable, base_model, ctc_model, model_name, code_len, acc=0.92, nb_epoch=50, test_path=None):
"""
    Load the built model structure and the data, then train.
    :rtype: object: the trained model
"""
check_point = ModelCheckpoint(filepath='model/%s_weights.h5' % model_name, monitor='val_loss', save_best_only=True, save_weights_only=True)
early_stopping = EarlyStopping(monitor='val_loss', patience=3)
    # load the training data
[X_data, labels, input_len, label_len], oriLabel = load_data(path, ctable, code_len=code_len)
inputs = {
'input_tensor': X_data,
'the_labels': labels,
'input_len': input_len,
'label_len': label_len
}
outputs = {'ctc_loss': np.zeros([len(labels)])}
for epoch in range(nb_epoch/5):
        hist = ctc_model.fit(inputs, outputs, batch_size=32, validation_split=0.1, epochs=1, callbacks=[check_point, early_stopping]) # keep the checkpoint with the lowest validation loss
train_acc = test_model(base_model, path, ctable)
print epoch, 'train acc', train_acc, 'stop acc', acc
if test_path:
test_acc = test_model(base_model, test_path, ctable)
print epoch, 'test acc', test_acc
        if train_acc > acc: # stop early once accuracy reaches the target acc (default 0.92)
break
print ''
return base_model
def test_model(base_model, path, ctable, cnt=100, width=200, height=50):
"""
    Test model accuracy on the images under the given path.
    :type path: str: directory of test images
    :type ctable: CharacterTable: encoder/decoder instance
    :rtype float: accuracy over the images in the folder
"""
fnames = [os.path.join(path, fname) for fname in os.listdir(path)]
if len(fnames)<cnt:
cnt = len(fnames)
    fnames = random.sample(fnames, cnt) # randomly sample 100 images for each test run
cnt = 0
for idx, fname in enumerate(fnames):
img = Image.open(fname).convert('RGB')
img = img_preprocess(img, width, height)
imgM = np.array(img).transpose(1, 0, 2)
imgM = np.expand_dims(imgM, 0)
y_pred = base_model.predict(imgM)
y_out = ctable.decode(y_pred)
y_true = fname.split('/')[-1].split('_')[0].lower()
if y_out == y_true:
cnt += 1
if idx%10==0:
print 'y_out', y_out, 'y_true',y_true
acc = float(cnt)/len(fnames)
return acc
``` |
{
"source": "jkmnt/pg",
"score": 2
} |
#### File: pg/utils/test_mono_drv.py
```python
import socket
import array
import threading
import random
import Image, ImageFile, ImageTk, ImageDraw, ImageOps, ImageChops, ImageFont
CMD_CLEAR = 0
CMD_SHOW = 1
CMD_SET_CLIP = 2
CMD_DRAW_VLINE = 3
CMD_DRAW_HLINE = 4
CMD_DRAW_LINE = 5
CMD_DRAW_FILL = 6
CMD_DRAW_FRAME = 7
CMD_DRAW_TEXTLINE = 8
def get_size(Data):
Size = [0,0]
Size[0] = ord(Data[0]) | (ord(Data[1]) << 8)
Size[1] = ord(Data[2]) | (ord(Data[3]) << 8)
return (Size[0], Size[1])
def unpack_image(Data):
size = get_size(Data)
Im = Image.fromstring( "1", (size[1], size[0]), Data[4:], "raw", "1;IR", 0, 1)
Im = Im.rotate(90).transpose(Image.FLIP_TOP_BOTTOM)
return Im
def connect():
HOST = '127.0.0.1' # Local host
PORT = 50007 # Server port
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
return s
# cmd Id and args are 32-bits
def send_cmd(socket, cmdId, args = []):
cmdReq = array.array('l', [cmdId] + args)
socket.send(cmdReq)
def get_ack(sock):
ack = sock.recv(1024)
assert ack == 'Ok\0', 'ACK error !'
def draw_hline(img, args):
draw = ImageDraw.Draw(img)
x0, x1, y0, y1 = args[0:3] + [args[2]]
draw.line([x0, y0, x1, y1], fill = not args[3])
del draw
def draw_vline(img, args):
draw = ImageDraw.Draw(img)
x0, x1, y0, y1 = [args[0]] + args[0:3]
draw.line([x0, y0, x1, y1], fill = not args[3])
del draw
def draw_line(img, args):
draw = ImageDraw.Draw(img)
draw.line(args[0:4], fill = not args[4])
del draw
def draw_fill(img, args):
draw = ImageDraw.Draw(img)
draw.rectangle(args[0:4], fill = not args[4], outline = not args[4])
del draw
def draw_frame(img, args):
draw = ImageDraw.Draw(img)
draw.rectangle(args[0:4], fill = 1, outline = not args[4])
del draw
def draw_textline(img, args):
fnt = ImageFont.truetype('f04b_11.ttf', 8)
draw = ImageDraw.Draw(img)
# This font have a different offset in TTF and xbm
draw.text([args[0], args[1] - 1 ], 'Test string', font = fnt, fill = not args[2])
del draw
def compare_img(im0, im1):
return im0.convert('1').tostring() == im1.convert('1').tostring()
def reset_test(sock, clip = [0, 0, 250, 131]):
send_cmd(sock, CMD_SET_CLIP, clip)
get_ack(sock)
send_cmd(sock, CMD_CLEAR)
get_ack(sock)
return Image.new( '1', (251, 160), color = 1)
def get_result(sock):
send_cmd(sock, CMD_SHOW)
    # There could be a delay in the socket, and data may be split
indata = sock.recv(8192)
expect = get_size(indata)
expect = expect[0] * (expect[1] / 8) + 4
expect -= len(indata);
while expect:
new_data = sock.recv(2048)
expect -= len(new_data)
indata += new_data
assert expect >= 0, 'Negative expect!'
return unpack_image(indata)
def test_vline(im, sock, args):
send_cmd(sock, CMD_DRAW_VLINE, args)
get_ack(sock)
draw_vline(im, args)
def test_hline(im, sock, args):
send_cmd(sock, CMD_DRAW_HLINE, args)
get_ack(sock)
draw_hline(im, args)
def test_fill(im, sock, args):
send_cmd(sock, CMD_DRAW_FILL, args)
get_ack(sock)
draw_fill(im, args)
def test_frame(im, sock, args):
send_cmd(sock, CMD_DRAW_FRAME, args)
get_ack(sock)
draw_frame(im, args)
def test_textline(im, sock, args):
send_cmd(sock, CMD_DRAW_TEXTLINE, args)
get_ack(sock)
draw_textline(im, args)
def test_line(im, sock, args):
send_cmd(sock, CMD_DRAW_LINE, args)
get_ack(sock)
x0, y0, x1, y1, c = args
dx = abs(x1 - x0)
dy = abs(y1 - y0)
if dy < dx:
if x0 < x1:
if y0 < y1:
octant = 1
else:
octant = 8
else:
if y0 < y1:
octant = 4
else:
octant = 5
else:
if x0 < x1:
if y0 < y1:
octant = 2
else:
octant = 7
else:
if y0 < y1:
octant = 3
else:
octant = 6
if octant in [8, 3, 2, 1]:
draw_line(im, [x1, y1, x0, y0, c])
else:
draw_line(im, [x0, y0, x1, y1, c])
def test_hlines(sock):
for x0 in range(250):
for x1 in range(250):
pim = reset_test(sock)
res = test_hline(pim, sock, [x0, x1, random.randint(0, 130), 1])
dut = get_result(sock)
if not compare_img(pim, dut):
return 'x0 = %d, x1 = %d' % (x0, x1)
return 'Ok'
def test_vlines(sock):
for y0 in range(130):
for y1 in range(130):
pim = reset_test(sock)
res = test_vline(pim, sock, [random.randint(0, 250), y0, y1, 1])
dut = get_result(sock)
if not compare_img(pim, dut):
return 'y0 = %d y1 = %d' % (y0, y1)
return 'Ok'
def vdiff(im0, im1):
im0 = ImageOps.colorize( im0.convert('L'), (255, 0, 0), (255, 255, 255))
im1 = ImageOps.colorize( im1.convert('L'), (0, 255, 0), (255, 255, 255))
ImageChops.multiply(im0, im1).show()
def test_lines(sock):
for i in range(130*130):
x0 = random.randint(0, 250)
x1 = random.randint(0, 250)
y0 = random.randint(0, 130)
y1 = random.randint(0, 130)
pim = reset_test(sock)
res = test_line(pim, sock, [x0, y0, x1, y1, 1])
dut = get_result(sock)
if not compare_img(pim, dut):
vdiff(pim, dut)
return 'x0 = %d y0 = %d x1 = %d y1 = %d i = %d' % (x0, y0, x1, y1, i)
return 'Ok'
def test_fills(sock):
for y0 in range(130):
for y1 in range(130):
x0 = random.randint(0, 250)
x1 = random.randint(0, 250)
pim = reset_test(sock)
res = test_fill(pim, sock, [x0, y0, x1, y1, 1])
dut = get_result(sock)
if not compare_img(pim, dut):
                return 'x0 = %d, y0 = %d, x1 = %d, y1 = %d' % (x0, y0, x1, y1)
return 'Ok'
def test_frames(sock):
for y0 in range(130):
for y1 in range(130):
x0 = random.randint(0, 250)
x1 = random.randint(0, 250)
# PIL generates no frame for x0 = x1. We do generate it.
if x0 == x1:
x1 += 1
pim = reset_test(sock)
res = test_frame(pim, sock, [x0, y0, x1, y1, 1])
dut = get_result(sock)
if not compare_img(pim, dut):
vdiff(pim, dut)
return 'x0 = %d, y0 = %d, x1 = %d, y1 = %d' % (x0, y0, x1, y1)
return 'Ok'
def test_textlines(sock):
for x0 in range(250):
for y0 in range(120):
pim = reset_test(sock)
res = test_textline(pim, sock, [x0, y0, 1])
dut = get_result(sock)
if not compare_img(pim, dut):
vdiff(pim, dut)
return 'x0 = %d, y0 = %d' % (x0, y0)
return 'Ok'
ts = [
['hlines', test_hlines],
['vlines', test_vlines],
['lines', test_lines],
['fills', test_fills],
['frames', test_frames],
['textlines', test_textlines],
]
sock = connect()
failed = []
for test in ts:
print 'Testing %s' % test[0]
res = test[1](sock)
if res != 'Ok':
print '\tFailed: %s' % res
failed.append([test[0], res])
if not failed:
print 'All tests passed'
else:
print '\nFailed tests:'
for i in failed:
print '%s (%s)' % (i[0], i[1])
sock.close()
``` |
{
"source": "jkmnt/tiny_eax_mode",
"score": 3
} |
#### File: jkmnt/tiny_eax_mode/cfgs.py
```python
import struct
import xtea
try:
from Crypto.Cipher import AES
class AESCfg:
BLOCKSIZE = 16
BLOCKSIZE_MASK = (1 << 128) - 1
ENDIAN = 'big'
class ECB:
def __init__(self, key):
self.enc = AES.new(key, AES.MODE_ECB)
def run(self, pt):
return self.enc.encrypt(pt)
except:
pass
class XTEACfg:
BLOCKSIZE = 8
BLOCKSIZE_MASK = (1 << 64) - 1
ENDIAN = 'little'
class ECB:
def __init__(self, key):
self.enc = xtea.XTEA(key)
def run(self, pt):
return self.enc.encrypt(pt)
```
#### File: jkmnt/tiny_eax_mode/eax.py
```python
def gf_double(a, blocksize):
if blocksize == 16:
if a >> 127:
a = (a << 1) ^ 0x87 # 0x87 for the 128 bit
else:
a = a << 1
else:
if a >> 63:
a = (a << 1) ^ 0x1B # 0x1B for 64 bit
else:
a = a << 1
return a & ((1 << (blocksize * 8)) - 1)
def xorstrings(b0, b1):
return bytes([a^b for a, b in zip(b0, b1)])
# simple pythonic implementations. the streamlike interface is in eax_stream
def ctr(cfg, key, nonce, data):
enc = cfg.ECB(key)
out = b''
nonce_int = int.from_bytes(nonce, cfg.ENDIAN, signed=False)
cnt = 0
for i in range(0, len(data), cfg.BLOCKSIZE):
block = data[i:i+cfg.BLOCKSIZE]
k = (nonce_int + cnt) & cfg.BLOCKSIZE_MASK
k = k.to_bytes(cfg.BLOCKSIZE, cfg.ENDIAN)
xorbuf = enc.run(k)
out += xorstrings(block, xorbuf)
cnt += 1
return out
def omac(cfg, key, data, tweak):
enc = cfg.ECB(key)
L = enc.run(bytes([0] * cfg.BLOCKSIZE))
L_int = int.from_bytes(L, cfg.ENDIAN, signed=False)
L2_int = gf_double(L_int, cfg.BLOCKSIZE)
L4_int = gf_double(L2_int, cfg.BLOCKSIZE)
L2 = L2_int.to_bytes(cfg.BLOCKSIZE, cfg.ENDIAN)
L4 = L4_int.to_bytes(cfg.BLOCKSIZE, cfg.ENDIAN)
# always big so the tweak is last byte
tweakbytes = int.to_bytes(tweak, cfg.BLOCKSIZE, 'big')
data = tweakbytes + data
data = bytearray(data)
if len(data) % cfg.BLOCKSIZE:
data += bytes([0x80])
data = data.ljust((len(data) + cfg.BLOCKSIZE - 1) & -cfg.BLOCKSIZE, b'\0')
data[-cfg.BLOCKSIZE:] = xorstrings(data[-cfg.BLOCKSIZE:], L4)
else:
data[-cfg.BLOCKSIZE:] = xorstrings(data[-cfg.BLOCKSIZE:], L2)
mac = bytes(cfg.BLOCKSIZE)
for i in range(0, len(data), cfg.BLOCKSIZE):
block = data[i:i+cfg.BLOCKSIZE]
xorred = xorstrings(block, mac)
mac = enc.run(xorred)
return mac
def eax_enc(cfg, key, nonce, header, pt):
N = omac(cfg, key, nonce, 0)
H = omac(cfg, key, header, 1)
ct = ctr(cfg, key, N, pt)
C = omac(cfg, key, ct, 2)
tag = xorstrings(xorstrings(N, C), H)
return (ct, tag)
# authenticate, but do not encrypt plaintext
def eax_just_auth(cfg, key, nonce, header, pt):
N = omac(cfg, key, nonce, 0)
H = omac(cfg, key, header, 1)
C = omac(cfg, key, pt, 2)
tag = xorstrings(xorstrings(N, C), H)
return (pt, tag)
def eax_dec(cfg, key, nonce, header, ct):
N = omac(cfg, key, nonce, 0)
H = omac(cfg, key, header, 1)
C = omac(cfg, key, ct, 2)
tag_local = xorstrings(xorstrings(N, C), H)
pt = ctr(cfg, key, N, ct)
return (pt, tag_local)
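# Round-trip sketch (illustrative, not part of the original module): encrypt
# and decrypt with the AES config from cfgs.py. Requires PyCryptodome and
# assumes cfgs.py sits next to this file; key/nonce/header are arbitrary demo values.
if __name__ == '__main__':
    from cfgs import AESCfg
    key = b'0123456789abcdef' # 16-byte AES key
    nonce = b'unique-nonce'
    header = b'associated data'
    ct, tag = eax_enc(AESCfg, key, nonce, header, b'attack at dawn')
    pt, tag2 = eax_dec(AESCfg, key, nonce, header, ct)
    assert pt == b'attack at dawn' and tag == tag2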
``` |
{
"source": "jknagin/Udacity-CarND-P2-AdvancedLaneLines",
"score": 3
} |
#### File: Udacity-CarND-P2-AdvancedLaneLines/lib/calibration.py
```python
import cv2
import numpy as np
from lib import utils
from typing import List
def find_chessboard_corners(img, nx: int, ny: int):
ret, corners = cv2.findChessboardCorners(img, (nx, ny), None) # corners becomes imgpoints in calibrate_camera
return ret, corners
def draw_chessboard_corners(img, nx: int, ny: int, corners, ret: bool):
img = cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
return img
def calibrate_camera(objpoints, imgpoints, gray_shape):
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray_shape[::-1], None, None)
return ret, mtx, dist, rvecs, tvecs
def calibrate_with_chessboard_images(calibration_images: List[str], nx: int, ny: int):
objp = np.zeros((nx * ny, 3), dtype=np.float32)
objp[:, :2] = np.mgrid[:nx, :ny].T.reshape(-1, 2)
objpoints = []
imgpoints = []
gray_shape = None
for calibration_image in calibration_images:
img = utils.read_image(calibration_image)
gray = utils.grayscale(img)
gray_shape = gray.shape
ret, corners = find_chessboard_corners(gray, nx, ny)
# print(calibration_image, ret)
if ret:
objpoints.append(objp)
imgpoints.append(corners)
# img = draw_chessboard_corners(img, nx, ny, corners, ret)
# plt.imshow(img)
# plt.title(calibration_image)
# plt.show()
ret, mtx, dist, _, _ = calibrate_camera(objpoints, imgpoints, gray_shape)
return ret, mtx, dist
def undistort(img, mtx, dist):
undistorted = cv2.undistort(img, mtx, dist, None, mtx)
return undistorted
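# Pipeline sketch (illustrative, not part of the original module): calibrate
# from a folder of 9x6 chessboard images, then undistort a test image. The
# glob pattern and file names are assumptions about the project layout.
if __name__ == "__main__":
    import glob
    chessboard_images = glob.glob("camera_cal/calibration*.jpg")
    ret, mtx, dist = calibrate_with_chessboard_images(chessboard_images, nx=9, ny=6)
    undistorted = undistort(utils.read_image("test_images/test1.jpg"), mtx, dist)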
``` |
{
"source": "jknapka/gearbox",
"score": 3
} |
#### File: gearbox/gearbox/commandmanager.py
```python
import inspect
import logging
import pkg_resources
LOG = logging.getLogger(__name__)
class EntryPointWrapper(object):
"""Wrap up a command class already imported to make it look like a plugin.
"""
def __init__(self, name, command_class):
self.name = name
self.command_class = command_class
def load(self, require=False):
return self.command_class
class CommandManager(object):
"""Discovers commands and handles lookup based on argv data.
:param namespace: String containing the setuptools entrypoint namespace
for the plugins to be loaded. For example,
``'cliff.formatter.list'``.
:param convert_underscores: Whether cliff should convert underscores to
spaces in entry_point commands.
"""
def __init__(self, namespace, convert_underscores=True):
self.commands = {}
self.namespace = namespace
self.convert_underscores = convert_underscores
self._load_commands()
def _load_commands(self):
        # NOTE(jamielennox): kept for compatibility.
self.load_commands(self.namespace)
def load_commands(self, namespace):
"""Load all the commands from an entrypoint"""
for ep in pkg_resources.iter_entry_points(namespace):
LOG.debug('found command %r', ep.name)
cmd_name = (ep.name.replace('_', ' ')
if self.convert_underscores
else ep.name)
self.commands[cmd_name] = ep
return
def __iter__(self):
return iter(self.commands.items())
def add_command(self, name, command_class):
self.commands[name] = EntryPointWrapper(name, command_class)
def find_command(self, argv):
"""Given an argument list, find a command and
return the processor and any remaining arguments.
"""
search_args = argv[:]
name = ''
while search_args:
if search_args[0].startswith('-'):
name = '%s %s' % (name, search_args[0])
raise ValueError('Invalid command %r' % name)
next_val = search_args.pop(0)
name = '%s %s' % (name, next_val) if name else next_val
if name in self.commands:
cmd_ep = self.commands[name]
if hasattr(cmd_ep, 'resolve'):
cmd_factory = cmd_ep.resolve()
else:
# NOTE(dhellmann): Some fake classes don't take
# require as an argument. Yay?
arg_spec = inspect.getargspec(cmd_ep.load)
if 'require' in arg_spec[0]:
cmd_factory = cmd_ep.load(require=False)
else:
cmd_factory = cmd_ep.load()
return (cmd_factory, name, search_args)
else:
raise ValueError('Unknown command %r' % next(iter(argv), ''))
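# Usage sketch (illustrative, not part of the original module): resolving a
# command word and its remaining arguments. The namespace and command name
# are examples only.
#
#   mgr = CommandManager('gearbox.commands')
#   factory, name, remainder = mgr.find_command(['serve', '--reload'])
#   # name == 'serve', remainder == ['--reload'], factory is the command class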
```
#### File: gearbox/commands/patch.py
```python
from __future__ import print_function
import os
import fnmatch
import re
from argparse import RawDescriptionHelpFormatter
from gearbox.command import Command
class PatchCommand(Command):
def get_description(self):
return r'''Patches files by replacing, appending or deleting text.
This is meant to provide a quick and easy way to replace text and
code in your projects.
Here are a few examples, this will replace all xi:include occurrences
with py:extends in all the template files recursively:
$ gearbox patch -R '*.html' xi:include -r py:extends
It is also possible to rely on regex and python for more complex
replacements, like updating the Copyright year in your documentation:
$ gearbox patch -R '*.rst' -x 'Copyright(\s*)(\d+)' -e -r '"Copyright\\g<1>"+__import__("datetime").datetime.utcnow().strftime("%Y")'
Works on a line by line basis, so it is not possible to match text
across multiple lines.
'''
def get_parser(self, prog_name):
parser = super(PatchCommand, self).get_parser(prog_name)
parser.formatter_class = RawDescriptionHelpFormatter
parser.add_argument('pattern',
help='The glob pattern of files that should be matched')
parser.add_argument('text',
help='text that should be looked up in matched files.')
parser.add_argument('-r', '--replace',
dest='replacement',
help='Replace occurrences of text with REPLACEMENT')
parser.add_argument('-a', '--append',
dest='addition',
help='Append ADDITION after the line with matching text.')
parser.add_argument('-d', '--delete',
action='store_true',
help='Delete lines matching text.')
parser.add_argument('-x', '--regex',
dest='regex',
action="store_true",
help='Parse the text as a regular expression.')
parser.add_argument('-R', '--recursive',
dest='recursive',
action="store_true",
help='Look for files matching pattern in subfolders too.')
parser.add_argument('-e', '--eval',
dest='eval',
action='store_true',
help='Eval the replacement as Python code before applying it.')
return parser
def _walk_recursive(self):
for root, dirnames, filenames in os.walk(os.getcwd()):
for filename in filenames:
yield os.path.join(root, filename)
def _walk_flat(self):
root = os.getcwd()
for filename in os.listdir(root):
yield os.path.join(root, filename)
def _replace_regex(self, line, text, replacement):
return re.sub(text, replacement, line)
def _replace_plain(self, line, text, replacement):
return line.replace(text, replacement)
def _match_regex(self, line, text):
return re.search(text, line) is not None
def _match_plain(self, line, text):
return text in line
def take_action(self, opts):
walk = self._walk_flat
if opts.recursive:
walk = self._walk_recursive
match = self._match_plain
if opts.regex:
match = self._match_regex
replace = self._replace_plain
if opts.regex:
replace = self._replace_regex
matches = []
for filepath in walk():
if fnmatch.fnmatch(filepath, opts.pattern):
matches.append(filepath)
print('%s files matching' % len(matches))
for filepath in matches:
replacement = opts.replacement
if opts.eval and replacement:
replacement = str(eval(replacement, globals()))
addition = opts.addition
if opts.eval and addition:
addition = str(eval(addition, globals()))
matches = False
lines = []
with open(filepath) as f:
for line in f:
if not match(line, opts.text):
lines.append(line)
continue
matches = True
empty_line = not line.strip()
if opts.replacement:
line = replace(line, opts.text, replacement)
if empty_line or line.strip() and not opts.delete:
lines.append(line)
if opts.addition:
lines.append(addition+'\n')
print('%s Patching %s' % (matches and '!' or 'x', filepath))
if matches:
with open(filepath, 'w') as f:
f.writelines(lines)
```
#### File: gearbox/commands/setup_app.py
```python
from __future__ import print_function
import os
from gearbox.command import Command
from paste.deploy import appconfig
class SetupAppCommand(Command):
def get_description(self):
return "Setup an application, given a config file"
def get_parser(self, prog_name):
parser = super(SetupAppCommand, self).get_parser(prog_name)
parser.add_argument("-c", "--config",
help='application config file to read (default: development.ini)',
dest='config_file', default="development.ini")
parser.add_argument('--name',
action='store',
dest='section_name',
default=None,
help='The name of the section to set up (default: app:main)')
return parser
def take_action(self, opts):
config_spec = opts.config_file
section = opts.section_name
if section is None:
if '#' in config_spec:
config_spec, section = config_spec.split('#', 1)
else:
section = 'main'
if not ':' in section:
plain_section = section
section = 'app:'+section
else:
plain_section = section.split(':', 1)[0]
if not config_spec.startswith('config:'):
config_spec = 'config:' + config_spec
if plain_section != 'main':
config_spec += '#' + plain_section
config_file = config_spec[len('config:'):].split('#', 1)[0]
config_file = os.path.join(os.getcwd(), config_file)
conf = appconfig(config_spec, relative_to=os.getcwd())
ep_name = conf.context.entry_point_name
ep_group = conf.context.protocol
dist = conf.context.distribution
if dist is None:
raise RuntimeError("The section %r is not the application (probably a filter). You should add #section_name, where section_name is the section that configures your application" % plain_section)
self._setup_config(dist, config_file, section, {}, verbosity=self.app.options.verbose_level)
def _setup_config(self, dist, filename, section, vars, verbosity):
"""
Called to setup an application, given its configuration
file/directory.
The default implementation calls
``package.websetup.setup_config(command, filename, section,
vars)`` or ``package.websetup.setup_app(command, config,
vars)``
With ``setup_app`` the ``config`` object is a dictionary with
the extra attributes ``global_conf``, ``local_conf`` and
``filename``
"""
modules = [line.strip() for line in dist.get_metadata_lines('top_level.txt')
if line.strip() and not line.strip().startswith('#')]
if not modules:
print('No modules are listed in top_level.txt')
print('Try running python setup.py egg_info to regenerate that file')
for mod_name in modules:
mod_name = mod_name + '.websetup'
try:
mod = self._import_module(mod_name)
except ImportError as e:
print(e)
desc = getattr(e, 'args', ['No module named websetup'])[0]
if not desc.startswith('No module named websetup'):
raise
mod = None
if mod is None:
continue
if hasattr(mod, 'setup_app'):
if verbosity:
print('Running setup_app() from %s' % mod_name)
self._call_setup_app(mod.setup_app, filename, section, vars)
elif hasattr(mod, 'setup_config'):
if verbosity:
print('Running setup_config() from %s' % mod_name)
mod.setup_config(None, filename, section, vars)
else:
print('No setup_app() or setup_config() function in %s (%s)' % (mod.__name__, mod.__file__))
def _call_setup_app(self, func, filename, section, vars):
filename = os.path.abspath(filename)
if ':' in section:
section = section.split(':', 1)[1]
conf = 'config:%s#%s' % (filename, section)
conf = appconfig(conf)
conf.filename = filename
func(None, conf, vars)
def _import_module(self, s):
"""
Import a module.
"""
mod = __import__(s)
parts = s.split('.')
for part in parts[1:]:
mod = getattr(mod, part)
return mod
```
#### File: gearbox/gearbox/main.py
```python
from __future__ import print_function
import argparse
import inspect
import sys
import os
import pkg_resources
import logging
import warnings
from .utils.plugins import find_egg_info_dir
from .commands.help import HelpCommand, HelpAction
from .commandmanager import CommandManager
log = logging.getLogger('gearbox')
class GearBox(object):
NAME = os.path.splitext(os.path.basename(sys.argv[0]))[0]
LOG_DATE_FORMAT = '%H:%M:%S'
LOG_GEARBOX_FORMAT = '%(asctime)s,%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s'
DEFAULT_VERBOSE_LEVEL = 1
def __init__(self):
self.command_manager = CommandManager('gearbox.commands')
self.command_manager.add_command('help', HelpCommand)
self.parser = argparse.ArgumentParser(description="TurboGears2 Gearbox toolset",
add_help=False)
parser = self.parser
parser.add_argument(
'--version',
action='version',
version='%(prog)s {0}'.format(
pkg_resources.get_distribution("gearbox").version
),
)
verbose_group = parser.add_mutually_exclusive_group()
verbose_group.add_argument(
'-v', '--verbose',
action='count',
dest='verbose_level',
default=self.DEFAULT_VERBOSE_LEVEL,
help='Increase verbosity of output. Can be repeated.',
)
verbose_group.add_argument(
'-q', '--quiet',
action='store_const',
dest='verbose_level',
const=0,
help='Suppress output except warnings and errors.',
)
parser.add_argument(
'--log-file',
action='store',
default=None,
help='Specify a file to log output. Disabled by default.',
)
parser.add_argument(
'-h', '--help',
action=HelpAction,
nargs=0,
default=self, # tricky
help="Show this help message and exit.",
)
parser.add_argument(
'--debug',
default=False,
action='store_true',
help='Show tracebacks on errors.',
)
parser.add_argument(
'--relative',
default=False,
action='store_true',
dest='relative_plugins',
help='Load plugins and applications also from current path.',
)
def _configure_logging(self):
if self.options.debug:
warnings.simplefilter('default')
try:
logging.captureWarnings(True)
except AttributeError:
pass
root_logger = logging.getLogger('')
root_logger.setLevel(logging.INFO)
# Set up logging to a file
if self.options.log_file:
file_handler = logging.FileHandler(filename=self.options.log_file)
formatter = logging.Formatter(self.LOG_GEARBOX_FORMAT, datefmt=self.LOG_DATE_FORMAT)
file_handler.setFormatter(formatter)
root_logger.addHandler(file_handler)
# Always send higher-level messages to the console via stderr
console = logging.StreamHandler(sys.stderr)
console_level = {0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG,
}.get(self.options.verbose_level, logging.DEBUG)
console.setLevel(console_level)
formatter = logging.Formatter(self.LOG_GEARBOX_FORMAT, datefmt=self.LOG_DATE_FORMAT)
console.setFormatter(formatter)
root_logger.addHandler(console)
def run(self, argv):
"""Application entry point"""
try:
self.options, remainder = self.parser.parse_known_args(argv)
self._configure_logging()
if self.options.relative_plugins:
curdir = os.getcwd()
sys.path.insert(0, curdir)
pkg_resources.working_set.add_entry(curdir)
try:
self._load_commands_for_current_dir()
except pkg_resources.DistributionNotFound as e:
try:
error_msg = repr(e)
except:
error_msg = 'Unknown Error'
log.error('Failed to load project commands with error '
'``%s``, have you installed your project?' % error_msg)
except Exception as err:
if hasattr(self, 'options'):
debug = self.options.debug
else:
debug = True
if debug:
log.exception(err)
else:
log.error(err)
return 1
return self._run_subcommand(remainder)
def _run_subcommand(self, argv):
try:
subcommand = self.command_manager.find_command(argv)
except ValueError as err:
if self.options.debug:
log.exception(err)
else:
log.error(err)
return 2
cmd_factory, cmd_name, sub_argv = subcommand
kwargs = {}
if 'cmd_name' in self._getargspec(cmd_factory)[0]: # Check to see if 'cmd_name' is in cmd_factory's args
kwargs['cmd_name'] = cmd_name
cmd = cmd_factory(self, self.options, **kwargs)
try:
full_name = ' '.join([self.NAME, cmd_name])
cmd_parser = cmd.get_parser(full_name)
parsed_args = cmd_parser.parse_args(sub_argv)
return cmd.run(parsed_args)
except Exception as err:
log.exception(err)
return 4
def _load_commands_for_current_dir(self):
egg_info_dir = find_egg_info_dir(os.getcwd())
if egg_info_dir:
package_name = os.path.splitext(os.path.basename(egg_info_dir))[0]
try:
pkg_resources.require(package_name)
except pkg_resources.DistributionNotFound as e:
msg = '%sNot Found%s: %s (is it an installed Distribution?)'
if str(e) != package_name:
raise pkg_resources.DistributionNotFound(msg % (str(e) + ': ', ' for', package_name))
else:
raise pkg_resources.DistributionNotFound(msg % ('', '', package_name))
dist = pkg_resources.get_distribution(package_name)
for epname, ep in dist.get_entry_map('gearbox.plugins').items():
self.load_commands_for_package(ep.module_name)
def load_commands_for_package(self, package_name):
dist = pkg_resources.get_distribution(package_name)
for epname, ep in dist.get_entry_map('gearbox.project_commands').items():
self.command_manager.commands[epname.replace('_', ' ')] = ep
def _getargspec(self, func):
if not hasattr(inspect, 'signature'):
return inspect.getargspec(func.__init__)
else: # pragma: no cover
sig = inspect.signature(func)
args = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
varargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_POSITIONAL
]
varargs = varargs[0] if varargs else None
varkw = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_KEYWORD
]
varkw = varkw[0] if varkw else None
defaults = tuple((
p.default for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty
)) or None
return args, varargs, varkw, defaults
def main():
args = sys.argv[1:]
gearbox = GearBox()
return gearbox.run(args)
``` |
{
"source": "jknaresh/django-graphql-ariadne",
"score": 2
} |
#### File: django_apollo_example/api/queries.py
```python
from api.models import Post
def list_posts_resolver(obj, info):
try:
posts = [post.to_dict() for post in Post.get_queryset().all()]
print(posts)
payload = {"success": True, "posts": posts}
except Exception as error:
payload = {"success": False, "errors": [str(error)]}
return payload
``` |
{
"source": "jkneng/nlp_course",
"score": 3
} |
#### File: nlp_course/week02_classification/seminar_main.py
```python
import pandas as pd
import numpy as np
import nltk
# DATA preview
data = pd.read_csv("./Train_rev1.csv", index_col=None)
print(data.shape)
data['Log1pSalary'] = np.log1p(data['SalaryNormalized']).astype('float32')
text_columns = ["Title", "FullDescription"]
categorical_columns = ["Category", "Company", "LocationNormalized", "ContractType", "ContractTime"]
TARGET_COLUMN = "Log1pSalary"
data[text_columns] = data[text_columns].fillna('NaN')
data[categorical_columns] = data[categorical_columns].fillna('NaN') # cast missing values to string "NaN"
print(data.sample(3))
# DATA preprocessing
tokenizer = nltk.tokenize.WordPunctTokenizer()
# YOUR CODE HERE
for col in text_columns:
data[col] = data[col].map(str.lower).map(tokenizer.tokenize)
# data[text_columns] = data[text_columns].apply(tokenizer.tokenize)
print("Tokenized:")
print(data["FullDescription"][2::100000])
print(data["FullDescription"][2][:50])
# assert data["FullDescription"][2][:50] == 'mathematical modeller / simulation analyst / opera'
# assert data["Title"][54321] == 'international digital account manager ( german )'
print('Tokenizer is right')
from collections import Counter
token_counts = Counter()
# Count how many times does each token occur in both "Title" and "FullDescription" in total
#TODO <YOUR CODE>
for col in text_columns:
for wl in data[col]:
token_counts.update(wl)
print("Total unique tokens :", len(token_counts))
print('\n'.join(map(str, token_counts.most_common(n=5))))
print('...')
print('\n'.join(map(str, token_counts.most_common()[-3:])))
assert token_counts.most_common(1)[0][1] in range(2600000, 2700000)
assert len(token_counts) in range(200000, 210000)
print('Correct!')
min_count = 10
# tokens from token_counts keys that had at least min_count occurrences throughout the dataset
tokens = sorted(t for t, c in token_counts.items() if c >= min_count)
# Add a special tokens for unknown and empty words
UNK, PAD = "UNK", "PAD"
tokens = [UNK, PAD] + tokens
print("Vocabulary size:", len(tokens))
assert type(tokens) == list
assert len(tokens) in range(32000, 35000)
assert 'me' in tokens
assert UNK in tokens
print("Correct!")
#<your code here - dict of token name to its index in tokens>
token_to_id = {tok: i for i, tok in enumerate(tokens)}
# map text lines into neural network-digestible matrices.
UNK_IX, PAD_IX = map(token_to_id.get, [UNK, PAD])
def as_matrix(sequences, max_len=None):
""" Convert a list of tokens into a matrix with padding """
if isinstance(sequences[0], str):
sequences = list(map(str.split, sequences))
max_len = min(max(map(len, sequences)), max_len or float('inf'))
matrix = np.full((len(sequences), max_len), np.int32(PAD_IX))
for i,seq in enumerate(sequences):
row_ix = [token_to_id.get(word, UNK_IX) for word in seq[:max_len]]
matrix[i, :len(row_ix)] = row_ix
return matrix
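# For example (sketch): as_matrix(data["Title"].values[:3]) returns an int32 matrix of
# shape (3, longest_title_length): known words map to their vocabulary index,
# out-of-vocabulary words map to UNK_IX, and shorter rows are right-padded with PAD_IX.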
from sklearn.feature_extraction import DictVectorizer
# we only consider top-1k most frequent companies to minimize memory usage
top_companies, top_counts = zip(*Counter(data['Company']).most_common(1000))
recognized_companies = set(top_companies)
data["Company"] = data["Company"].apply(lambda comp: comp if comp in recognized_companies else "Other")
categorical_vectorizer = DictVectorizer(dtype=np.float32, sparse=False)
categorical_vectorizer.fit(data[categorical_columns].apply(dict, axis=1))
print('apply dict:\n', data[categorical_columns].apply(dict, axis=1))
from sklearn.model_selection import train_test_split
data_train, data_val = train_test_split(data, test_size=0.2, random_state=42)
data_train.index = range(len(data_train))
data_val.index = range(len(data_val))
print("Train size = ", len(data_train))
print("Validation size = ", len(data_val))
# data to tensor
import torch
def make_batch(data, max_len=None, word_dropout=0, device=torch.device('cpu')):
"""
Creates a keras-friendly dict from the batch data.
:param word_dropout: replaces token index with UNK_IX with this probability
:returns: a dict with {'title' : int64[batch, title_max_len]
"""
batch = {}
batch["Title"] = as_matrix(data["Title"].values, max_len)
batch["FullDescription"] = as_matrix(data["FullDescription"].values, max_len)
batch['Categorical'] = categorical_vectorizer.transform(data[categorical_columns].apply(dict, axis=1))
if word_dropout != 0:
batch["FullDescription"] = apply_word_dropout(batch["FullDescription"], 1. - word_dropout)
if TARGET_COLUMN in data.columns:
batch[TARGET_COLUMN] = data[TARGET_COLUMN].values
return to_tensors(batch, device)
def to_tensors(batch, device):
batch_tensors = dict()
for key, arr in batch.items():
if key in ["FullDescription", "Title"]:
batch_tensors[key] = torch.tensor(arr, device=device, dtype=torch.int64)
else:
batch_tensors[key] = torch.tensor(arr, device=device)
return batch_tensors
def apply_word_dropout(matrix, keep_prop, replace_with=UNK_IX, pad_ix=PAD_IX,):
dropout_mask = np.random.choice(2, np.shape(matrix), p=[keep_prop, 1 - keep_prop])
dropout_mask &= matrix != pad_ix
return np.choose(dropout_mask, [matrix, np.full_like(matrix, replace_with)])
# model definition
import torch
import torch.nn as nn
import torch.nn.functional as F
class SalaryPredictor(nn.Module):
def __init__(self, n_tokens=len(tokens), n_cat_features=len(categorical_vectorizer.vocabulary_), hid_size=64):
super().__init__()
# YOUR CODE HERE
embedding_dim = 300
self.emb = nn.Embedding(n_tokens, embedding_dim)
out_channels1, out_channels2 = 32, 32
kernel_size1, kernerl_size2 = 2, 2
self.conv1 = nn.Conv1d(embedding_dim, out_channels1, kernel_size1)
self.conv2 = nn.Conv1d(embedding_dim, out_channels2, kernerl_size2)
self.fc_cat = nn.Linear(n_cat_features, hid_size)
self.fc_out = nn.Linear(out_channels1 + out_channels2 + hid_size, 1)
@staticmethod
def conv_and_pool(x, conv):
x = conv(x)
x = F.relu(x)
x = F.max_pool1d(x, x.size(2))
x = x.squeeze(2)
return x
def forward(self, batch):
# YOUR CODE HERE
x1 = self.emb(batch['Title'])
x1 = x1.permute(0, 2, 1)
x2 = self.emb(batch['FullDescription'])
x2 = x2.permute(0, 2, 1)
x1 = self.conv_and_pool(x1, self.conv1)
x2 = self.conv_and_pool(x2, self.conv2)
x3 = self.fc_cat(batch['Categorical'])
x = torch.cat((x1, x2, x3), 1)
out = self.fc_out(x)
out = out.squeeze(1)
return out
model = SalaryPredictor()
batch = make_batch(data_train[:100])
criterion = nn.MSELoss()
dummy_pred = model(batch)
print(dummy_pred.shape)
dummy_loss = criterion(dummy_pred, batch[TARGET_COLUMN])
assert dummy_pred.shape == torch.Size([100])
assert len(torch.unique(dummy_pred)) > 20, "model returns suspiciously few unique outputs. Check your initialization"
assert dummy_loss.ndim == 0 and 0. <= dummy_loss <= 250., "make sure you minimize MSE"
# train and eval
def iterate_minibatches(data, batch_size=256, shuffle=True, cycle=False, device=torch.device('cpu'), **kwargs):
""" iterates minibatches of data in random order """
while True:
indices = np.arange(len(data))
if shuffle:
indices = np.random.permutation(indices)
for start in range(0, len(indices), batch_size):
batch = make_batch(data.iloc[indices[start : start + batch_size]], **kwargs)
yield batch
if not cycle: break
# training
import tqdm
BATCH_SIZE = 16
EPOCHS = 5
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# print metrics
def print_metrics(model, data, batch_size=BATCH_SIZE, name="", **kw):
squared_error = abs_error = num_samples = 0.0
model.eval()
with torch.no_grad():
for batch in iterate_minibatches(data, batch_size=batch_size, shuffle=False, **kw):
batch_pred = model(batch)
squared_error += torch.sum(torch.square(batch_pred - batch[TARGET_COLUMN]))
abs_error += torch.sum(torch.abs(batch_pred - batch[TARGET_COLUMN]))
num_samples += len(batch_pred)
mse = squared_error.detach().cpu().numpy() / num_samples
mae = abs_error.detach().cpu().numpy() / num_samples
print("%s results:" % (name or ""))
print("Mean square error: %.5f" % mse)
print("Mean absolute error: %.5f" % mae)
return mse, mae
model = SalaryPredictor().to(DEVICE)
criterion = nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
for epoch in range(EPOCHS):
print(f"epoch: {epoch}")
model.train()
for i, batch in tqdm.tqdm(enumerate(
iterate_minibatches(data_train, batch_size=BATCH_SIZE, device=DEVICE)),
total=len(data_train) // BATCH_SIZE
):
pred = model(batch)
loss = criterion(pred, batch[TARGET_COLUMN])
optimizer.zero_grad()
loss.backward()
optimizer.step()
print_metrics(model, data_val)
# Bonus part: explaining model predictions
def explain(model, sample, col_name='Title'):
""" Computes the effect each word had on model predictions """
    sample = dict(sample)
    col_value = sample[col_name]
    if not isinstance(col_value, str):  # the text columns were tokenized into lists above
        col_value = ' '.join(col_value)
    sample[col_name] = col_value
    sample_col_tokens = [tokens[token_to_id.get(tok, 0)] for tok in col_value.split()]
    data_drop_one_token = pd.DataFrame([sample] * (len(sample_col_tokens) + 1))
    for drop_i in range(len(sample_col_tokens)):
        data_drop_one_token.loc[drop_i, col_name] = ' '.join(UNK if i == drop_i else tok
                                                             for i, tok in enumerate(sample_col_tokens))
    with torch.no_grad():
        *predictions_drop_one_token, baseline_pred = model(make_batch(data_drop_one_token, device=DEVICE)).cpu().numpy()
diffs = baseline_pred - predictions_drop_one_token
return list(zip(sample_col_tokens, diffs))
import matplotlib.pyplot as plt
from IPython.display import HTML, display_html
def draw_html(tokens_and_weights, cmap=plt.get_cmap("bwr"), display=True,
token_template="""<span style="background-color: {color_hex}">{token}</span>""",
font_style="font-size:14px;"
):
def get_color_hex(weight):
rgba = cmap(1. / (1 + np.exp(weight)), bytes=True)
return '#%02X%02X%02X' % rgba[:3]
tokens_html = [
token_template.format(token=token, color_hex=get_color_hex(weight))
for token, weight in tokens_and_weights
]
raw_html = """<p style="{}">{}</p>""".format(font_style, ' '.join(tokens_html))
if display:
display_html(HTML(raw_html))
return raw_html
i = 36605
tokens_and_weights = explain(model, data.loc[i], "Title")
draw_html([(tok, weight * 5) for tok, weight in tokens_and_weights], font_style='font-size:20px;');
tokens_and_weights = explain(model, data.loc[i], "FullDescription")
draw_html([(tok, weight * 10) for tok, weight in tokens_and_weights]);
i = 12077
tokens_and_weights = explain(model, data.loc[i], "Title")
draw_html([(tok, weight * 5) for tok, weight in tokens_and_weights], font_style='font-size:20px;');
tokens_and_weights = explain(model, data.loc[i], "FullDescription")
draw_html([(tok, weight * 10) for tok, weight in tokens_and_weights]);
i = np.random.randint(len(data))
print("Index:", i)
print("Salary (gbp):", np.expm1(model.predict(make_batch(data.iloc[i: i+1]))[0, 0]))
tokens_and_weights = explain(model, data.loc[i], "Title")
draw_html([(tok, weight * 5) for tok, weight in tokens_and_weights], font_style='font-size:20px;');
tokens_and_weights = explain(model, data.loc[i], "FullDescription")
draw_html([(tok, weight * 10) for tok, weight in tokens_and_weights]);
``` |
{
"source": "jknewson/WiMLib",
"score": 2
} |
#### File: WIMLib/Resources/Result.py
```python
import json
#endregion
class Result(object):
def __init__(self,identifier,descr = ""):
self.ID = identifier
self.Description = descr
self.Values = {}
```
#### File: WiMLib/WIMLib/SpatialOps.py
```python
#------------------------------------------------------------------------------
#----- SpatialOps.py ------------------------------------------------------------
#------------------------------------------------------------------------------
#
# copyright: 2016 WiM - USGS
#
# authors: <NAME> - Ph.D. Student NC State University
# <NAME> - USGS Web Informatics and Mapping (WiM)
#
# purpose: Contains reusable global spatial methods
#
# usage: THIS SECTION NEEDS TO BE UPDATED
#
# discussion: An explanation of different overlay tools is provided by the
# link below.
#
# See:
# https://blogs.esri.com/esri/arcgis/2012/10/12/comparingoverlaytools/
#
# dates: 05 NOV 2016 jkn - Created / Date notation edited by jw
# 03 APR 2017 jw - Modified
#
#------------------------------------------------------------------------------
#region "Imports"
import shutil
import sys
import os
from os.path import split
import tempfile
import arcpy
from arcpy.sa import *
from arcpy import env
import traceback
import json
from WIMLib import WiMLogging
from contextlib import contextmanager
import PrismOps
import numpy as np
#endregion
##-------1---------2---------3---------4---------5---------6---------7---------8
## SpatialOps
##-------+---------+---------+---------+---------+---------+---------+---------+
class SpatialOps(object):
#region Constructor
def __init__(self, workspacePath):
self._WorkspaceDirectory = workspacePath
tempdir = os.path.join(self._WorkspaceDirectory,"Temp")
#make sure tempdir exists
if not os.path.exists(tempdir):
os.makedirs(tempdir)
self._TempLocation = tempfile.mkdtemp(dir=tempdir)
arcpy.env.workspace = self._TempLocation
arcpy.env.overwriteOutput = True
self._sm("initialized spatialOps")
def __exit__(self, exc_type, exc_value, traceback):
try:
shutil.rmtree(self._TempLocation, True)
arcpy.ResetEnvironments()
arcpy.ClearEnvironment("workspace")
except:
self._sm("Failed to remove temp space on close","ERROR",50)
#endregion
#region Feature methods
def Select(self, inFeature, intersectfeature, fields):
arcpy.Intersect_analysis([inFeature,intersectfeature], "intersectOutput")
def ProjectFeature(self, inFeature, sr):
#http://joshwerts.com/blog/2015/09/10/arcpy-dot-project-in-memory-featureclass/
inSR = None
out_projected_fc = None
path =""
name =""
source_curs = None
ins_curs = None
row = None
try:
inSR = arcpy.Describe(inFeature).spatialReference
if (inSR.name == sr.name): return inFeature
name = arcpy.Describe(inFeature).name +"_proj"
out_projected_fc = arcpy.management.CreateFeatureclass(self._TempLocation, name,
arcpy.Describe(inFeature).shapeType,
template=inFeature,
spatial_reference=sr)
# specify copy of all fields from source to destination
fields = ["Shape@"] + [f.name for f in arcpy.ListFields(inFeature) if not f.required]
# project source geometries on the fly while inserting to destination featureclass
with arcpy.da.SearchCursor(inFeature, fields, spatial_reference=sr) as source_curs,\
arcpy.da.InsertCursor(out_projected_fc, fields) as ins_curs:
for row in source_curs:
ins_curs.insertRow(row)
#next
#end with
return out_projected_fc
except:
tb = traceback.format_exc()
raise Exception("Failed to project feature " +tb)
finally:
inSR = None
out_projected_fc= None
path =""
name =""
if source_curs is not None: del source_curs
if ins_curs is not None: del ins_curs
if row is not None: del row
def PersistFeature(self,inFeature, path, name):
arcpy.FeatureClassToFeatureClass_conversion(inFeature,path,name)
def getAreaSqMeter(self, inFeature):
AreaValue = 0
try:
sr = arcpy.Describe(inFeature).spatialReference
if(sr.type == "Geographic"):
#USA_Contiguous_Albers_Equal_Area_Conic_USGS_version:
inFeature = self.ProjectFeature(inFeature,arcpy.SpatialReference(102039))[0]
sr = arcpy.Describe(inFeature).spatialReference
cursor = arcpy.da.SearchCursor(inFeature, "SHAPE@")
for row in cursor:
AreaValue += row[0].area * sr.metersPerUnit * sr.metersPerUnit
return AreaValue if (AreaValue > 0) else None
except:
tb = traceback.format_exc()
self._sm("Error computing area "+tb,"ERROR")
return None
def getAreaSqKilometer(self, inFeature):
AreaValue = 0
try:
sr = arcpy.Describe(inFeature).spatialReference
if(sr.type == "Geographic"):
#USA_Contiguous_Albers_Equal_Area_Conic_USGS_version:
inFeature = self.ProjectFeature(inFeature,arcpy.SpatialReference(102039))[0]
sr = arcpy.Describe(inFeature).spatialReference
cursor = arcpy.da.SearchCursor(inFeature, "SHAPE@")
for row in cursor:
AreaValue += row[0].getArea(units='SQUAREKILOMETERS') * sr.metersPerUnit * sr.metersPerUnit
return AreaValue if (AreaValue > 0) else None
except:
tb = traceback.format_exc()
self._sm("Error computing area "+tb,"ERROR")
return None
def spatialJoin(self, inFeature, maskfeature, fieldStr='',methodStr ='' ):
mask = None
fieldmappings = None
try:
sr = arcpy.Describe(inFeature).spatialReference
mask = self.ProjectFeature(maskfeature,sr)
out_projected_fc = os.path.join(self._TempLocation, "ovrlytmpsj")
if(fieldStr != '' and methodStr != ''):
# Create a new fieldmappings and add the two input feature classes.
fieldmappings = arcpy.FieldMappings()
fieldmappings.addTable(mask)
fieldmappings.addTable(inFeature)
#for each field + method
methods = [x.strip() for x in methodStr.split(';')]
Fields = [x.strip() for x in fieldStr.split(';')]
#sm(Fields.count + " Fields & " + methods.count + " Methods")
for field in Fields:
fieldIndex = fieldmappings.findFieldMapIndex(field)
for method in methods:
map = self.__getFieldMap(fieldmappings,fieldIndex,method+field,method)
if map is not None:
fieldmappings.addFieldMap(map)
#next method
#next Field
self._sm("performing spatial join ...")
return arcpy.SpatialJoin_analysis(maskfeature, inFeature, out_projected_fc,'', '', fieldmappings,"COMPLETELY_CONTAINS")
except:
tb = traceback.format_exc()
self._sm(tb,"Error",152)
finally:
mask = None
#do not release
out_projected_fc = None
def spatialOverlay(self, inFeature, maskfeature, matchOption = "COMPLETELY_CONTAINS"):
mask = None
try:
sr = arcpy.Describe(inFeature).spatialReference
mask = self.ProjectFeature(maskfeature,sr)
out_projected_fc = os.path.join(self._TempLocation, "ovrlytmpso")
self._sm("performing spatial join ...")
return arcpy.SpatialJoin_analysis(maskfeature, inFeature, out_projected_fc,'JOIN_ONE_TO_MANY', 'KEEP_COMMON', None, matchOption)
except:
tb = traceback.format_exc()
self._sm(tb,"Error",152)
finally:
mask = None
#do not release
out_projected_fc = None
def getFeatureStatistic(self, inFeature, maskFeature, statisticRules, fieldStr, WhereClause = "", matchOption = "COMPLETELY_CONTAINS"):
'''
computes the statistic
Statistic rules, semicolon separated
SUM—Adds the total value for the specified field.
MEAN—Calculates the average for the specified field.
MIN—Finds the smallest value for all records of the specified field.
MAX—Finds the largest value for all records of the specified field.
RANGE—Finds the range of values (MAX minus MIN) for the specified field.
STD—Finds the standard deviation on values in the specified field.
COUNT—Finds the number of values included in statistical calculations.
This counts each value except null values. To determine the number
of null values in a field, use the COUNT statistic on the field in
question, and a COUNT statistic on a different field which does not
contain nulls (for example, the OID if present), then subtract the two values.
FIRST—Finds the first record in the Input Table and uses its specified field value.
LAST—Finds the last record in the Input Table and uses its specified field value.
'''
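        # Hypothetical usage sketch (feature classes and field name are illustrative):
        #   stats = self.getFeatureStatistic(streamsFC, basinFC, "MEAN", "LENGTH_KM")
        #   stats["LENGTH_KM"]["MEAN"]  -> average LENGTH_KM of features fully inside the basin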
map = []
values = {}
cursor = None
tblevalue=None
spOverlay = None
try:
methods = [x.strip() for x in statisticRules.split(';')]
Fields = [x.strip() for x in fieldStr.split(';')]
#sm(Fields.count + " Fields & " + methods.count + " Methods")
for field in Fields:
for method in methods:
map.append([field,method])
#next method
#next Field
spOverlay = self.spatialOverlay(inFeature,maskFeature,matchOption)
#Validate that we have values within the polygon/basin
# If we do not, set all values equal to zero
if(int(arcpy.GetCount_management(spOverlay).getOutput(0)) < 1):
self._sm("Basin contains no features", "WARNING")
for m in map: values[m[0]]={m[1]: float(0)}
return values
#endif
#If we do have values, then carry out spatial statistics
tblevalue = arcpy.Statistics_analysis(spOverlay,os.path.join(self._TempLocation, "ftmp"),map)
mappedFeilds = [x[1]+"_"+x[0] for x in map]
whereClause = WhereClause
self._sm("The WhereClause is: " + whereClause)
cursor = arcpy.da.SearchCursor(tblevalue, mappedFeilds, whereClause)
try:
for row in cursor:
i=0
for m in map:
values[m[0]]={m[1]: float(row[i])}
i+=1
return values
#Is an except catch for when row contains nothing
except:
self._sm("Now rows were found in cursor. Setting values to zero.", "WARNING")
for m in map: values[m[0]]={m[1]: float(0)}
return values
except:
tb = traceback.format_exc()
self._sm("Failed to get raster statistic " +tb,"ERROR",229)
finally:
#local cleanup
if cursor is not None: del cursor; cursor = None
if tblevalue is not None: del tblevalue; tblevalue = None
if spOverlay is not None: del spOverlay; spOverlay = None
def getFeatureCount(self,inFeature, maskFeature):
'''
Finds the number of features
'''
        spOverlay = None
        try:
spOverlay = self.spatialOverlay(inFeature,maskFeature)
val = int(arcpy.GetCount_management(spOverlay).getOutput(0))
return val
except:
tb = traceback.format_exc()
self._sm("Failed to get raster statistic " +tb,"ERROR",229)
finally:
#local cleanup
if spOverlay is not None: del spOverlay; spOverlay = None
#endregion
#region Raster methods
def setpRAE(self,snapgds, directory,extentgds = None, maskgds = None):
"""Set Raster Analysis Environment.
snapgds: snap IGeodataset
directory: workspace and scratch workspace directory
extentgds: extent IGeodataset
maskgds: mask IGeodataset
"""
try:
raise Exception("This is a work in progress. Its not quite right yet")
#https://pro.arcgis.com/en/pro-app/arcpy/classes/env.htm
arcpy.ResetEnvironments()
pExists = os.path.exists(directory)
#set spatial reference
if maskgds is not None:
#arcpy.env.outputCoordinateSystem = arcpy.Describe(maskgds).spatialReference
arcpy.env.extent = arcpy.Describe(maskgds).extent
#arcpy.env.mask = maskgds
else:
arcpy.env.outputCoordinateSystem = arcpy.Describe(snapgds).spatialReference
#endif
#set ouput workspace - check exists first and make
if not pExists:
#create one
os.makedirs(directory)
#endif
arcpy.env.workspace = directory
arcpy.env.scratchWorkspace = directory
#Cell Size
desc = arcpy.Describe(snapgds)
arcpy.env.cellSize = snapgds
#extent
#if extentgds is not None:
#arcpy.env.extent = extentgds
arcpy.env.snapRaster = snapgds
except:
arcpy.ResetEnvironments()
tb = traceback.format_exc()
return
def getValueAtCentroid(self, inFeature, inRaster):
try:
sr = arcpy.Describe(inRaster).spatialReference
i=0
totvalue = 0
shapeName = arcpy.Describe(inFeature).shapeFieldName
with arcpy.da.SearchCursor(inFeature, "SHAPE@", spatial_reference=sr) as source_curs:
for row in source_curs:
i = i + 1
feature = row[0]
value = arcpy.GetCellValue_management(inRaster,str(feature.centroid.X) + ' ' + str(feature.centroid.Y)).getOutput(0)
totvalue = float(value)/i
return totvalue
except:
tb = traceback.format_exc()
self._sm("Failed to get raster at point " +tb,"ERROR",220)
raise Exception("Failed to get raster at point " +tb)
def getRasterStatistic(self,inRaster, maskFeature, statisticRule):
'''
computes the statistic
Statistic rules: MINIMUM —Smallest value of all cells in the input raster.
MAXIMUM —Largest value of all cells in the input raster.
MEAN —Average of all cells in the input raster.
STD —Standard deviation of all cells in the input raster.
UNIQUEVALUECOUNT —Number of unique values in the input raster.
TOP —Top or YMax value of the extent.
LEFT —Left or XMin value of the extent.
RIGHT —Right or XMax value of the extent.
BOTTOM —Bottom or YMin value of the extent.
CELLSIZEX —Cell size in the x-direction.
CELLSIZEY —Cell size in the y-direction.
VALUETYPE —Type of the cell value in the input raster:
0 = 1-bit
1 = 2-bit
2 = 4-bit
3 = 8-bit unsigned integer
4 = 8-bit signed integer
5 = 16-bit unsigned integer
6 = 16-bit signed integer
7 = 32-bit unsigned integer
8 = 32-bit signed integer
9 = 32-bit floating point
10 = 64-bit double precision
11 = 8-bit complex
12 = 16-bit complex
13 = 32-bit complex
14 = 64-bit complex
COLUMNCOUNT —Number of columns in the input raster.
ROWCOUNT —Number of rows in the input raster.
BANDCOUNT —Number of bands in the input raster.
ANYNODATA —Returns whether there is NoData in the raster.
ALLNODATA —Returns whether all the pixels are NoData. This is the same as ISNULL.
SENSORNAME —Name of the sensor.
PRODUCTNAME —Product name related to the sensor.
ACQUISITIONDATE —Date that the data was captured.
SOURCETYPE —Source type.
CLOUDCOVER —Amount of cloud cover as a percentage.
SUNAZIMUTH —Sun azimuth, in degrees.
SUNELEVATION —Sun elevation, in degrees.
SENSORAZIMUTH —Sensor azimuth, in degrees.
SENSORELEVATION —Sensor elevation, in degrees.
OFFNADIR —Off-nadir angle, in degrees.
WAVELENGTH —Wavelength range of the band, in nanometers.
'''
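        # Hypothetical usage sketch (raster and mask names are illustrative):
        #   meanElev = self.getRasterStatistic(demRaster, basinFC, "MEAN")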
outExtractByMask = None
try:
arcpy.env.cellSize = "MINOF"
sr = arcpy.Describe(inRaster).spatialReference
mask = self.ProjectFeature(maskFeature,sr)
self._LicenseManager("Spatial")
outExtractByMask = arcpy.sa.ExtractByMask(inRaster, mask)
value = arcpy.GetRasterProperties_management(outExtractByMask, statisticRule)
cellsize = float(arcpy.GetRasterProperties_management(inRaster, 'CELLSIZEX').getOutput(0))**2 # -- Added by JWX from below.
return float(value.getOutput(0))
except:
tb = traceback.format_exc()
self._sm("WARNING: Failed to get raster statistic computing centroid value.","WARNING",229)
cellsize = float(arcpy.GetRasterProperties_management(inRaster, 'CELLSIZEX').getOutput(0))**2
self._sm("Raster cell size: " + str(cellsize) , "WARNING")
            maskArea = self.getAreaSqMeter(maskFeature)  # use the mask feature passed in, not an undefined instance attribute
maskArea = maskArea*0.000001
centValue = self.getValueAtCentroid(maskFeature,inRaster) # try getting centroid
if centValue in ['NaN', 'none', 0]:
self._sm("WARNING: Raster statistic AND get value at centroid failed. Results likely erroneous.","WARNING")
return ((maskArea/cellsize)*centValue)*cellsize #Added cellsize multiplier -- JWX
finally:
outExtractByMask = None
mask = None
if sr is not None: del sr; sr = None
self._LicenseManager("Spatial",False)
def getPrismStatistic(self,inRaster, maskFeature, statisticRules, timeRange, timeMethod, dataPath):
'''
computes the statistic
Statistic rules: MINIMUM —Smallest value of all cells in the input raster.
MAXIMUM —Largest value of all cells in the input raster.
MEAN —Average of all cells in the input raster.
STD —Standard deviation of all cells in the input raster.
UNIQUEVALUECOUNT —Number of unique values in the input raster.
TOP —Top or YMax value of the extent.
LEFT —Left or XMin value of the extent.
RIGHT —Right or XMax value of the extent.
BOTTOM —Bottom or YMin value of the extent.
CELLSIZEX —Cell size in the x-direction.
CELLSIZEY —Cell size in the y-direction.
VALUETYPE —Type of the cell value in the input raster:
0 = 1-bit
1 = 2-bit
2 = 4-bit
3 = 8-bit unsigned integer
4 = 8-bit signed integer
5 = 16-bit unsigned integer
6 = 16-bit signed integer
7 = 32-bit unsigned integer
8 = 32-bit signed integer
9 = 32-bit floating point
10 = 64-bit double precision
11 = 8-bit complex
12 = 16-bit complex
13 = 32-bit complex
14 = 64-bit complex
COLUMNCOUNT —Number of columns in the input raster.
ROWCOUNT —Number of rows in the input raster.
BANDCOUNT —Number of bands in the input raster.
ANYNODATA —Returns whether there is NoData in the raster.
ALLNODATA —Returns whether all the pixels are NoData. This is the same as ISNULL.
Perhaps not relevant in this case
# SENSORNAME —Name of the sensor.
# PRODUCTNAME —Product name related to the sensor.
# ACQUISITIONDATE —Date that the data was captured.
# SOURCETYPE —Source type.
# CLOUDCOVER —Amount of cloud cover as a percentage.
# SUNAZIMUTH —Sun azimuth, in degrees.
# SUNELEVATION —Sun elevation, in degrees.
# SENSORAZIMUTH —Sensor azimuth, in degrees.
# SENSORELEVATION —Sensor elevation, in degrees.
# OFFNADIR —Off-nadir angle, in degrees.
# WAVELENGTH —Wavelength range of the band, in nanometers.
'''
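        # Hypothetical usage sketch (argument values are illustrative; the expected
        # timeRange/timeMethod formats are whatever PrismOps.get_statistic accepts):
        #   meanPpt = self.getPrismStatistic(prismGrid, basinFC, "MEAN", timeRange, timeMethod, prismDataPath)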
try:
arcpy.env.cellSize = "MINOF"
sr = arcpy.Describe(inRaster).spatialReference
mask = self.ProjectFeature(maskFeature,sr)
self._LicenseManager("Spatial")
outExtractByMask = arcpy.sa.ExtractByMask(inRaster, mask)
rules = ['MINIMUM', 'MAXIMUM','MEAN','STD','UNIQUEVALUECOUNT', 'SUM']
statisticRules = [x.upper() for x in statisticRules.split(';')]
computation = np.all([x in rules for x in statisticRules])
if computation == True:
rasterCellIdx = arcpy.RasterToNumPyArray(outExtractByMask, nodata_to_value = -9999.00)
return float(PrismOps.get_statistic(rasterCellIdx, dataPath, statisticRules,
timeRange, timeMethod))
else:
value = arcpy.GetRasterProperties_management(outExtractByMask, statisticRules[0])
return float(value.getOutput(0))
except:
tb = traceback.format_exc()
self._sm("Failed to get raster statistic " +tb,"ERROR",229)
cellsize = float(arcpy.GetRasterProperties_management(inRaster, 'CELLSIZEX').getOutput(0))**2
self._sm("Raster cell size: " + str(cellsize) , "ERROR")
# try getting centroid
return self.getValueAtCentroid(maskFeature,inRaster)
finally:
outExtractByMask = None
mask = None
if sr is not None: del sr; sr = None
self._LicenseManager("Spatial",False)
def getRasterPercentAreas(self,inRaster, maskFeature, uniqueRasterIDfield='VALUE',rasterValueField='COUNT'):
'''
computes the statistic
'''
results ={}
try:
arcpy.env.cellSize = "MINOF"
arcpy.env.overwriteOutput = True
#define land use key value dictionary with all possible values
for row in arcpy.da.SearchCursor(inRaster, uniqueRasterIDfield):
results[str(row[0])] = 0
#next row
#make maskRaster
outExtractByMask = arcpy.sa.ExtractByMask(inRaster, maskFeature)
#arcpy.BuildRasterAttributeTable_management(outExtractByMask, "Overwrite")
rows = arcpy.SearchCursor(outExtractByMask, "", "", "VALUE; COUNT")
for row in rows:
v = row.getValue("VALUE")
c = row.getValue("COUNT")
print v,c
##get total cell count for percent area computation
#field = arcpy.da.TableToNumPyArray('in_memory/mask.img', rasterValueField, skip_nulls=True)
#sum = field[rasterValueField].sum()
#loop over masked raster rows
for row in arcpy.da.SearchCursor('in_memory/mask123.img', [uniqueRasterIDfield, rasterValueField] ):
#get values
value, count = row
percentArea = float(count)
results[str(row[0])] = percentArea
#next row
except:
tb = traceback.format_exc()
self._sm("Error computing Raster Percent Area " +tb,"ERROR",289)
return results
def getRasterPercent(self,inRaster, maskFeature, ClassificationCodes=None, uniqueRasterIDfield='VALUE',rasterValueField='COUNT'):
'''
computes the raster % statistic
classificationCodes = comma separated classification ID's
rCode is the classification requested code[s] as comma separated string
'''
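        # Hypothetical usage sketch (raster and classification codes are illustrative):
        #   pctForest = self.getRasterPercent(nlcdRaster, basinFC, "41,42,43")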
attField = None
attExtract = None
constfield = None
const1 = None
mask = None
sr = None
try:
sr = arcpy.Describe(inRaster).spatialReference
mask = self.ProjectFeature(maskFeature,sr)
arcpy.env.cellSize = inRaster
arcpy.env.snapRaster = inRaster
try:
arcpy.env.mask = mask
except:
self._LicenseManager("Spatial")
outExtractByMask = ExtractByMask(inRaster, mask)
arcpy.env.mask = outExtractByMask
self._LicenseManager("Spatial", False)
arcpy.env.extent = arcpy.Describe(mask).extent
arcpy.env.outputCoordinateSystem = sr
# Check out the ArcGIS Spatial Analyst extension license
self._LicenseManager("Spatial")
#creates a constant of the enviroment
const1 = arcpy.sa.CreateConstantRaster(1)
const1.save(os.path.join(self._TempLocation,"const1.img"))
constfield = arcpy.da.TableToNumPyArray(os.path.join(self._TempLocation,"const1.img"), rasterValueField, skip_nulls=True)
totalCount = float(constfield[rasterValueField].sum())
if ClassificationCodes is not None:
SQLClause = " OR ".join(map(lambda s: uniqueRasterIDfield +"=" + s,ClassificationCodes.strip().split(",")))
else:
SQLClause = "VALUE > 0"
# Execute ExtractByAttributes
#ensure spatial analyst is checked out
attExtract = arcpy.sa.ExtractByAttributes(inRaster, SQLClause)
#must save raster
unique_name_img = arcpy.CreateUniqueName(os.path.join(self._TempLocation, "xxx.img"))
attExtract.save(unique_name_img)
if self.isRasterALLNoData(attExtract): return float(0)
#Does not respect the workspace dir, so need to set it explicitly
attField = arcpy.da.TableToNumPyArray(unique_name_img, rasterValueField, skip_nulls=True) #I assume I should use the same variable over, but it's unclear to me -- JWX
results = (float(attField[rasterValueField].sum())/totalCount)*100 #For a percentage the result should be multiplied by 100 -- JWX
except:
tb = traceback.format_exc()
self._sm("Error computing Raster Percent Area " +tb,"ERROR",289)
return self.getValueAtCentroid(mask, inRaster)
finally:
#local clean up
if attField is not None: del attField; attField = None
attExtract = None
if constfield is not None: del constfield; constfield = None
const1 = None
mask = None
if sr is not None: del sr; sr = None
self._LicenseManager("Spatial",False)
return results
def isRasterALLNoData(self,inRaster):
try:
#isNull method returns 1 if the input value is NoData, and 0 if not
if inRaster.maximum is None and inRaster.minimum is None: return True
else: return False;
except:
tb = traceback.format_exc()
self._sm("Error computing Raster Percent Area " +tb,"ERROR",289)
return True
#endregion
#region helper methods
def _LicenseManager(self, extension, checkout=True):
v = None
licAvailability = arcpy.CheckExtension(extension)
if(licAvailability == "Available"):
if(checkout):v = arcpy.CheckOutExtension(extension)
else: v= arcpy.CheckInExtension(extension)
else:raise Exception("Lisense "+ extension +" "+ licAvailability)
print v
def __getFieldMap(self, mappedFields,FieldIndex, newName, mergeRule):
'''
Maps the field
Merge rules:
First The first source value.
Last The last source value.
Join A concatenation of source values. You can use a delimiter to separate multiple input values.
Sum The sum total of all source values.
Mean The mean (average) of all source values.
Median The median (middle) of all source values.
Mode The source value that is the most common or has the highest frequency.
Min The minimum (lowest) source value.
Max The maximum (highest) source value.
Standard deviation The standard deviation of all source values.
Count The number of source values, excluding null values.
Range The absolute difference between the minimum and maximum source values.
'''
try:
fieldmap = mappedFields.getFieldMap(FieldIndex)
# Get the output field's properties as a field object
field = fieldmap.outputField
# Rename the field and pass the updated field object back into the field map
field.name = newName
field.aliasName = newName
fieldmap.outputField = field
fieldmap.mergeRule = mergeRule
return fieldmap
except:
tb = traceback.format_exc()
self._sm(tb+ " Failed to map "+ newName)
return None
def _sm(self,msg,type="INFO", errorID=0):
WiMLogging.sm(msg,type="INFO", errorID=0)
#endregion
```
#### File: WiMLib/WIMLib/UnitConverter.py
```python
linear = {
'KMtoM': 1000, 'KMtoCM': 100000, 'KMtoMM': 1000000, 'KMtoMI': 0.621371, 'KMtoFT': 3280.84, 'KMtoIN': 39370.1,
'MtoKM': 0.001, 'MtoCM': 100, 'MtoMM': 1000, 'MtoMI': 0.0006213712, 'MtoFT': 0.3048, 'MtoIN': 39.37007874,
'CMtoKM': 0.00001, 'CMtoM': 0.1, 'CMtoMM': 10, 'CMtoMI': 0.0000621371, 'CMtoFT': 0.0328084, 'CMtoIN': 0.393701,
'MMtoKM': 0.000006, 'MMtoM': 0.001, 'MMtoCM': 0.1, 'MMtoMI': 0.00000062137, 'MMtoFT': 0.00328084, 'MMtoIN': 0.0393701,
'MItoKM': 1.60934, 'MItoM': 1609.34, 'MItoCM': 160934, 'MItoMM': 1609000, 'MItoFT': 5280, 'MItoIN': 63360,
'FTtoKM': 0.0003048, 'FTtoM': 3.28083, 'FTtoCM': 30.48, 'FTtoMM': 304.8, 'FTtoMI': 0.000189394, 'FTtoIN': 12,
'INtoKM': 0.0000245, 'INtoM': 0.0254, 'INtoCM': 2.54, 'INtoMM': 25.4, 'INtoMI': 0.0000157828, 'INtoFT': 0.0833333,
}
#Rudimentary module
def Convert(value, inUnits, outUnits, dimentionality):
if inUnits == outUnits:
print "No Conversion Needed"
else:
print "In units do not match output units. A conversion will be made."
#Create a string for the conversion look up rule
conversionRule = '%s to %s' % (inUnits, outUnits) #Build conversion rule
    conversionRule = conversionRule.replace(" ", "") #Convert to look-up key (str.replace returns a new string)
#Choose dictionary based on being a line, area, or volume
if dimentionality == 1:
outValue = value * linear[conversionRule]
return outValue
elif dimentionality == 2:
outValue = value * area[conversionRule] #Needs to be added later
return outValue
elif dimentionality == 3:
outValue = value * volume[conversionRule] #Needs to be added later
return outValue
else:
print "Dimentionality was undefined."
``` |
{
"source": "jknielse/termtable",
"score": 2
} |
#### File: termtable/termtable/colour_helpers.py
```python
import itertools
from termtable.utils import RamCacheWrapper
_COLOUR_LISTS = {
'basic': [
('00', '000000'),
('01', '800000'),
('02', '008000'),
('03', '808000'),
('04', '000080'),
('05', '800080'),
('06', '008080'),
('07', 'c0c0c0'),
('08', '808080'),
('09', 'ff0000'),
('10', '00ff00'),
('11', 'ffff00'),
('12', '0000ff'),
('13', 'ff00ff'),
('14', '00ffff'),
('15', 'ffffff'),
],
'extended': [
('16', '000000'),
('17', '00005f'),
('18', '000087'),
('19', '0000af'),
('20', '0000d7'),
('21', '0000ff'),
('22', '005f00'),
('23', '005f5f'),
('24', '005f87'),
('25', '005faf'),
('26', '005fd7'),
('27', '005fff'),
('28', '008700'),
('29', '00875f'),
('30', '008787'),
('31', '0087af'),
('32', '0087d7'),
('33', '0087ff'),
('34', '00af00'),
('35', '00af5f'),
('36', '00af87'),
('37', '00afaf'),
('38', '00afd7'),
('39', '00afff'),
('40', '00d700'),
('41', '00d75f'),
('42', '00d787'),
('43', '00d7af'),
('44', '00d7d7'),
('45', '00d7ff'),
('46', '00ff00'),
('47', '00ff5f'),
('48', '00ff87'),
('49', '00ffaf'),
('50', '00ffd7'),
('51', '00ffff'),
('52', '5f0000'),
('53', '5f005f'),
('54', '5f0087'),
('55', '5f00af'),
('56', '5f00d7'),
('57', '5f00ff'),
('58', '5f5f00'),
('59', '5f5f5f'),
('60', '5f5f87'),
('61', '5f5faf'),
('62', '5f5fd7'),
('63', '5f5fff'),
('64', '5f8700'),
('65', '5f875f'),
('66', '5f8787'),
('67', '5f87af'),
('68', '5f87d7'),
('69', '5f87ff'),
('70', '5faf00'),
('71', '5faf5f'),
('72', '5faf87'),
('73', '5fafaf'),
('74', '5fafd7'),
('75', '5fafff'),
('76', '5fd700'),
('77', '5fd75f'),
('78', '5fd787'),
('79', '5fd7af'),
('80', '5fd7d7'),
('81', '5fd7ff'),
('82', '5fff00'),
('83', '5fff5f'),
('84', '5fff87'),
('85', '5fffaf'),
('86', '5fffd7'),
('87', '5fffff'),
('88', '870000'),
('89', '87005f'),
('90', '870087'),
('91', '8700af'),
('92', '8700d7'),
('93', '8700ff'),
('94', '875f00'),
('95', '875f5f'),
('96', '875f87'),
('97', '875faf'),
('98', '875fd7'),
('99', '875fff'),
('100', '878700'),
('101', '87875f'),
('102', '878787'),
('103', '8787af'),
('104', '8787d7'),
('105', '8787ff'),
('106', '87af00'),
('107', '87af5f'),
('108', '87af87'),
('109', '87afaf'),
('110', '87afd7'),
('111', '87afff'),
('112', '87d700'),
('113', '87d75f'),
('114', '87d787'),
('115', '87d7af'),
('116', '87d7d7'),
('117', '87d7ff'),
('118', '87ff00'),
('119', '87ff5f'),
('120', '87ff87'),
('121', '87ffaf'),
('122', '87ffd7'),
('123', '87ffff'),
('124', 'af0000'),
('125', 'af005f'),
('126', 'af0087'),
('127', 'af00af'),
('128', 'af00d7'),
('129', 'af00ff'),
('130', 'af5f00'),
('131', 'af5f5f'),
('132', 'af5f87'),
('133', 'af5faf'),
('134', 'af5fd7'),
('135', 'af5fff'),
('136', 'af8700'),
('137', 'af875f'),
('138', 'af8787'),
('139', 'af87af'),
('140', 'af87d7'),
('141', 'af87ff'),
('142', 'afaf00'),
('143', 'afaf5f'),
('144', 'afaf87'),
('145', 'afafaf'),
('146', 'afafd7'),
('147', 'afafff'),
('148', 'afd700'),
('149', 'afd75f'),
('150', 'afd787'),
('151', 'afd7af'),
('152', 'afd7d7'),
('153', 'afd7ff'),
('154', 'afff00'),
('155', 'afff5f'),
('156', 'afff87'),
('157', 'afffaf'),
('158', 'afffd7'),
('159', 'afffff'),
('160', 'd70000'),
('161', 'd7005f'),
('162', 'd70087'),
('163', 'd700af'),
('164', 'd700d7'),
('165', 'd700ff'),
('166', 'd75f00'),
('167', 'd75f5f'),
('168', 'd75f87'),
('169', 'd75faf'),
('170', 'd75fd7'),
('171', 'd75fff'),
('172', 'd78700'),
('173', 'd7875f'),
('174', 'd78787'),
('175', 'd787af'),
('176', 'd787d7'),
('177', 'd787ff'),
('178', 'd7af00'),
('179', 'd7af5f'),
('180', 'd7af87'),
('181', 'd7afaf'),
('182', 'd7afd7'),
('183', 'd7afff'),
('184', 'd7d700'),
('185', 'd7d75f'),
('186', 'd7d787'),
('187', 'd7d7af'),
('188', 'd7d7d7'),
('189', 'd7d7ff'),
('190', 'd7ff00'),
('191', 'd7ff5f'),
('192', 'd7ff87'),
('193', 'd7ffaf'),
('194', 'd7ffd7'),
('195', 'd7ffff'),
('196', 'ff0000'),
('197', 'ff005f'),
('198', 'ff0087'),
('199', 'ff00af'),
('200', 'ff00d7'),
('201', 'ff00ff'),
('202', 'ff5f00'),
('203', 'ff5f5f'),
('204', 'ff5f87'),
('205', 'ff5faf'),
('206', 'ff5fd7'),
('207', 'ff5fff'),
('208', 'ff8700'),
('209', 'ff875f'),
('210', 'ff8787'),
('211', 'ff87af'),
('212', 'ff87d7'),
('213', 'ff87ff'),
('214', 'ffaf00'),
('215', 'ffaf5f'),
('216', 'ffaf87'),
('217', 'ffafaf'),
('218', 'ffafd7'),
('219', 'ffafff'),
('220', 'ffd700'),
('221', 'ffd75f'),
('222', 'ffd787'),
('223', 'ffd7af'),
('224', 'ffd7d7'),
('225', 'ffd7ff'),
('226', 'ffff00'),
('227', 'ffff5f'),
('228', 'ffff87'),
('229', 'ffffaf'),
('230', 'ffffd7'),
('231', 'ffffff'),
],
'greyscale': [
('232', '080808'),
('233', '121212'),
('234', '1c1c1c'),
('235', '262626'),
('236', '303030'),
('237', '3a3a3a'),
('238', '444444'),
('239', '4e4e4e'),
('240', '585858'),
('241', '626262'),
('242', '6c6c6c'),
('243', '767676'),
('244', '808080'),
('245', '8a8a8a'),
('246', '949494'),
('247', '9e9e9e'),
('248', 'a8a8a8'),
('249', 'b2b2b2'),
('250', 'bcbcbc'),
('251', 'c6c6c6'),
('252', 'd0d0d0'),
('253', 'dadada'),
('254', 'e4e4e4'),
('255', 'eeeeee'),
]
}
def _build_map(colour_tuples):
bins = {}
l = None
for tup in colour_tuples:
if l is None:
l = len(tup[1])
component = tup[1][:2]
if component not in bins:
bins[component] = []
bins[component].append((tup[0], tup[1][2:]))
if l == 2:
for c in [key for key in bins]:
bins[c] = int(bins[c][0][0])
else:
for c in [key for key in bins]:
bins[c] = _build_map(bins[c])
return bins
def mult_colspace_to_hex(value):
return str(hex(int(value * 255)))[2:].rjust(2, '0')
def hex_colspace_to_mult(hex_str):
value = int(hex_str, 16)
return float(value)/255.0
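# For example, mult_colspace_to_hex(0.5) == '7f' and hex_colspace_to_mult('7f') ~= 0.498,
# i.e. these two helpers convert between the 0.0-1.0 multiplier space and two-digit hex.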
def hex_col_to_mult(hex_col, default_alpha=1):
hex_components = [hex_col[1:3], hex_col[3:5], hex_col[5:7]]
if len(hex_col) == 9:
hex_components.append(hex_col[7:9])
else:
hex_components.append(mult_colspace_to_hex(default_alpha))
return [hex_colspace_to_mult(s) for s in hex_components]
def mult_col_to_hex(mult_col):
return '#{}'.format(''.join(mult_colspace_to_hex(comp) for comp in mult_col))
@RamCacheWrapper
def blend_colours(fg_hex_col, bg_hex_col, default_fg_col='#ffffff', default_bg_col='#000000', default_fg_alpha=0.9, default_bg_alpha=1.0):
if not fg_hex_col or fg_hex_col < 0:
fg_hex_col = default_fg_col
if not bg_hex_col or bg_hex_col < 0:
bg_hex_col = default_bg_col
fg_col = hex_col_to_mult(fg_hex_col, default_fg_alpha)
bg_col = hex_col_to_mult(bg_hex_col, default_bg_alpha)
res_alpha = 1.0 - (1.0 - fg_col[3]) * (1.0 - bg_col[3])
res_col = [(fg_col[i] * fg_col[3] + bg_col[i] * bg_col[3] * (1 - fg_col[3])) / res_alpha for i in xrange(3)]
res_col.append(res_alpha)
return mult_col_to_hex(res_col)
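# Usage sketch: a half-transparent red over opaque blue blends to a purple, e.g.
#   blend_colours('#ff000080', '#0000ff')  ->  roughly '#80007fff' (exact digits depend on rounding)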
def colour_distance(hex_col_1, hex_col_2):
m1 = hex_col_to_mult(hex_col_1)[:3]
m2 = hex_col_to_mult(hex_col_2)[:3]
return sum((comp1 - comp2) ** 2 for comp1, comp2 in zip(m1, m2))
_BASIC_MAP = _build_map(_COLOUR_LISTS['basic'])
_EXTENDED_MAP = _build_map(_COLOUR_LISTS['extended'])
_GREYSCALE_MAP = _build_map(_COLOUR_LISTS['greyscale'])
_TERM_COL_MAP = dict(itertools.chain(*[_COLOUR_LISTS[key] for key in _COLOUR_LISTS]))
@RamCacheWrapper
def get_closest_term_colour(hex_col, cmap=None, include_details=False):
if hex_col.startswith('#'):
hex_col = hex_col[1:]
if len(hex_col) > 6:
hex_col = hex_col[:6]
if cmap is None:
if hex_col[0:2] == hex_col[2:4] and hex_col[2:4] == hex_col[4:6]:
return get_closest_term_colour(hex_col, cmap=_GREYSCALE_MAP, include_details=include_details)
return get_closest_term_colour(hex_col, cmap=_EXTENDED_MAP, include_details=include_details)
component = hex_col[:2]
c_val = int(component, 16)
closest = min((max(c_val - int(c, 16), int(c, 16) - c_val), c) for c in cmap)[1]
if isinstance(cmap[closest], int):
if include_details:
return cmap[closest], closest
return cmap[closest]
if include_details:
term_col, hex_part = get_closest_term_colour(hex_col[2:], cmap[closest], include_details=True)
if len(hex_col) == 6:
return term_col, '#' + closest + hex_part
return term_col, closest + hex_part
return get_closest_term_colour(hex_col[2:], cmap[closest])
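# Usage sketch: map an arbitrary hex colour onto the xterm-256 palette, e.g.
#   get_closest_term_colour('#ff8800')                        -> 208
#   get_closest_term_colour('#ff8800', include_details=True)  -> (208, '#ff8700')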
def term_col_to_hex(term_col):
return '#' + _TERM_COL_MAP[str(term_col)]
``` |
{
"source": "jknisley/aiocoap",
"score": 2
} |
#### File: contrib/oscore-plugtest/plugtest_common.py
```python
import shutil
from pathlib import Path
# When Python 3.5 support is dropped (and PyPy has evolved beyond that
# point), .as_posix() can be dropped
import cbor
from aiocoap import oscore
contextdir = Path(__file__).parent / 'common-context'
def get_security_context(contextname, role, contextcopy: Path):
"""Copy the base context (disambiguated by contextname in "ab", "cd") onto
the path in contextcopy if it does not already exist, and load the
resulting context with the given role. The context will be monkey-patched
for debugging purposes."""
if not contextcopy.exists():
contextcopy.parent.mkdir(parents=True, exist_ok=True)
shutil.copytree((contextdir / contextname).as_posix(), contextcopy.as_posix())
print("Context %s copied to %s" % (contextname, contextcopy))
secctx = oscore.FilesystemSecurityContext(contextcopy.as_posix(), role=role)
original_extract_external_aad = secctx._extract_external_aad
def _extract_extenal_aad(message, i_am_sender, request_partiv=None):
result = original_extract_external_aad(message, i_am_sender, request_partiv)
print("Verify: External AAD: bytes.fromhex(%r), %r"%(result.hex(), cbor.loads(result)))
return result
secctx._extract_external_aad = _extract_extenal_aad
return secctx
def additional_verify(description, lhs, rhs):
if lhs == rhs:
print("Additional verify passed: %s"%description)
else:
print("Additional verify failed (%s != %s): %s"%(lhs, rhs, description))
``` |
{
"source": "jknofe/pi_monitor",
"score": 3
} |
#### File: pi_monitor/RPLCD/contextmanagers.py
```python
from __future__ import print_function, division, absolute_import, unicode_literals
from contextlib import contextmanager
@contextmanager
def cursor(lcd, row, col):
"""Context manager to control cursor position.
Args:
lcd:
The CharLCD instance.
row:
The target row (0 index based).
col:
The target column (0 index based).
Example:
>>> with cursor(lcd, 2, 0):
            lcd.write_string('This is the third row')
"""
lcd.cursor_pos = (row, col)
yield
@contextmanager
def cleared(lcd):
"""Context manager to clear display before writing.
Example:
>>> with cleared(lcd):
lcd.write_string('Clear display, wooo!')
"""
lcd.clear()
yield
```
#### File: pi_monitor/RPLCD/lcd.py
```python
from __future__ import print_function, division, absolute_import, unicode_literals
import time
from collections import namedtuple
import RPi.GPIO as GPIO
from . import enum
# # # PYTHON 3 COMPAT # # #
try:
range = xrange
except NameError:
pass
# # # BIT PATTERNS # # #
# Commands
LCD_CLEARDISPLAY = 0x01
LCD_RETURNHOME = 0x02
LCD_ENTRYMODESET = 0x04
LCD_DISPLAYCONTROL = 0x08
LCD_CURSORSHIFT = 0x10
LCD_FUNCTIONSET = 0x20
LCD_SETCGRAMADDR = 0x40
LCD_SETDDRAMADDR = 0x80
# Flags for display entry mode
LCD_ENTRYRIGHT = 0x00
LCD_ENTRYLEFT = 0x02
LCD_ENTRYSHIFTINCREMENT = 0x01
LCD_ENTRYSHIFTDECREMENT = 0x00
# Flags for display on/off control
LCD_DISPLAYON = 0x04
LCD_DISPLAYOFF = 0x00
LCD_CURSORON = 0x02
LCD_CURSOROFF = 0x00
LCD_BLINKON = 0x01
LCD_BLINKOFF = 0x00
# Flags for display/cursor shift
LCD_DISPLAYMOVE = 0x08
LCD_CURSORMOVE = 0x00
LCD_MOVERIGHT = 0x04
LCD_MOVELEFT = 0x00
# Flags for function set
LCD_8BITMODE = 0x10
LCD_4BITMODE = 0x00
LCD_2LINE = 0x08
LCD_1LINE = 0x00
LCD_5x10DOTS = 0x04
LCD_5x8DOTS = 0x00
# Flags for RS pin modes
RS_INSTRUCTION = 0x00
RS_DATA = 0x01
# # # NAMEDTUPLES # # #
PinConfig = namedtuple('PinConfig', 'rs rw e d0 d1 d2 d3 d4 d5 d6 d7 backlight mode')
LCDConfig = namedtuple('LCDConfig', 'rows cols dotsize')
# # # ENUMS # # #
class Alignment(enum.Enum):
left = LCD_ENTRYLEFT
right = LCD_ENTRYRIGHT
class ShiftMode(enum.Enum):
cursor = LCD_ENTRYSHIFTDECREMENT
display = LCD_ENTRYSHIFTINCREMENT
class CursorMode(enum.Enum):
hide = LCD_CURSOROFF | LCD_BLINKOFF
line = LCD_CURSORON | LCD_BLINKOFF
blink = LCD_CURSOROFF | LCD_BLINKON
class BacklightMode(enum.Enum):
active_high = 1
active_low = 2
# # # HELPER FUNCTIONS # # #
def msleep(milliseconds):
"""Sleep the specified amount of milliseconds."""
time.sleep(milliseconds / 1000.0)
def usleep(microseconds):
"""Sleep the specified amount of microseconds."""
time.sleep(microseconds / 1000000.0)
# # # MAIN # # #
class CharLCD(object):
# Init, setup, teardown
def __init__(self, pin_rs=15, pin_rw=18, pin_e=16, pins_data=[21, 22, 23, 24],
pin_backlight=None, backlight_mode=BacklightMode.active_low,
backlight_enabled=True,
numbering_mode=GPIO.BOARD,
cols=20, rows=4, dotsize=8,
auto_linebreaks=True):
"""
Character LCD controller.
The default pin numbers are based on the BOARD numbering scheme (1-26).
You can save 1 pin by not using RW. Set ``pin_rw`` to ``None`` if you
want this.
Args:
pin_rs:
Pin for register select (RS). Default: 15.
pin_rw:
Pin for selecting read or write mode (R/W). Set this to
``None`` for read only mode. Default: 18.
pin_e:
Pin to start data read or write (E). Default: 16.
pins_data:
List of data bus pins in 8 bit mode (DB0-DB7) or in 4 bit mode
(DB4-DB7) in ascending order. Default: [21, 22, 23, 24].
pin_backlight:
Pin for controlling backlight on/off. Set this to ``None`` for
no backlight control. Default: None.
backlight_mode:
Set this to one of the BacklightMode enum values to configure the
operating control for the backlight. Has no effect if pin_backlight is ``None``
backlight_enabled:
Set this to True to turn on the backlight or False to turn it off.
Has no effect if pin_backlight is ``None``
numbering_mode:
Which scheme to use for numbering of the GPIO pins, either
``GPIO.BOARD`` or ``GPIO.BCM``. Default: ``GPIO.BOARD`` (1-26).
rows:
Number of display rows (usually 1, 2 or 4). Default: 4.
cols:
Number of columns per row (usually 16 or 20). Default 20.
dotsize:
Some 1 line displays allow a font height of 10px.
Allowed: 8 or 10. Default: 8.
auto_linebreaks:
Whether or not to automatically insert line breaks.
Default: True.
Returns:
A :class:`CharLCD` instance.
"""
assert dotsize in [8, 10], 'The ``dotsize`` argument should be either 8 or 10.'
# Set attributes
self.numbering_mode = numbering_mode
if len(pins_data) == 4: # 4 bit mode
self.data_bus_mode = LCD_4BITMODE
block1 = [None] * 4
elif len(pins_data) == 8: # 8 bit mode
self.data_bus_mode = LCD_8BITMODE
block1 = pins_data[:4]
else:
raise ValueError('There should be exactly 4 or 8 data pins.')
block2 = pins_data[-4:]
self.pins = PinConfig(rs=pin_rs, rw=pin_rw, e=pin_e,
d0=block1[0], d1=block1[1], d2=block1[2], d3=block1[3],
d4=block2[0], d5=block2[1], d6=block2[2], d7=block2[3],
backlight=pin_backlight,
mode=numbering_mode)
self.backlight_mode = backlight_mode
self.lcd = LCDConfig(rows=rows, cols=cols, dotsize=dotsize)
# Setup GPIO
GPIO.setmode(self.numbering_mode)
for pin in list(filter(None, self.pins))[:-1]:
GPIO.setup(pin, GPIO.OUT)
if pin_backlight is not None:
GPIO.setup(pin_backlight, GPIO.OUT)
# must enable the backlight AFTER setting up GPIO
self.backlight_enabled = backlight_enabled
# Setup initial display configuration
displayfunction = self.data_bus_mode | LCD_5x8DOTS
if rows == 1:
displayfunction |= LCD_1LINE
elif rows in [2, 4]:
# LCD only uses two lines on 4 row displays
displayfunction |= LCD_2LINE
if dotsize == 10:
# For some 1 line displays you can select a 10px font.
displayfunction |= LCD_5x10DOTS
# Create content cache
self._content = [[0x20] * cols for _ in range(rows)]
# Set up auto linebreaks
self.auto_linebreaks = auto_linebreaks
self.recent_auto_linebreak = False
# Initialization
msleep(50)
GPIO.output(self.pins.rs, 0)
GPIO.output(self.pins.e, 0)
if self.pins.rw is not None:
GPIO.output(self.pins.rw, 0)
# Choose 4 or 8 bit mode
if self.data_bus_mode == LCD_4BITMODE:
# Hitachi manual page 46
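            # The HD44780 powers up in 8-bit mode: sending 0x03 three times forces a
            # known 8-bit state, then 0x02 switches the interface to 4-bit mode.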
self._write4bits(0x03)
msleep(4.5)
self._write4bits(0x03)
msleep(4.5)
self._write4bits(0x03)
usleep(100)
self._write4bits(0x02)
elif self.data_bus_mode == LCD_8BITMODE:
# Hitachi manual page 45
self._write8bits(0x30)
msleep(4.5)
self._write8bits(0x30)
usleep(100)
self._write8bits(0x30)
else:
raise ValueError('Invalid data bus mode: {}'.format(self.data_bus_mode))
# Write configuration to display
self.command(LCD_FUNCTIONSET | displayfunction)
usleep(50)
# Configure display mode
self._display_mode = LCD_DISPLAYON
self._cursor_mode = int(CursorMode.hide)
self.command(LCD_DISPLAYCONTROL | self._display_mode | self._cursor_mode)
usleep(50)
# Clear display
self.clear()
# Configure entry mode
self._text_align_mode = int(Alignment.left)
self._display_shift_mode = int(ShiftMode.cursor)
self._cursor_pos = (0, 0)
self.command(LCD_ENTRYMODESET | self._text_align_mode | self._display_shift_mode)
usleep(50)
def close(self, clear=False):
if clear:
self.clear()
GPIO.cleanup()
# Properties
def _get_cursor_pos(self):
return self._cursor_pos
def _set_cursor_pos(self, value):
if not hasattr(value, '__getitem__') or len(value) != 2:
raise ValueError('Cursor position should be determined by a 2-tuple.')
if value[0] not in range(self.lcd.rows) or value[1] not in range(self.lcd.cols):
msg = 'Cursor position {pos!r} invalid on a {lcd.rows}x{lcd.cols} LCD.'
raise ValueError(msg.format(pos=value, lcd=self.lcd))
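        # HD44780 DDRAM row start addresses: rows 0 and 1 begin at 0x00 and 0x40;
        # on 4-row displays rows 2 and 3 continue those lines at an offset of `cols`.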
row_offsets = [0x00, 0x40, self.lcd.cols, 0x40 + self.lcd.cols]
self._cursor_pos = value
self.command(LCD_SETDDRAMADDR | row_offsets[value[0]] + value[1])
usleep(50)
cursor_pos = property(_get_cursor_pos, _set_cursor_pos,
doc='The cursor position as a 2-tuple (row, col).')
def _get_text_align_mode(self):
try:
return Alignment[self._text_align_mode]
except ValueError:
raise ValueError('Internal _text_align_mode has invalid value.')
def _set_text_align_mode(self, value):
if value not in Alignment:
raise ValueError('Cursor move mode must be of ``Alignment`` type.')
self._text_align_mode = int(value)
self.command(LCD_ENTRYMODESET | self._text_align_mode | self._display_shift_mode)
usleep(50)
text_align_mode = property(_get_text_align_mode, _set_text_align_mode,
doc='The text alignment (``Alignment.left`` or ``Alignment.right``).')
def _get_write_shift_mode(self):
try:
return ShiftMode[self._display_shift_mode]
except ValueError:
raise ValueError('Internal _display_shift_mode has invalid value.')
def _set_write_shift_mode(self, value):
if value not in ShiftMode:
raise ValueError('Write shift mode must be of ``ShiftMode`` type.')
self._display_shift_mode = int(value)
self.command(LCD_ENTRYMODESET | self._text_align_mode | self._display_shift_mode)
usleep(50)
write_shift_mode = property(_get_write_shift_mode, _set_write_shift_mode,
doc='The shift mode when writing (``ShiftMode.cursor`` or ``ShiftMode.display``).')
def _get_display_enabled(self):
return self._display_mode == LCD_DISPLAYON
def _set_display_enabled(self, value):
self._display_mode = LCD_DISPLAYON if value else LCD_DISPLAYOFF
self.command(LCD_DISPLAYCONTROL | self._display_mode | self._cursor_mode)
usleep(50)
display_enabled = property(_get_display_enabled, _set_display_enabled,
doc='Whether or not to display any characters.')
def _get_cursor_mode(self):
try:
return CursorMode[self._cursor_mode]
except ValueError:
raise ValueError('Internal _cursor_mode has invalid value.')
def _set_cursor_mode(self, value):
if value not in CursorMode:
raise ValueError('Cursor mode must be of ``CursorMode`` type.')
self._cursor_mode = int(value)
self.command(LCD_DISPLAYCONTROL | self._display_mode | self._cursor_mode)
usleep(50)
cursor_mode = property(_get_cursor_mode, _set_cursor_mode,
doc='How the cursor should behave (``CursorMode.hide``, ' +
'``CursorMode.line`` or ``CursorMode.blink``).')
def _get_backlight_enabled(self):
# We could probably read the current GPIO output state via sysfs, but
# for now let's just store the state in the class
if self.pins.backlight is None:
raise ValueError('You did not configure a GPIO pin for backlight control!')
return bool(self._backlight_enabled)
def _set_backlight_enabled(self, value):
if self.pins.backlight is None:
raise ValueError('You did not configure a GPIO pin for backlight control!')
if not isinstance(value, bool):
raise ValueError('backlight_enabled must be set to ``True`` or ``False``.')
self._backlight_enabled = value
GPIO.output(self.pins.backlight, value ^ (self.backlight_mode is BacklightMode.active_low))
backlight_enabled = property(_get_backlight_enabled, _set_backlight_enabled,
doc='Whether or not to turn on the backlight.')
# High level commands
def write_string(self, value):
"""Write the specified unicode string to the display.
To control multiline behavior, use newline (\n) and carriage return
(\r) characters.
Lines that are too long automatically continue on next line, as long as
``auto_linebreaks`` has not been disabled.
Make sure that you're only passing unicode objects to this function. If
you're dealing with bytestrings (the default string type in Python 2),
convert it to a unicode object using the ``.decode(encoding)`` method
and the appropriate encoding. Example for UTF-8 encoded strings:
.. code::
>>> bstring = 'Temperature: 30°C'
>>> bstring
'Temperature: 30\xc2\xb0C'
>>> bstring.decode('utf-8')
u'Temperature: 30\xb0C'
Only characters with an ``ord()`` value between 0 and 255 are currently
supported.
"""
ignored = None # Used for ignoring manual linebreaks after auto linebreaks
for char in value:
# Write regular chars
if char not in '\n\r':
self.write(ord(char))
ignored = None
continue
# If an auto linebreak happened recently, ignore this write.
if self.recent_auto_linebreak is True:
# No newline chars have been ignored yet. Do it this time.
if ignored is None:
ignored = char
continue
# A newline character has been ignored recently. If the current
# character is different, ignore it again. Otherwise, reset the
# ignored character tracking.
if ignored != char: # A carriage return and a newline
ignored = None # Reset ignore list
continue
# Handle newlines and carriage returns
row, col = self.cursor_pos
if char == '\n':
if row < self.lcd.rows - 1:
self.cursor_pos = (row + 1, col)
else:
self.cursor_pos = (0, col)
elif char == '\r':
if self.text_align_mode is Alignment.left:
self.cursor_pos = (row, 0)
else:
self.cursor_pos = (row, self.lcd.cols - 1)
def clear(self):
"""Overwrite display with blank characters and reset cursor position."""
self.command(LCD_CLEARDISPLAY)
self._cursor_pos = (0, 0)
self._content = [[0x20] * self.lcd.cols for _ in range(self.lcd.rows)]
msleep(2)
def home(self):
"""Set cursor to initial position and reset any shifting."""
self.command(LCD_RETURNHOME)
self._cursor_pos = (0, 0)
msleep(2)
def shift_display(self, amount):
"""Shift the display. Use negative amounts to shift left and positive
amounts to shift right."""
if amount == 0:
return
direction = LCD_MOVERIGHT if amount > 0 else LCD_MOVELEFT
for i in range(abs(amount)):
self.command(LCD_CURSORSHIFT | LCD_DISPLAYMOVE | direction)
usleep(50)
def create_char(self, location, bitmap):
"""Create a new character.
The HD44780 supports up to 8 custom characters (location 0-7).
Args:
location:
The place in memory where the character is stored. Values need
to be integers between 0 and 7.
bitmap:
The bitmap containing the character. This should be a tuple of
8 numbers, each representing a 5 pixel row.
Raises:
AssertionError:
Raised when an invalid location is passed in or when bitmap
has an incorrect size.
Example::
>>> smiley = (
... 0b00000,
... 0b01010,
... 0b01010,
... 0b00000,
... 0b10001,
... 0b10001,
... 0b01110,
... 0b00000,
... )
>>> lcd.create_char(0, smiley)
"""
assert 0 <= location <= 7, 'Only locations 0-7 are valid.'
assert len(bitmap) == 8, 'Bitmap should have exactly 8 rows.'
# Store previous position
pos = self.cursor_pos
# Write character to CGRAM
self.command(LCD_SETCGRAMADDR | location << 3)
for row in bitmap:
self._send(row, RS_DATA)
# Restore cursor pos
self.cursor_pos = pos
# Mid level commands
def command(self, value):
"""Send a raw command to the LCD."""
self._send(value, RS_INSTRUCTION)
def write(self, value):
"""Write a raw byte to the LCD."""
# Get current position
row, col = self._cursor_pos
# Write byte if changed
if self._content[row][col] != value:
self._send(value, RS_DATA)
self._content[row][col] = value # Update content cache
unchanged = False
else:
unchanged = True
# Update cursor position.
if self.text_align_mode is Alignment.left:
if self.auto_linebreaks is False or col < self.lcd.cols - 1:
# No newline, update internal pointer
newpos = (row, col + 1)
if unchanged:
self.cursor_pos = newpos
else:
self._cursor_pos = newpos
self.recent_auto_linebreak = False
else:
# Newline, reset pointer
if row < self.lcd.rows - 1:
self.cursor_pos = (row + 1, 0)
else:
self.cursor_pos = (0, 0)
self.recent_auto_linebreak = True
else:
if self.auto_linebreaks is False or col > 0:
# No newline, update internal pointer
newpos = (row, col - 1)
if unchanged:
self.cursor_pos = newpos
else:
self._cursor_pos = newpos
self.recent_auto_linebreak = False
else:
# Newline, reset pointer
if row < self.lcd.rows - 1:
self.cursor_pos = (row + 1, self.lcd.cols - 1)
else:
self.cursor_pos = (0, self.lcd.cols - 1)
self.recent_auto_linebreak = True
# Low level commands
def _send(self, value, mode):
"""Send the specified value to the display with automatic 4bit / 8bit
selection. The rs_mode is either ``RS_DATA`` or ``RS_INSTRUCTION``."""
# Choose instruction or data mode
GPIO.output(self.pins.rs, mode)
# If the RW pin is used, set it to low in order to write.
if self.pins.rw is not None:
GPIO.output(self.pins.rw, 0)
# Write data out in chunks of 4 or 8 bit
if self.data_bus_mode == LCD_8BITMODE:
self._write8bits(value)
else:
self._write4bits(value >> 4)
self._write4bits(value)
def _write4bits(self, value):
"""Write 4 bits of data into the data bus."""
for i in range(4):
bit = (value >> i) & 0x01
GPIO.output(self.pins[i + 7], bit)
self._pulse_enable()
def _write8bits(self, value):
"""Write 8 bits of data into the data bus."""
for i in range(8):
bit = (value >> i) & 0x01
GPIO.output(self.pins[i + 3], bit)
self._pulse_enable()
def _pulse_enable(self):
"""Pulse the `enable` flag to process data."""
GPIO.output(self.pins.e, 0)
usleep(1)
GPIO.output(self.pins.e, 1)
usleep(1)
GPIO.output(self.pins.e, 0)
usleep(100) # commands need > 37us to settle
``` |
{
"source": "jknostman3/tile-generator",
"score": 2
} |
#### File: ci/deployment-tests/app1_deploymenttest.py
```python
import unittest
import json
import sys
import os
import requests
from tile_generator import opsmgr
class VerifyApp1(unittest.TestCase):
def setUp(self):
self.cfinfo = opsmgr.get_cfinfo()
self.hostname = 'tg-test-app1.' + self.cfinfo['apps_domain']
self.url = 'http://' + self.hostname
def test_responds_to_hello(self):
headers = { 'Accept': 'application/json' }
response = requests.get(self.url + '/hello', headers=headers)
response.raise_for_status()
def test_receives_custom_properties(self):
headers = { 'Accept': 'application/json' }
response = requests.get(self.url + '/env', headers=headers)
response.raise_for_status()
env = response.json()
self.assertEqual(env.get('AUTHOR'), 'Tile Ninja')
self.assertEqual(env.get('CUSTOMER_NAME'), "<NAME>")
self.assertEqual(env.get('STREET_ADDRESS'), 'Cartaway Alley')
self.assertEqual(env.get('CITY'), 'New Jersey')
self.assertEqual(env.get('ZIP_CODE'), '90310')
self.assertEqual(env.get('COUNTRY'), 'country_us')
def test_receives_expected_services(self):
headers = { 'Accept': 'application/json' }
response = requests.get(self.url + '/env', headers=headers)
response.raise_for_status()
env = response.json()
vcap_services = json.loads(env.get('VCAP_SERVICES'))
def test_receives_expected_collection(self):
headers = { 'Accept': 'application/json' }
response = requests.get(self.url + '/env', headers=headers)
response.raise_for_status()
env = response.json()
example_collection = json.loads(env.get('EXAMPLE_COLLECTION'))
self.assertTrue(isinstance(example_collection, list))
self.assertEquals(len(example_collection), 2)
def test_receives_expected_selector(self):
headers = { 'Accept': 'application/json' }
response = requests.get(self.url + '/env', headers=headers)
response.raise_for_status()
env = response.json()
example_selector = json.loads(env.get('EXAMPLE_SELECTOR'))
self.assertTrue(isinstance(example_selector, dict))
self.assertEquals(example_selector['value'], 'Filet Mignon')
# self.assertEquals(example_selector['selected_option']['rarity_dropdown'], 'medium')
def test_has_versioned_name(self):
headers = { 'Accept': 'application/json' }
response = requests.get(self.url + '/env', headers=headers)
response.raise_for_status()
env = response.json()
vcap_application = json.loads(env.get('VCAP_APPLICATION'))
name = vcap_application.get('application_name')
self.assertTrue(name.startswith('tg_test_app1-'))
def test_is_in_correct_space(self):
headers = { 'Accept': 'application/json' }
response = requests.get(self.url + '/env', headers=headers)
response.raise_for_status()
env = response.json()
vcap_application = json.loads(env.get('VCAP_APPLICATION'))
space= vcap_application.get('space_name')
self.assertEquals(space, 'test-tile-space')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jknox13/iterative_hierarchical_clustering",
"score": 2
} |
#### File: iterative_hierarchical_clustering/flithic/clustering.py
```python
import numpy as np
from collections import namedtuple, deque
from scipy.cluster.hierarchy import ward, fcluster
from scipy.spatial.distance import pdist
from scipy.stats import skew, zscore
from sklearn.svm import SVC
from sklearn.base import BaseEstimator
from sklearn.utils import check_array
from sklearn.model_selection import cross_val_score, ShuffleSplit, StratifiedShuffleSplit
from sklearn.utils.validation import check_is_fitted
from tree import Node, iter_bft, iter_tree
def _parameter_skew_check(X, alpha=1e-8):
"""Returns X or Log(X) based on skew.
Parameters
----------
X
alpha
Returns
-------
"""
C = X.copy()
for j, col in enumerate(X.T):
# each parameter, check skew
log_col = np.log10(col+alpha)
if skew(col) > skew(log_col):
# use log_transformed parameter
C[:,j] = log_col
return C
def _ward_cluster(X):
"""Clusters 1-corr using Ward distance
Parameters
----------
X
Returns
-------
"""
# pairwise (1-corr) of zscores
D = pdist( X, metric="correlation" )
# return top branch split using ward linkage
return fcluster( ward(D), 2, criterion="maxclust" )
def _test_clusters(X, clusters, stratified=False):
"""Clusters 1-corr using Ward distance
Parameters
----------
X
Returns
-------
"""
# C-SVM with radial kernel (sklearn.svm.SVC defaults)
clf = SVC(C=1.0, kernel="rbf", gamma="auto")
# 100 random 50/50 splits
if stratified:
splitter = StratifiedShuffleSplit
else:
splitter = ShuffleSplit
cv = splitter(n_splits=100, train_size=0.5, test_size=0.5)
# return score
return cross_val_score(clf, X, clusters, scoring="accuracy", cv=cv)
class GLIFClustering(BaseEstimator):
"""Clustering described in ...
Recursively splits the data into clusters that satisfy ...
NOTE : Docs taken heavily from:
sklearn.cluster.hierarchical.AgglomerativeClustering
Parameters
----------
tol : float, optional (default=0.80)
Tolerance of the split...
Attributes
----------
X_ :
labels_ :
References
----------
CITE GLIF PAPER
Examples
--------
>>> from cortical_paper.clustering import GLIFClustering
>>>
"""
Cluster = namedtuple("Cluster", "indices, score, name, size")
Leaf = namedtuple("Leaf", "name, indices")
Split = namedtuple("Split", "name, children, score, size")
def __init__(self, tol=0.80, stratified=False):
self.tol = tol
self.stratified = stratified
def _fit_recursive(self, X, cluster, node=None):
"""Recursive helper for fit()
Parameters
----------
Returns
-------
"""
if X.shape[0] < 2:
# base case
# need at least 2 obs to cluster!
return Node(self.Leaf(cluster.name, cluster.indices))
# ---------------------------------------------------------------------
# use ward w/ (1-corr) to hierarchically cluster to split
# ---------------------------------------------------------------------
split = _ward_cluster(X)
# ---------------------------------------------------------------------
# train/test svm (radial kernel) on 100 random 50/50 splits of the clustering
# ---------------------------------------------------------------------
try:
scores = _test_clusters(X, split, stratified=self.stratified)
except ValueError:
# base case
# too few of the second class to split (say 9:1 or something)
# assign entire population to terminal cluster
return Node(self.Leaf(cluster.name, cluster.indices))
# ---------------------------------------------------------------------
# if min(score) >= tol (0.80 in glif paper), recursively repeat 3-5 on
# each subpopulation
# ---------------------------------------------------------------------
score = scores.min()
if score < self.tol:
# base case
# assign entire population to terminal cluster
return Node(self.Leaf(cluster.name, cluster.indices))
# recursively split
a = np.where(split == 1)
b = np.where(split == 2)
A = self.Cluster(cluster.indices[a], score, cluster.name + "1", len(a[0]))
B = self.Cluster(cluster.indices[b], score, cluster.name + "2", len(b[0]))
# add score to binary tree
if node is None:
node = Node(self.Split(cluster.name, (A.name, B.name), score, cluster.size))
else:
# tree is built pre order
raise ValueError("Should be null!!!")
node.left = self._fit_recursive(X[a], A, node.left)
node.right = self._fit_recursive(X[b], B, node.right)
return node
def _fit(self, X):
"""wrapper, inits cluster"""
# for linkage
cluster = self.Cluster( np.arange(self.n_obs_), score=0.0,
name="", size=self.n_obs_)
return self._fit_recursive(X, cluster)
def fit(self, X, y=None):
"""Fit the hierarchical clustering on the data
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The samples a.k.a. observations.
y : Ignored
Returns
-------
self
"""
X_ = check_array(X, ensure_min_samples=2, estimator=self)
self.n_obs_, self.n_params_ = X_.shape
X_ = _parameter_skew_check(X_)
# z-score all params
X_ = zscore(X_)
# recursively split in hierarchical fashion
self._cluster_tree = self._fit(X_)
return self
def _get_labels(self):
"""..."""
check_is_fitted(self, ["_cluster_tree", "n_obs_"])
i = 1
labels = np.empty(self.n_obs_)
for cluster in iter_tree(self._cluster_tree, order="pre"):
if isinstance(cluster, self.Leaf):
labels[cluster.indices] = i
i += 1
return labels
@property
def labels_(self):
try:
return self._labels
except AttributeError:
self._labels = self._get_labels()
return self._labels
def _get_linkage(self):
"""...:"""
# NOTE : may rewrite zrow to be row index, (would be zrow+56 or smth)
check_is_fitted(self, "_cluster_tree")
FILL=0. #1e-20 # try 0
# returned linkage (n_obs-1)x4
Z = []
# iterates through Z, used for referencing previously formed clusters
z_row = self.n_obs_ - 1
name_row_map = {}
# get leaves, splits in reverse depth-first traversal order
leaves, splits = [], []
for cluster in iter_bft(self._cluster_tree, reverse=True):
if isinstance(cluster, self.Leaf):
leaves.append(cluster)
else:
splits.append(cluster)
# NOTE : currently generates linkages sequentially
# to get the dendrogram to point to center of clusters,
# will need to start in the middle
for leaf in leaves:
# fencepost
a, b = leaf.indices[:2]
tmp = [[a, b, FILL*z_row, 2]] #scipy needs monotonic distances
z_row += 1
for j, index in enumerate(leaf.indices[2:]):
# assign everyother observation to fencepost cluster
row = [index, z_row, FILL*z_row, 3+j]
tmp.append(row)
z_row += 1
# update
Z.extend(tmp)
name_row_map[leaf.name] = z_row
# DISTANCES ARE CUMULATIVE!!!
name_distance_map = dict()
for split in splits:
# get distance
try:
distance = name_distance_map[split.name]
except KeyError:
# no children splits
distance = split.score
# parent
parent = split.name[:-1]
try:
name_distance_map[parent] += distance
except KeyError:
name_distance_map[parent] = distance
# indices when children were formed
a, b = map(name_row_map.get, split.children)
row = [a, b, distance, split.size]
# update
Z.append(row)
z_row += 1
name_row_map[split.name] = z_row
# must contain doubles for scipy
return np.asarray(Z, dtype=np.float64)
@property
def linkage_(self):
try:
return self._linkage
except AttributeError:
self._linkage = self._get_linkage()
return self._linkage
```
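A minimal usage sketch for `GLIFClustering` on synthetic data (assuming it is run from the `flithic/` directory so that `clustering.py` and `tree.py` are importable; the two blobs are illustrative only, not data from the paper):

```python
import numpy as np
from clustering import GLIFClustering

# two well-separated parameter clouds
rng = np.random.RandomState(0)
X = np.vstack([rng.rand(60, 5), rng.rand(60, 5) + 5.0])

clf = GLIFClustering(tol=0.80).fit(X)
print(clf.labels_)            # terminal cluster id for every observation
print(len(set(clf.labels_)))  # number of terminal clusters found
```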
#### File: flithic/distance/setup.py
```python
from __future__ import division, print_function, absolute_import
from os.path import join
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
from numpy.distutils.misc_util import get_info as get_misc_info
from numpy.distutils.system_info import get_info as get_sys_info
config = Configuration('distance', parent_package, top_path)
config.add_data_dir('tests')
# _distance_wrap
config.add_extension('_distance_wrap',
sources=[join('src', 'distance_wrap.c')],
depends=[join('src', 'distance_impl.h')],
include_dirs=[get_numpy_include_dirs()],
extra_info=get_misc_info("npymath"))
config.add_extension('_hausdorff', sources=['_hausdorff.c'])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
```
#### File: iterative_hierarchical_clustering/tests/__init__.py
```python
import os
import sys
def entrypoint_exists(entry_point):
executable_dir = os.path.dirname(sys.executable)
return os.path.exists(os.path.join(executable_dir, entry_point))
``` |
{
"source": "jknox13/python-nnls",
"score": 2
} |
#### File: nnls/tests/test_active_set.py
```python
import pytest
import scipy.optimize as sopt
import scipy.sparse as sp
import numpy as np
from numpy.testing import assert_array_almost_equal
from nnls import block_pivoting, lawson_hanson
def test_block_pivoting():
# design matrix size (square)
n = 100
# ------------------------------------------------------------------------
# test same output as scipy.nnls
# eye with noise (only useful for full rank A)
rng = np.random.RandomState(10293)
A = np.eye(n) + 0.1*rng.rand(n,n)
b = np.arange(n)
scipy_sol = sopt.nnls(A, b)[0]
bp_sol = block_pivoting(A, b)
assert_array_almost_equal(scipy_sol, bp_sol)
# ------------------------------------------------------------------------
# test sparse
A = np.eye(n)
idx = rng.choice(np.arange(n**2), int(0.9*n**2), replace=False)
noise = 0.1*rng.rand(n,n)
noise[np.unravel_index(idx, (n,n))] = 0
A += noise
csr_A = sp.csr_matrix(A.copy())
csc_A = sp.csc_matrix(A.copy())
dense_sol = block_pivoting(A, b)
csr_sol = block_pivoting(csr_A, b)
csc_sol = block_pivoting(csc_A, b)
# check cs*_A still sparse
assert sp.issparse(csr_A)
assert sp.issparse(csc_A)
assert_array_almost_equal(csr_sol, dense_sol)
assert_array_almost_equal(csc_sol, dense_sol)
def test_lawson_hanson():
# design matrix size (square)
n = 100
# ------------------------------------------------------------------------
# test same output as scipy.nnls
# A is the n x n Hilbert matrix
A = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
b = np.ones(n)
scipy_sol = sopt.nnls(A, b)[0]
lh_sol = lawson_hanson(A, b)
assert_array_almost_equal(scipy_sol, lh_sol)
# ------------------------------------------------------------------------
# test sparse
rng = np.random.RandomState(10293)
A = np.eye(n)
idx = rng.choice(np.arange(n**2), int(0.9*n**2), replace=False)
noise = 0.1*rng.rand(n,n)
noise[np.unravel_index(idx, (n,n))] = 0
A += noise
b = np.arange(n)
csr_A = sp.csr_matrix(A.copy())
csc_A = sp.csc_matrix(A.copy())
dense_sol = lawson_hanson(A, b)
csr_sol = lawson_hanson(csr_A, b)
csc_sol = lawson_hanson(csc_A, b)
# check cs*_A still sparse
assert sp.issparse(csr_A)
assert sp.issparse(csc_A)
assert_array_almost_equal(csr_sol, dense_sol)
assert_array_almost_equal(csc_sol, dense_sol)
``` |
{
"source": "jknudstrup/dsf_info",
"score": 2
} |
#### File: dsf_info/dsfinfo/dsfinfo.py
```python
def info():
print("\nSome important links: \n \n"
"<NAME>'s DSF Curriculum Page: \n"
"https://github.com/knathanieltucker/data-science-foundations \n \n"
"General Assembly's Course Info Page: \n"
"https://git.generalassemb.ly/nate/course-info/tree/master")
``` |
{
"source": "jknyght9/nut-forwarder-influxdb-python",
"score": 3
} |
#### File: nut-forwarder-influxdb-python/lib/config.py
```python
import json
config = {}
def getNut():
name = input("NUT Server Name: ")
server = input("NUT Server IP address: ")
username = input("Username: ")
password = input("Password: ")
return {"name": name, "server": server, "username": username, "password": password}
def getInfluxDb():
influxdb = {}
while (True):
version2 = input("InfluxDB Version2: (Y|N): ")
if version2.upper() == "Y":
server = input("InfluxDB Server IP address: ")
token = input("Token: ")
bucket = input("Bucket: ")
organization = input("Organization: ")
influxdb = {"server": server, "version": "2", "token": token, "bucket": bucket, "organization": organization}
break
elif version2.upper() == "N":
server = input("InfluxDB Server IP address: ")
database = input("Database: ")
username = input("Username: ")
password = input("Password: ")
influxdb = {"server": server, "version": "1", "database": database, "username": username, "password": password}
break
return influxdb
def generateConfig():
nutserver = []
while(True):
nutserver.append(getNut())
prompt = input("More NUT Servers? (Y|N): ")
if prompt.upper() == "N":
break
influxserver = getInfluxDb()
config = {"nutservers": nutserver, "influxserver": influxserver}
with open("config.json", "w") as f:
f.write(json.dumps(config))
f.close()
```
#### File: jknyght9/nut-forwarder-influxdb-python/nut-forwarder-influxdb.py
```python
import argparse
import json
import logging
import os
import sched
import sys
from telnetlib import EC
import time
from datetime import datetime
from lib.config import generateConfig
from lib.influx import send, test
from lib.nut import getNutData
s = sched.scheduler(time.time, time.sleep)
def pushNut2Influx(sc, nutservers, influxserver):
nutdata = getNutData(nutservers)
for nd in nutdata:
send(influxserver, {
"measurement": "ups",
"tags": {
"server": nd["server"],
"name": nd["name"],
"description": nd["description"],
"serial": nd["serial"]
},
"fields": nd["data"],
"time": datetime.utcnow()
})
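    # re-arm the scheduler so the next poll runs in 60 seconds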
s.enter(60, 1, pushNut2Influx, (sc, nutservers, influxserver,))
def main():
parser = argparse.ArgumentParser(description="Forwards NUT data to InfluxDB")
group = parser.add_mutually_exclusive_group()
group.add_argument("-g", "--generateconfig", action="store_true", help="Generate configuration file")
group.add_argument("-r", "--run", action="store_true", help="Run the program")
parser.add_argument("-d", "--debug", action="store_true", help="Run in debug mode")
options = parser.parse_args()
logginglevel = logging.INFO
if options.debug:
logginglevel = logging.DEBUG
logger = logging.getLogger("nut-forwarder-influxdb")
logger.setLevel(logginglevel)
formatter = logging.Formatter(
'%(asctime)s %(name)s [%(levelname)s] %(message)s')
cs = logging.StreamHandler()
cs.setFormatter(formatter)
logger.addHandler(cs)
if options.generateconfig:
generateConfig()
elif options.run:
if os.path.exists("config.json"):
config = None
with open("config.json", "rb") as f:
config = f.read()
config = json.loads(config)
f.close()
if config["influxserver"] is not None and config["nutservers"] is not None:
logger.info("connecting to Influx server %s", config["influxserver"]["server"])
if test(config["influxserver"]):
logger.info("logging data to Influx server")
if getNutData(config["nutservers"]) is not None:
logger.info("connected to NUT server(s)")
s.enter(0, 1, pushNut2Influx, (s, config["nutservers"], config["influxserver"],))
s.run()
else:
exit()
else:
exit()
else:
logger.error("configuration file does not exist. Rerun this program with the '-g' parameter.")
exit()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("Shutting down")
try:
sys.exit(0)
except SystemExit:
os._exit(0)
``` |
{
"source": "jko0401/IT299-project",
"score": 3
} |
#### File: IT299-project/root/app.py
```python
import dash
import dash_core_components as dcc, dash_table
import dash_html_components as html
from dash.dependencies import Input, Output
from sklearn.decomposition import PCA
import plotly.express as px
import db
import pandas as pd
from labels import FEATURES, SUMMARY, SCATTER
app = dash.Dash(__name__)
df = db.main_db()
df = df.convert_dtypes()
df['datepublished'] = pd.to_datetime(df['datepublished'])
df['s_release_date'] = pd.to_datetime(df['s_release_date'])
def create_options(name, values):
    """Build the list of dropdown option dicts for the given values."""
    return [{'label': a, 'value': a} for a in values]
artist_options = create_options('artist_options', df[~(df['channelname'] == 'BassMusicMovement')]['artistname'].unique())
channel_options = create_options('channel_options', df[~(df['channelname'] == 'BassMusicMovement')]['channelname'].unique())
features = [
{"label": str(FEATURES[feature]), "value": str(feature)} for feature in FEATURES
]
scatter = [
{"label": str(SCATTER[feature]), "value": str(feature)} for feature in SCATTER
]
summary = [
{"label": str(SUMMARY[feature]), "value": str(feature)} for feature in SUMMARY
]
audio_features = ['danceability', 'energy', 'music_key', 'loudness', 'music_mode', 'speechiness',
'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo',
'time_signature']
min_s_date = '2009-1-1'
max_s_date = max(df['s_release_date'])
min_y_date = min(df['datepublished'])
max_y_date = max(df['datepublished'])
color_scheme = ['#00adb5', '#E74C3C', '#3498DB', '#F39C12', '#9B59B6']
app.layout = html.Div([
html.Div(id='main-header', children=[
html.Header([
html.H1('Trap and Dubstep Exploration through YouTube and Spotify Data')
])
], className='container'),
html.Div(id='main-app', children=[
dcc.Tabs([
dcc.Tab(id='intro-tab', label='Introduction', children=[
html.Div([
html.H2('Welcome!'),
html.P('This dashboard allows you to explore the electronic bass music genres of Trap and Dubstep '
'through YouTube and Spotify data. Tracks related to these genres were gathered by scraping '
'five popular music-discovering channels on YouTube: TrapNation, TrapCity, BassNation, '
'UKFDubstep, and DubRebellion. Audio features data was tied to those that could be found in '
'Spotify’s library. Brief explanations of each section of the dashboard below.'),
html.H5('Filters:'),
html.P('Choose to filter the dataset by artists or channels, and through different time ranges by YouTube publish or Spotify release dates. Data points can be differentiated by color through artists or channels.'),
html.H5('Feature Summary:'),
html.P('Compare the mean, max, min of a specific feature for each artist.'),
html.H5('Compare Features:'),
html.P('Select any two features to compare and see if there is a correlation between them.'),
html.H5('Feature Distributions:'),
html.P('A set of histograms to help visualize the frequency and distribution of audio features pertaining to the tracks of the artists or channels selected. Follow the link below to Spotify\'s explanation of each feature.'),
html.A('Audio Features', href='https://developer.spotify.com/documentation/web-api/reference/#object-audiofeaturesobject'),
html.H5('Similar Tracks:'),
html.P('Tracks that are similar in terms of their audio features are grouped together through principal component analysis. The closer the tracks, the more similar they are.'),
html.H5('Selected Track:'),
html.P('Click on any data point on the Similar Tracks graph to load an embedded YouTube video and listen to the track. Some tracks may have embedding disabled and must be played on YouTube.'),
html.H5('Limitations:'),
html.P('> The dataset does not automatically update. The most recent data was from the end of January when everything was scraped.'),
html.P('> Not all tracks uploaded to YouTube could be found on Spotify. Many SoundCloud-only tracks, unofficial releases, remixes that also represent the genres were not included in this dataset.'),
html.P('> Not all Spotify tracks of artists in this dataset were included, only those uploaded and shared by the five YouTube channels were selected.'),
html.P(''),
html.Div([
html.P(''),
html.A(html.Img(src='/assets/github.png'), href='https://github.com/jko0401/IT299-project'),
html.A(html.Img(src='/assets/website.png'), href='https://jko0401.github.io/')
], className='offset-by-five columns')
], className='six columns pretty_container offset-by-three columns')
]),
dcc.Tab(id='dash-tab', label='Dashboard', children=[
# Filters, Popularity Plots, Video
html.Div([
html.Div([
html.Div([
html.H5('Filters'),
html.Div([
html.P('Artists:'),
html.Div([
dcc.Dropdown(id='artists',
options=artist_options,
multi=True,
value=['RL Grime', 'TroyBoi', 'Eptic'],
),
]),
]),
], className='row'),
html.Div([
html.Div([
html.P('Channels:'),
dcc.Dropdown(id='channels',
options=channel_options,
multi=True,
value=['TrapNation', 'TrapCity', 'BassNation', 'UKFDubstep', 'DubRebellion']
),
]),
], className='row'),
html.Div([
html.P('Differentiate Data Points By:'),
dcc.RadioItems(
id='color',
options=[
{'label': 'Artists', 'value': 'artistname'},
{'label': 'Channels', 'value': 'channelname'},
],
value='artistname',
labelStyle={'display': 'inline-block'})
], className='row'),
html.Div([
html.Div([
html.P('Spotify Release Date Range:'),
dcc.DatePickerRange(id='s_date',
start_date=min_s_date,
end_date=max_s_date,
display_format='Y/M/D'
)
]),
], className='row'),
html.Div([
html.Div([
html.P('YouTube Publish Date Range:'),
dcc.DatePickerRange(id='y_date',
start_date=min_y_date,
end_date=max_y_date,
display_format='Y/M/D'
)
]),
], className='row'),
], className='pretty_container'),
html.Div([
html.H5('Feature Summary'),
dcc.Dropdown(id='summary',
options=summary,
value='popularity'
),
html.Div(id='div-summary')
], className='pretty_container'),
html.Div(id='div-video')
], className='three columns'),
# Scatter Plots
html.Div([
html.Div([
html.H5('Compare Features'),
html.Div([
html.Div([
html.P('X-Axis'),
dcc.Dropdown(id='feature-1',
options=scatter,
value='popularity'
),
], className='six columns'),
html.Div([
html.P('Y-Axis'),
dcc.Dropdown(id='feature-2',
options=scatter,
value='energy'
),
], className='six columns'),
], className='row'),
html.Div(dcc.Graph(id='scatter')),
], className='pretty_container'),
html.Div([
html.H5('Similar Tracks'),
html.Div(dcc.Graph(id='pca'))
], className='pretty_container')
], className='five columns'),
# Histograms
html.Div([
html.Div([
html.H5('Feature Distributions'),
html.Div(id='div-figures'),
html.Div(id='filtered-data-hidden', style={'display': 'none'})
], className='pretty_container')
], className='four columns')
])
])
]),
])
@app.callback(
Output('filtered-data-hidden', 'children'),
[Input('artists', 'value'),
Input('channels', 'value'),
Input('s_date', 'start_date'),
Input('s_date', 'end_date'),
Input('y_date', 'start_date'),
Input('y_date', 'end_date')]
)
def filter_df(artists, channels, start_s, end_s, start_y, end_y):
if artists:
df_filtered = df[df['artistname'].isin(artists[:5]) &
df['channelname'].isin(channels) &
df['s_release_date'].isin(pd.date_range(start_s, end_s)) &
df['datepublished'].isin(pd.date_range(start_y, end_y))]
elif not channels:
df_filtered = df[df['artistname'].isin(artists[:5]) &
df['s_release_date'].isin(pd.date_range(start_s, end_s)) &
df['datepublished'].isin(pd.date_range(start_y, end_y))]
else:
df_filtered = df[df['channelname'].isin(channels) &
df['s_release_date'].isin(pd.date_range(start_s, end_s)) &
df['datepublished'].isin(pd.date_range(start_y, end_y))]
return df_filtered.to_json(date_format='iso', orient='split')
@app.callback(
Output('div-figures', 'children'),
[Input('filtered-data-hidden', 'children'),
Input('color', 'value'),
Input('artists', 'value'),
Input('channels', 'value')]
)
def plot_data(df, color, artists, channels):
dff = pd.read_json(df, orient='split')
figures = []
if artists and channels:
color = color
elif not artists:
color = 'channelname'
elif not channels:
color = 'artistname'
else:
color = None
for feature in FEATURES.keys():
if feature == 'music_key':
bin_size = 22
elif feature == 'valence':
bin_size = 2
else:
bin_size = 20
f = px.histogram(dff, x=feature, nbins=bin_size, height=300, color=color,
color_discrete_sequence=color_scheme[:len(artists)])
f.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
paper_bgcolor="#393e46",
showlegend=False,
font_color="#eeeeee",
xaxis_title=FEATURES[feature]
)
figures.append(dcc.Graph(figure=f))
return figures
@app.callback(
Output('div-summary', 'children'),
[Input('filtered-data-hidden', 'children'),
Input('color', 'value'),
Input('summary', 'value')]
)
def summary_table(df, color, summary):
dff = pd.read_json(df, orient='split')
test_sum = dff.groupby('artistname').agg({summary: ['mean', 'min', 'max']}).round(2).transpose()
test_sum = pd.DataFrame(test_sum).reset_index().drop(columns=['level_0']).rename(columns={'level_1': ''})
figures = dash_table.DataTable(
data=test_sum.to_dict('records'),
sort_action='native',
columns=[{'name': str(i), 'id': str(i)} for i in test_sum.columns],
style_header={'backgroundColor': '#00adb5'},
style_cell={
'backgroundColor': '#222831',
'color': '#eeeeee'
},
)
return figures
@app.callback(
Output('scatter', 'figure'),
[Input('filtered-data-hidden', 'children'),
Input('feature-1', 'value'),
Input('feature-2', 'value'),
Input('color', 'value'),
Input('artists', 'value'),
Input('channels', 'value')]
)
def graph_scatter(df, feature_1, feature_2, color, artists, channels):
dff = pd.read_json(df, orient='split')
if artists and channels:
color = color
elif not artists:
color = 'channelname'
elif not channels:
color = 'artistname'
else:
color = None
figure = px.scatter(dff, x=feature_1, y=feature_2, custom_data=['videoid'],
hover_name='s_track_name', color=color, color_discrete_sequence=color_scheme[:len(artists)],
height=1000)
if color == 'artistname':
legend_title = 'Artist'
else:
legend_title = 'Channel'
figure.update_layout(
paper_bgcolor="#393e46",
showlegend=True,
font_color="#eeeeee",
xaxis_title=SCATTER[feature_1],
yaxis_title=SCATTER[feature_2],
legend=dict(
orientation="h",
yanchor="middle",
y=1.02,
xanchor="center",
x=0.5,
font=dict(
size=12,
),
title=legend_title
)
)
return figure
@app.callback(
Output('pca', 'figure'),
[Input('filtered-data-hidden', 'children'),
Input('artists', 'value'),
Input('channels', 'value')]
)
def pca(df, artists, channels):
if not artists or not channels:
return dash.no_update
dff = pd.read_json(df, orient='split')
X = dff[audio_features].to_numpy(dtype='float')
X_id = pd.merge(dff[['s_track_name', 's_id']], dff[audio_features], left_index=True, right_index=True)
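    # project the 12 audio features onto two principal components; tracks with
    # similar feature profiles end up close together in the scatter plot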
pca = PCA(n_components=2)
components = pca.fit_transform(X)
figure = px.scatter(components, x=0, y=1, hover_name=X_id['s_track_name'], height=1000)
figure.update_layout(
paper_bgcolor="#393e46",
showlegend=False,
font_color="#eeeeee",
xaxis_title='Principal Component 1',
yaxis_title='Principal Component 2'
)
figure.update_traces(marker=dict(color='#00adb5'))
return figure
@app.callback(
Output('div-video', 'children'),
Input('pca', 'clickData'))
def display_selected_data(selectedData):
if not selectedData:
return html.Div([
html.H5('Selected Track'),
html.Div(id='div-video'),
html.P('(Click on a datapoint on the Similar Tracks graph to listen to the track)')
], className='pretty_container')
else:
dff = db.track_id(selectedData['points'][0]['hovertext'])
vid_link = "https://www.youtube.com/embed/" + dff['videoid'][0]
return html.Div([
html.Iframe(src=vid_link, className='video')
], className='video-container')
if __name__ == '__main__':
app.run_server(debug=True)
``` |
{
"source": "jkobject/CodonUsageBias",
"score": 2
} |
#### File: CodonUsageBias/PyCUB/espece.py
```python
import requests
import numpy as np
import utils
from scipy.stats import multinomial
try:
from urllib2 import urlopen as urlopen
except:
from urllib.request import urlopen as urlopen
class Espece(object):
""" docstring for Espece
This is an object that contains all required information of a species for PyCUB and some nice functions
to interact for each species
Attributes:
code: a dict from gene_name to dna_seq string (deprecated)
metadata: a dict containing different metadata information that one can gather, preferentially boolean
flags to classify the species for plotting and comparing
name: the full scientific name of the species
is_stored: states whether the data is stored on disk (deprecated)
link: the link to the ensembl genome
num_genes: the number of genes of this species
genome_size: the bp size of the coding genome
taxonid: the number associated to this taxon
copynumbers: the approximate copy numbers, if any, of each tRNA known for this species
average_entropy: the mean CUB value for each amino acid (CUBD dimension)
average_size: the mean size of each homology
var_entropy: the mean of fullvarentropy
fullentropy: the array containing all CUB values from the homologies of this species
fullvarentropy: the variance for each amino acid of the full CUB values of this species
fullGCcount: the GC content of the full coding genome
varGCcount: the variance of the GC content of the full coding genome
tRNAentropy: the entropy values of the copynumbers of the tRNAs if sufficient tRNAs exist
tot_homologies: the total number of homologies to cerevisiae
"""
code = None # dict
num_genes = 0 # int
genome_size = 0 # int
link = None # str
metadata = {
"isplant_pathogen": False,
"isanimal_pathogen": False,
"isplant_symbiotic": False, # endophyte or mycorrhizal
"isbrown_rot": False,
"iswhite_rot": False
}
is_stored = False # bool
name = '' # str
taxonid = None # str
copynumbers = None # dict
average_entropy = None # array float
average_size = None # float
var_entropy = None # float
fullentropy = None # array float
fullvarentropy = None # array float
fullGCcount = None # int
varGCcount = None # float
meanGChomo = None # float
tRNAentropy = None # array float
tot_homologies = None # int
meanecai = None # float
def __init__(self, **kwargs):
"""
can intialize the file from kwargs as a raw dictionnary for json format (output of dictify) or from regular args.
"""
data = kwargs.get("data", None)
if data is not None:
self.name = data.get("name", None)
if data.get("metadata", None) is None:
self.metadata = {
"isplant_pathogen": False,
"isanimal_pathogen": False,
"isplant_symbiotic": False, # endophyte or mycorrhizal
"isbrown_rot": False,
"iswhite_rot": False
}
else:
self.metadata = data.get("metadata", None)
self.code = data.get("code", None)
self.taxonid = data.get("taxonid", None)
self.copynumbers = data.get("copynumbers", None)
self.is_stored = data.get('is_stored', None)
self.average_entropy = np.asarray(data["average_entropy"]) if data.get("average_entropy", False) else None
self.average_size = data.get("average_size", None)
self.var_entropy = data.get("var_entropy", None)
self.fullentropy = np.asarray(data["fullentropy"]) if data.get("fullentropy", False) else None
self.fullGCcount = data.get("fullGCcount", None)
self.tRNAentropy = np.asarray(data["tRNAentropy"]) if data.get("tRNAentropy", False) else None
self.num_genes = data.get("num_genes", 0)
self.genome_size = data.get("genome_size", 0)
self.link = data.get("link", None)
self.fullvarentropy = np.asarray(data["fullvarentropy"]) if data.get("fullvarentropy", False) else None
self.varGCcount = data.get("varGCcount", None)
self.tot_homologies = data.get("tot_homologies", None)
self.meanGChomo = data.get("meanGChomo", None)
self.meanecai = data.get("meanecai", None)
else:
self.code = kwargs.get('code', None)
self.is_stored = kwargs.get('is_stored', None)
self.taxonid = kwargs.get('taxonid', '')
self.name = kwargs.get('name', '')
self.copynumbers = kwargs.get('copynumbers', None)
self.average_entropy = kwargs.get("average_entropy", None)
self.average_size = kwargs.get("average_size", None)
self.var_entropy = kwargs.get("var_entropy", None)
self.fullentropy = kwargs.get("fullentropy", None)
self.fullGCcount = kwargs.get("fullGCcount", None)
self.tRNAentropy = kwargs.get("tRNAentropy", None)
self.num_genes = kwargs.get("num_genes", 0)
self.genome_size = kwargs.get("genome_size", 0)
self.link = kwargs.get("link", None)
self.fullvarentropy = kwargs.get("fullvarentropy", None)
self.varGCcount = kwargs.get("varGCcount", None)
self.tot_homologies = kwargs.get("tot_homologies", None)
self.metadata = kwargs.get("metadata", None)
self.meanGChomo = kwargs.get("meanGChomo", None)
self.meanecai = kwargs.get("meanecai", None)
def __str__(self):
"""
will present some interesting info about this species.
"""
toret = ''
if self.name:
toret += "\nspecies: " + self.name
toret += "\n----------------------------------"
if self.taxonid:
toret += "\ntaxonid" + str(self.taxonid)
toret += "\nmetadata" + str(self.metadata)
toret += "\n----------------------------------"
if self.copynumbers is not None:
toret += "\ncopynumbers of tRNA: " + str(self.copynumbers)
if self.average_size is not None:
toret += "\naverage size: " + str(self.average_size)
if self.tRNAentropy is not None:
toret += "\ntRNA entropy: " + str(self.tRNAentropy)
if self.num_genes:
toret += "\nnumber of genes: " + str(self.num_genes)
if self.genome_size:
toret += "\ngenome size: " + str(self.genome_size)
if self.tot_homologies is not None:
toret += "\ntotal number of homologies to cerevisiae: " + str(self.tot_homologies)
toret += "\n----------------------------------"
if self.average_entropy is not None:
toret += "\naverage entropy: " + str(self.average_entropy)
if self.var_entropy is not None:
toret += "\nvariance of entropy: " + str(self.var_entropy)
if self.fullentropy is not None:
toret += "\nfull entropy: " + str(self.fullentropy)
if self.fullvarentropy is not None:
toret += "\nfull variance of entropy: " + str(self.fullvarentropy)
if self.varGCcount is not None:
toret += "\nvariance of the GC content: " + str(self.varGCcount)
if self.meanecai is not None:
toret += "\nmean ECAI: " + str(self.meanecai)
return toret
def get_tRNAcopy(self, by="entropy", setnans=False, kingdom='fungi', baseCNvalue=2):
"""
Retrieves tRNA copy numbers from ensembl DB
will print the number of tRNAs and the number of tRNAs with
known codons (the useful ones)
will stop and set a trace for the user to inspect the data
to do so: please write "dat" in the console. if you see something that
should be corrected please do so from the console directly or from the code
if there seems to be an error in the code
if it is an error in the db that you can't do anything, like a mismatched codon
and amino acid, you can't do much. resume the process by typing "c" in the console.
Args:
by: str, "entropy" to also compute the tRNAentropy vector, "frequency" to keep raw counts
setnans: bool, set amino acids with no counted tRNA to NaN instead of 0.5 in tRNAentropy
kingdom: str, which ensembl division to query ('fungi' by default, 'vertebrate' queries rest.ensembl.org)
baseCNvalue: int, baseline pseudo copy number given to every anticodon before counting
Returns:
Will populate copynumbers. And tRNAentropy if by="entropy"
Or will not do anything if the species is unavailable and will print it
Raises:
AttributeError: this is a wrong argument try frequency or entropy
"""
server = "http://rest.ensemblgenomes.org" if kingdom != 'vertebrate' else "http://rest.ensembl.org"
print 'species: ' + self.name
ext = "/lookup/genome/" + self.name + '?'
add = "biotypes=tRNA;level=transcript"
r = requests.get(server + ext + add, headers={"Content-Type": "application/json"})
if not r.ok:
print " !! ---> unavailable species"
return
data = r.json()
copynumber = {}
for key, val in utils.anticodons.iteritems():
copynumber.update({key: {}})
for v in val:
v.replace('T', 'U')
copynumber[key].update({v.replace('T', 'U'): baseCNvalue})
num = 0
j = 0
for j, dat in enumerate(data):
if dat["name"] is not None and len(dat["name"]) != 0:
if dat["name"][0:4] == 'tRNA':
try:
if dat["description"] is None:
if dat["name"][10:13] != '':
if len(dat["name"]) == 14:
codn = dat["name"][10:13]
if len(dat["name"]) == 13:
codn = dat["name"][9:12]
if 'T' in codn:
codn = codn.replace('T', 'U')
else:
continue
else:
codn = dat["description"][23:26]
ami = dat["name"][5:8].upper()
if ami == 'SEC':
ami = 'SER'
copynumber[ami][codn] += 1
num += 1
elif ami in ['TRP', 'MET', 'UND', 'SUP', 'UNK']:
continue
elif ami == 'PSE':
codn = dat["name"][12:15].upper()
for key, val in copynumber.iteritems():
if type(val) is dict:
for k, v in val.iteritems():
if k == codn:
copynumber[key][k] += 1
else:
copynumber[ami][codn[::-1]] += 1
num += 1
except KeyError:
print "KeyError"
elif dat["name"][0:3] == 'trn':
try:
codn = dat["name"][5:8].upper()
if 'T' in codn:
codn = codn.replace('T', 'U')
ami = utils.amino2reduce[dat["name"][3]]
if ami in ['TRP', 'MET', 'UND', 'SUP', 'UNK']:
continue
else:
copynumber[ami][codn[::-1]] += 1
num += 1
except KeyError:
print "KeyError"
elif dat["description"] is not None and len(dat["description"]) > 10:
if dat["description"][0:4] == 'tRNA':
try:
codn = dat["description"][23:26]
ami = dat["description"][5:8].upper()
if ami == 'SEC':
ami = 'SER'
copynumber[ami][codn] += 1
num += 1
elif ami in ['TRP', 'MET', 'UND', 'SUP', 'UNK']:
continue
else:
copynumber[ami][codn[::-1]] += 1
num += 1
except KeyError:
print "KeyError"
if num == 0:
print "empty data"
print "we got " + str(j) + " datapoints and managed to extract " + str(num)
# we find probabilities of tRNA
k = 0
if num > 100:
tRNAentropy = np.zeros(18) if by == "entropy" else None
for _, v in copynumber.iteritems():
n = np.array(v.values()).sum()
if n > 0:
for _, val in v.iteritems():
val = val / n
# Else we keep the raw frequency values
if by == "entropy":
nbcod = len(v) # replace Cleng
count = v.values()
X = np.zeros(nbcod)
mn = np.ones(nbcod) / nbcod
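                # pseudo-entropy of this amino acid's codon box: Yg is the multinomial
                # likelihood of the observed counts under a uniform codon distribution,
                # Eg the likelihood of the most even integer split of n over nbcod codons,
                # so -log(Yg / Eg) is 0 for perfectly even usage and grows with bias.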
if n == 0:
tRNAentropy[k] = np.NaN if setnans else 0.5
else:
Yg = multinomial.pmf(x=count, n=n, p=mn)
# efor part
div, i = divmod(n, nbcod)
X[:i] = np.ceil(div) + 1
X[i:] = np.floor(div)
Eg = multinomial.pmf(x=X, n=n, p=mn)
# end here
tRNAentropy[k] = -np.log(Yg / Eg)
k += 1
elif by != "frequency":
raise AttributeError("this is a wrong argument try frequency or entropy")
# Here we can compute as well the entropy of the tRNA CNs when there is a sufficient number of values
# else we can set it to zero (to NaN) this allows us to directly compare two values
copynumber.update({'num': num}) # total number
copynumber.update({'datapoints': j}) # possible number of datapoints
self.copynumbers = copynumber
if by == "entropy" and num > 100:
self.tRNAentropy = tRNAentropy
def gettaxons(self, kingdom='fungi'):
"""
Parses the ensemblgenomes REST API to retrieve the taxon ids
for the species from which we would not have any (downloaded via Yun for example)
Raises:
HTTPrequestError: not able to connect to the server
"""
# http: // rest.ensemblgenomes.org / info / genomes / arabidopsis_thaliana?
server = "http://rest.ensemblgenomes.org" if kingdom != 'vertebrate' else "http://rest.ensembl.org"
print 'species: ' + self.name
ext = "/info/genomes/" + self.name + '?'
r = requests.get(server + ext, headers={"Content-Type": "application/json"})
if not r.ok:
r.raise_for_status()
data = r.json()
self.taxonid = data["species_taxonomy_id"]
def get_epigenomes(self):
"""
get from ensembl all the data about the epigenome that could help asking interesting questions about the CUB
"""
# curl 'http://rest.ensemblgenomes.org/overlap/id/AT3G52430?feature=array_probe' - H 'Content-type:application/json'
# curl 'http://rest.ensemblgenomes.org/overlap/id/AT3G52430?feature=repeat' - H 'Content-type:application/json'
pass
def _dictify(self):
"""
Used by the saving function. Transforms the object into a JSON-serializable dictionary
Returns:
A dict holding every element to be jsonized
"""
return {"name": self.name,
"code": self.code,
"num_genes": self.num_genes,
"genome_size": self.genome_size,
"link": self.link,
"fullvarentropy": self.fullvarentropy.tolist() if self.fullentropy is not None else None,
"varGCcount": self.varGCcount,
"tot_homologies": self.tot_homologies,
"taxonid": self.taxonid,
"copynumbers": self.copynumbers,
"metadata": self.metadata,
"meanGChomo": self.meanGChomo,
"meanecai": self.meanecai,
"is_stored": self.is_stored,
"average_entropy": self.average_entropy.tolist() if self.average_entropy is not None else None,
"average_size": self.average_size,
"var_entropy": self.var_entropy,
"fullentropy": self.fullentropy.tolist() if self.fullentropy is not None else None,
"fullGCcount": self.fullGCcount.tolist() if self.fullGCcount is not None else None,
"tRNAentropy": self.tRNAentropy.tolist() if self.tRNAentropy is not None else None}
```
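A minimal round-trip sketch for `Espece` (imports assume the `PyCUB` package layout above; the network-bound helpers such as `get_tRNAcopy()` are left out):

```python
from PyCUB import espece

# build a species record by hand, serialize it, and rebuild it from the dict
sp = espece.Espece(name="saccharomyces_cerevisiae", taxonid="4932")
as_dict = sp._dictify()             # JSON-serializable view used when saving
restored = espece.Espece(data=as_dict)
print(restored)                     # summary produced by __str__
```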
#### File: CodonUsageBias/PyCUB/pyCUB.py
```python
import os
import json
import zipfile
import shutil
from ftplib import FTP
import gzip
import copy
import requests
from sklearn.preprocessing import normalize
try:
from urllib2 import urlopen as urlopen
except:
from urllib.request import urlopen as urlopen
from sklearn.neural_network import MLPRegressor
from joblib import Parallel, delayed
import multiprocessing
from functools32 import lru_cache
from rpy2.robjects.packages import importr
from ete2 import NCBITaxa
from rpy2 import robjects
import rpy2.robjects.packages as rpackages
from rpy2.robjects.vectors import StrVector
import pandas as pd
import numpy as np
from scipy.spatial.distance import euclidean
from scipy.sparse.csgraph import dijkstra
from scipy.stats import spearmanr
from sklearn import manifold as man
from sklearn.decomposition import PCA
from sklearn.linear_model import MultiTaskLassoCV, LassoCV
from sklearn import cluster
import espece as spe
import homoset as hset
import utils
import homology as h
from bokeh.plotting import *
from bokeh.models import *
from bokeh.io import save, show
from bokeh.layouts import column
import matplotlib.pyplot as plt
from Bio import SeqIO
# Lasso, cross Validation, Principal Component Analysis, T-SNE, spearman's rho,
# djikstra alg with fbonacci's boosting, multinomial distribution,
# multivariate normal approximation to multinomial, multiprocessing,
# parallel computing, entropy, AUC_entropy, pseudo phylodistance
# by cophenetic matrix of a dendogram, cosine similarity, hyperparams grid search,
# KMeans, MiniBatchKMeans, KModes, silhouette_score, calinski_harabaz_score, akaike information criterion,
# bayesian information criterion binary search, recurssive function,
# dynamic programming, endres distance, kulback leiber divergence, gaussian mixture clustering
# neural networks, Perceptron, DBscan
# python, js, REST, ftp, json, doxygen,gzip,
# CAI, tRNA, CUB, 3D conformation of DNA, CUF, fungi, animals, plants, GCcount, KaKs_Scores, hydrophob, synthcost, isoepoint
# HiC data, fasta, fast,
# biology, genomics, genetics, population genomics, comparative genomics, computational biology,
# bioinformatics, machine learning, statistics, statistical learning, informatics, computer science
# knowledge discovery, datascience, big data, cloud computing, scientific computing
import pdb
class PyCUB(object):
""" @package PyCUB is the main object of the project that allows the user to access most of the functions
When using it, please follow the documentation and the example notebooks, though you can
still use it as you please and reuse some of the nice tricks provided here and in python
Attributes:
species: dictionary of Espece objects from the name of the species.
(see espece.py)
working_homoset: PyCUB.homoset object that stores a subset of the homologies
you want to work on
all_homoset PyCUB.homoset that stores the all the homologies
session: str the session name you want to use (will appear in the savings for example)
_is_saved: bool trivial system only boolean
links: dict of all the links readily available in PyCUB.
for the project of <NAME> please use whatever datasets you may find useful
(you can also download from Ensembl)
coeffgenes: np.array regressing values for each attributes
scoregenes: the score of the regressor
scorespecies: the score of the regressor
coeffspecies: np.array regressing values for each attributes
rho_ent: float from the scoring of spearman's rho for entropy
pent: float from the scoring of spearman's rho for entropy
rho_cub: float from the scoring of spearman's rho for CUB
pcub: float from the scoring of spearman's rho for CUB
rho_cuf: float from the scoring of spearman's rho for CUF
pcuf: float from the scoring of spearman's rho for CUF
"""
links = {'yun': {
'homology1t500.zip': 'https://www.dropbox.com/s/fmh0ljf02twn4vw/homology1t500.zip?dl=1',
'homology501t1000.zip': 'https://www.dropbox.com/s/ld4ar5pnh0f1w1w/homology501t1000.zip?dl=1',
'homology1001t2000.zip': 'https://www.dropbox.com/s/he39xu9c0n2jw8n/homology1001t2000.zip?dl=1',
'homology2001t2500.zip': 'https://www.dropbox.com/s/8w73jbs3r0ugqb8/homology2001t2500.zip?dl=1',
'homology2501t3000.zip': 'https://www.dropbox.com/s/86d23iaetw3hmzy/homology2501t3000.zip?dl=1',
'homology3001t3500.zip': 'https://www.dropbox.com/s/mr1tefylq11l3ee/homology3001t3500.zip?dl=1',
'first50.zip': 'https://www.dropbox.com/s/m3vob12ztfqs8gh/first50.zip?dl=1'},
'mymeta': {
'Amino Acid Properties README.txt': 'https://www.dropbox.com/s/3tb2j69l0acirt0/\
Amino%20Acid%20Properties%20README.txt?dl=1',
'Amino Acid Properties.csv':
'https://www.dropbox.com/s/g157emzyid2qi83/Amino%20Acid%20Properties.csv?dl=1',
'cerevisae_prot_abundance.csv':
'https://www.dropbox.com/s/t77016m5fqzb2fc/cerevisae_prot_abundance.csv?dl=1',
'names_with_links.csv':
'https://www.dropbox.com/s/voj26r0onvvqvx2/names_with_links.csv?dl=1',
'order_name461.csv':
'https://www.dropbox.com/s/0708046ld1pcju4/order_name461.csv?dl=1',
'Yun_Species_Context':
'https://www.dropbox.com/s/rdse1rco04hmuwf/Yun_Species_Context.csv?dl=1',
'homolist.json':
'https://www.dropbox.com/s/5a3h8hps9eozd8g/homolist.json?dl=1'
},
'meta': {
'fungi':
'ftp://ftp.ensemblgenomes.org/pub/release-39/fungi/species_metadata_EnsemblFungi.json',
'bacteria':
'ftp://ftp.ensemblgenomes.org/pub/release-39/bacteria/species_metadata_EnsemblBacteria.json',
'plants':
'ftp://ftp.ensemblgenomes.org/pub/release-39/plants/species_metadata_EnsemblPlants.json'
}
}
species = {}
working_homoset = None
all_homoset = None
_is_saved = False
session = None
coeffgenes = None
scoregenes = None
scorespecies = None
coeffspecies = None
def __init__(self, species={}, _is_saved=False,
_is_loaded=False, working_homoset=None, all_homoset=None, session='session1'):
"""
will initialize the object with the different values you might have from another project
Args:
species: dictionary of Espece objects from the name of the species.
(see espece.py)
working_homoset : PyCUB.homoset object that stores a subset of the homologies
you want to work on
all_homoset PyCUB.homoset that stores the all the homologies
session : str the session name you want to use (will appear in the savings for example)
_is_saved : bool trivial system only boolean
"""
self.species = species
self.working_homoset = working_homoset
self.all_homoset = all_homoset
self._is_saved = _is_saved
self._is_loaded = _is_loaded
self.session = session
self.homolist = None
print "working on session: " + self.session
if os.path.isdir('utils/save/' + session):
print 'you already have a session here (just a warning)'
else:
os.mkdir('utils/save/' + session)
# create a function to find all homologies from a species
# CpCpGpApApTpApTpApTpTpCpCpGpApApTpApTpApTpTpCpCpGpApApTpApTpApTpTpCpCpGpApApTpApTpApTpTpTpTpCpCpGpApApTpApTpApTpTp
# GbGbCbTbTbAbTbAbTbAbAbGbGbCbTbTbAbTbAbTbAbAbGbGbCbTbTbAbTbAbTbAbAbGbGbCbTbTbAbTbAbTbAbAbAbGbGbCbTbTbAbTbAbTbAbAbAb
def getHomologylist(self, species='saccharomyces_cerevisiae', kingdom='fungi'):
"""
A function to retrieve the homologies directly from a given species
(it is better to use
one of the key species for the different kingdoms (sacharomyces, HS, Arabidopsis..))
Args:
specie: str the name of the specie to get the homology from
kingdom: str the kingdom where we can find this specie
"""
location = 'ftp.ensemblgenomes.org' if kingdom != 'vertebrate' else 'ftp.ensembl.org'
release = 'release-40/' if kingdom != 'vertebrate' else 'release-93'
ftp = FTP(location)
ftp.login()
if kingdom == 'vertebrate':
kingdom = ''
ftp.cwd('pub/' + release + kingdom + '/fasta/')
data = []
name = []
ftp.retrlines('NLST', data.append)
for d in data:
if d == species:
ftp.cwd(d)
link = []
ftp.cwd('cds')
ftp.retrlines('NLST', link.append)
with open("utils/data/temp.fa.gz", "wb") as file:
for i in link:
if i[-9:] == "all.fa.gz":
ftp.retrbinary("RETR " + i, file.write)
with gzip.open("utils/data/temp.fa.gz", "rt") as handle:
for record in SeqIO.parse(handle, "fasta"):
name.append(record.name)
self.homolist = name
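# Hedged usage sketch (assumption: network access to the Ensembl Genomes FTP server):
# >>> cub = PyCUB(session='demo')
# >>> cub.getHomologylist(species='saccharomyces_cerevisiae', kingdom='fungi')
# >>> cub.homolist[:3]  # gene ids parsed from the cds all.fa.gz fasta headers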
def get_data(self, From='yun', homonames=None, kingdom='fungi', sequence='cdna',
additional='type=orthologues', saveonfiles=False, normalized=True, setnans=False,
by="entropy", using="normal", tRNA=True, getCAI=True, first=20, inpar=True):
"""
Download the data from somewhere on the web (Ensembl, Yun(with links))
you can provide a lot of different values to scrape Ensembl's datasets
it will compute from ensembl to retrieve a similar dataset as what yun's
data is.
Args:
From: str flag 'yun' or 'ensembl':
homonames: list[str] the particular homologies you want to scrape; if 'all' and you have used the
getHomologylist() function, the homologies will be taken from there
kingdom: str same for kingdoms
sequence: str the type of sequences you want to use
additional: str additional information about the scraping
saveonfiles: bool save the unprocessed data before populating working homoset
normalized: bool if you want the values to be normalized by the length of the codons
(lengths are always saved)
setnans: bool if you want to save the nans as metadata
by: str flag 'entropy', 'entropyLocation' (entropy location), 'frequency'
using: str flag 'random' 'normal' 'permutation' 'full'
inpar: bool or int for parallel computing and number of core
tRNA: bool whether or not to compute tRNA data
getCAI: bool flag to true to retrieve the CAI as well
first: int the first most expressed genes to compute the CAI ref statistics
Raises:
AttributeError: "you can't compute codon frequency with Yun's data...", 'not the right From'
UnboundLocalError: "you have to load the homologies first"
AttributeError: 'not the right From'
http://rest.ensemblgenomes.org/
"""
if by == 'frequency':
print "you will have a larger dimensional matrix (59D)"
if type(inpar) is int:
num_cores = inpar
else:
num_cores = -1 if inpar else 1
if From == 'yun':
if by == 'frequency':
raise AttributeError("you can't compute codon frequency with Yun's data...")
Parallel(n_jobs=8)(delayed(utils.getyun)(key, val) for key, val in
self.links['yun'].iteritems())
self.load(All=False if homonames is not None else True, filename=homonames,
From=From, by=by, inpar=inpar)
elif From == 'ensembl':
if homonames == 'all' or homonames is None:
if self.homolist is None and kingdom == 'fungi':
with open('utils/meta/homolist.json', "r") as f:
self.homolist = json.loads(f.read())
else:
if self.homolist is None:
raise UnboundLocalError("you have to load the homologies first")
print "using the loaded homolist from ensembl"
else:
self.homolist = homonames
self.all_homoset = hset.HomoSet(datatype=by)
print "doing all " + str(len(self.homolist)) + " homologies"
print ' '
homonamelist = []
getCAI = self.createRefCAI(first=first, kingdom=kingdom) if getCAI else None
if bool(inpar):
values = Parallel(n_jobs=num_cores)(delayed(utils.loadfromensembl)(
name, kingdom, sequence,
additional, saveonfiles,
normalized, setnans, i, by, using, getCAI) for i, name in enumerate(self.homolist))
for i, val in enumerate(values):
if val is not None:
homonamelist.append(self.homolist[i])
self.all_homoset.update({self.homolist[i]: val})
else:
for i, name in enumerate(self.homolist):
homo = utils.loadfromensembl(name, kingdom, sequence,
additional, saveonfiles,
normalized, setnans, i, by, using, getCAI)
if homo is not None:
homonamelist.append(name)
self.all_homoset.update({name: homo})
self.all_homoset.datatype = by
self.all_homoset.homo_namelist = homonamelist
# TODO: test full pipeline with frequency/entropy/entropylocation
taxons, species = self.all_homoset.preprocessing(withtaxons=True)
if tRNA:
print "computing tRNA copy numbers"
for i, spece in enumerate(species):
espece_val = spe.Espece(name=spece, taxonid=taxons[i])
if tRNA:
espece_val.get_tRNAcopy(by=by, setnans=setnans)
self.species.update({spece: espece_val})
self.all_homoset.loadhashomo()
else:
raise AttributeError('not the right From')
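# Hedged usage sketch (assumptions: network access to the Ensembl REST/FTP services and the
# curated files under utils/meta/): a minimal entropy run over the fungi homology list,
# in parallel, with tRNA copy numbers and CAI enabled.
# >>> cub.get_data(From='ensembl', kingdom='fungi', sequence='cdna', by='entropy',
# ...              normalized=True, tRNA=True, getCAI=True, inpar=True)
# >>> len(cub.all_homoset.homo_namelist), len(cub.species)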
def get_metadata_Ensembl(self, kingdoms):
"""
download it and put it where it belongs in the Espece object
parse the server https://fungi.ensembl.org/info/website/ftp/index.html
will also get the metadata from the kingdoms that you are analysing
Args:
kingdoms: str flag the type of kingdoms you want to have 'fungi' 'bacteria' 'plants' 'animals'
"""
if not os.path.exists('utils/meta'):
os.mkdir('utils/meta')
url = self.links['meta'][kingdoms]
print "downloading " + kingdoms + " with urllib"
if not os.path.exists('utils/meta/' + kingdoms + '.json'):
f = urlopen(url)
data = f.read()
with open('utils/meta/' + kingdoms + '.json', "wb") as code:
code.write(data)
print "downloaded"
def get_mymetadata(self, From='jerem', inpar=True):
"""
Go ahead and design your own metadata retrieval here.
obviously you would need to change some other functions.
for me it is mean protein abundances in cerevisiae cells.
Args:
From: str flag designer of the function to load metadatas
inpar: bool for parallel processing
"""
if not os.path.exists('utils/meta'):
os.mkdir('utils/meta')
if From == 'jerem':
num_cores = -1 if inpar else 1
Parallel(n_jobs=num_cores)(delayed(utils.mymeta)(key, val) for key, val in
self.links['mymeta'].iteritems())
if From == 'tobias':
self.import_metadataTobias()
def import_metadataTobias(self):
"""
will import the metadata obtained from Tobias for the fungi species affiliated to cerevisiae, attaching it to each species for further diagnostics.
Populates metadata[num_genes, plant_pathogen, animal_pathogen, genome_size, plant_symbiotic, brown_rot, white_rot]
for each species
and weight, mRNA_abundance, is_secreted, protein_abundance, cys_elements, decay_rate for each homology
"""
# species metadata
data = pd.read_csv("utils/meta/Yun_Species_Context.csv")
for i, species in enumerate(data["Genome"]):
if species in self.species:
if self.species[species].metadata is None:
self.species[species].metadata = {
"isplant_pathogen": False,
"isanimal_pathogen": False,
"isplant_symbiotic": False, # endophyte or mycorrhizal
"isbrown_rot": False,
"iswhite_rot": False
}
self.species[species].num_genes = int(data["No_Genes"][i])
self.species[species].metadata["isplant_pathogen"] = bool(data["plant_pathogen"][i])
self.species[species].metadata["isanimal_pathogen"] = bool(data["animal_pathogen"][i])
self.species[species].genome_size = int(data["Genome_Size"][i])
self.species[species].metadata["isplant_symbiotic"] = bool(data["mycorrhizal"][i] or data["endophyte"][i])
self.species[species].metadata["isbrown_rot"] = bool(data["brown_rot"][i])
self.species[species].metadata["iswhite_rot"] = bool(data["white_rot"][i])
# protein metadata
data = pd.read_csv("utils/meta/protdata/tob_currated.csv")
for i, homo in enumerate(data["ORF"]):
if unicode(homo) in self.all_homoset.keys():
self.all_homoset[homo].weight = data["Molecular Weight (Da)"][i]
self.all_homoset[homo].protein_abundance = data["Protein Abundance (molecules per cell)"][i]
self.all_homoset[homo].mRNA_abundance = float(data["mRNA Abundance (molecules per cell)"][i].replace(',', '.'))\
if type(data["mRNA Abundance (molecules per cell)"][i]) is str else data["mRNA Abundance (molecules per cell)"][i]
self.all_homoset[homo].decay_rate = float(data["Protein decay rate (min-1)"][i].replace(',', '.'))\
if type(data["Protein decay rate (min-1)"][i]) is str else data["Protein decay rate (min-1)"][i]
data = pd.read_csv("utils/meta/protdata/PDI_substrates.csv")
for i, homo in enumerate(data["ORF"]):
if unicode(homo) in self.all_homoset.keys():
self.all_homoset[homo].is_secreted = True
self.all_homoset[homo].protein_abundance = data["proteins_per_cell"][i]
self.all_homoset[homo].cys_elements = data["Cys"][i]
self.all_homoset[homo].decay_rate = data["degradation"][i]
# LOADINGS AND SAVINGS
def load(self, session=None, All=False, filename='first500', From=None, by='entropy', tRNA=True, inpar=True):
"""
Get the data that is already present on a filename
Either load from Yun's datasets or from an already saved session.
Is being called by get_data. But you can call it to just use one of Yun's files
as well
Args:
From: str if this flag is set to 'yun' it means that the filename is made of Yun's data
in which case we will directly create the homology map at the same time as the rest
of the PyCUB object.
All: bool set to true to load everything from Yun
by: str same flag as get_data (for Yun's files here).
filename: str the particular filename when not loading them all
session: str if a session name is provided, then will load a zip file from
this session's folder
tRNA: bool to true to compute the tRNA values
inpar: int to set the number of processor (as in scikit)
Returns:
May return additionals if loading from a session where one decided to save more than the two All/working
homologies. to be handled separately
"""
separation = "homology"
folder = "utils/data/" if session is None else "utils/save/" + session + '/'
if not self._is_loaded:
if From == 'yun' and session is None:
if All:
# if we want them all we will take the first and then use loadmore
# function
filename = 'homology1t500'
# then we process it according to how Yun Displays its data, trying to fill in
# as much as we can
homo_namelist = []
nameB = ""
self.all_homoset = hset.HomoSet(datatype=by)
if not os.path.exists(folder + filename):
# the file has not been unzipped already
zip_ref = zipfile.ZipFile(folder + filename + '.zip', 'r')
zip_ref.extractall(folder + filename)
zip_ref.close()
note = False
else:
note = True
file = folder + filename
if len(os.listdir(file)) == 2:
# the case with macos and special zipping... lame
filename = file + '/' + filename
else:
filename = file
# getting all the homology names
print "Reviewing all the " + str(len(os.listdir(filename))) + " files"
for f in sorted(os.listdir(filename)):
if f.endswith(".txt"):
nameA = f.split(separation)[0]
if(nameA != nameB):
nameB = nameA
homo_namelist.append(nameB)
self.all_homoset.homo_namelist = homo_namelist
# getting all the species names and instanciating the species object
df, dflink = utils.retrievenames()
i = 0
df = df.sort_values(by='name')
for _, row in df.iterrows():
espece_val = spe.Espece(name=row['name'],
link=dflink.loc[dflink['name'] == row['name'], 'b'].tolist()[0])
if tRNA:
espece_val.get_tRNAcopy(by=by)
self.species.update({
row['name']: espece_val})
utils.speciestable.update({i: row['name']})
i += 1
self.all_homoset.species_namelist = df['name'].tolist()
# getting the homologies now
dou = 0
if by == "entropy":
by = "entropyValue"
if inpar:
values = Parallel(n_jobs=-1)(delayed(utils.homoyun)(
separation, filename,
homology, by=by) for homology in homo_namelist)
for i, val in enumerate(values):
self.all_homoset.update({homo_namelist[i]: h.homology(
full=val[0], names=val[1].tolist(),
nans=val[2],
homocode=homo_namelist[i],
lenmat=val[3], doub=val[4])})
dou += np.count_nonzero(val[4])
else:
for homo in homo_namelist:
val = utils.homoyun(separation, filename, homo, by=by)
self.all_homoset.update({homo: h.homology(
full=val[0], names=val[1].tolist(),
nans=val[2],
homocode=homo,
lenmat=val[3], doub=val[4])})
dou += np.count_nonzero(val[4])
# create the hashomomatrix
self.all_homoset.preprocessing(withnames=True)
self.all_homoset.loadhashomo()
self.all_homoset.datatype = by
print "you had " + str(dou) + " same species homologies"
print "reviewed " + str(len(homo_namelist)) + " homologies "
# if we haven't changed the working set with processing
self._is_saved = False
if not note:
shutil.rmtree(file)
self._is_loaded = True
if All:
self.loadmore('homology4501t5000', by=by)
self.loadmore('homology3501t4000', by=by)
self.loadmore('homology2501t3000', by=by)
self.loadmore('homology601t1000', by=by)
self.loadmore('homology4001t4500', by=by)
self.loadmore('homology3001t3500', by=by)
self.loadmore('homology2001t2500', by=by)
self.loadmore('homology1001t2000', by=by)
print "you now have " + str(np.count_nonzero(self.all_homoset.hashomo_matrix)) +\
" genes in total"
elif From is None:
if not os.path.isfile(folder + filename + '.json'):
print "unzipping " + folder + filename + '.json.gz'
os.system("gzip -d " + folder + filename + '.json.gz')
with open(folder + filename + ".json", "r") as f:
print "loading from " + filename
additionals = self._undictify(json.loads(f.read()))
print "it worked !"
os.system("gzip " + folder + filename + '.json')
print "you now have " + str(np.count_nonzero(self.all_homoset.hashomo_matrix)) +\
" genes in total"
return additionals
else:
print "hey, it looks like this object has already loaded some things"
print "please use loadmore or use another object"
print "you can delete this one with 'del' "
def loadmore(self, filename='first500', by='entropyLocation'):
"""
Get the data that is already present on a filename when you already have data
is useful to load more of Yun's datasets.
It is called when load is set to All
Args:
filename: str the filename to additionaly load
by: flag same as before
Raises:
UnboundLocalError: "You should try load first, this object is empty"
"""
folder = "utils/data/"
separation = "homology"
if self._is_loaded:
# then we process it according to how Yun Displays its data, trying to fill in
# as much as we can
homo_namelist = []
nameB = ""
if not os.path.exists(folder + filename):
print "unzipping " + filename
zip_ref = zipfile.ZipFile(folder + filename + '.zip', 'r')
zip_ref.extractall(folder + filename)
zip_ref.close()
note = False
else:
note = True
file = folder + filename
if len(os.listdir(file)) < 3:
file = file + '/' + filename
print "Reviewing all the " + str(len(os.listdir(file))) + " files"
for f in sorted(os.listdir(file)):
if f.endswith(".txt"):
nameA = f.split(separation)[0]
if(nameA != nameB):
nameB = nameA
homo_namelist.append(nameB)
# comparing two lists
notdup = [item for item in homo_namelist if not (item in self.all_homoset.homo_namelist)]
dup = len(homo_namelist) - len(notdup)
if dup != 0:
print "there is " + str(dup) + " duplicate from previous loads.. not cool"
homo_namelist = notdup
# update homonamelist
self.all_homoset.homo_namelist.extend(homo_namelist)
dou = 0
print "start the iteration process, I hope you haven't clusterized \
your data yet..else it won't work (for now)"
num_cores = multiprocessing.cpu_count()
values = Parallel(n_jobs=num_cores)(delayed(utils.homoyun)(
i, homology, separation, file, self.all_homoset.species_namelist, by=by) for i,
homology in enumerate(homo_namelist))
for val in values:
self.all_homoset.update(val[0])
self.all_homoset.hashomo_matrix = np.vstack((self.all_homoset.hashomo_matrix, val[1]))\
if self.all_homoset.hashomo_matrix is not None else val[1]
dou += val[2]
print "you had " + str(dou) + " same species homologies (it can't be processed! for now)"
print "reviewed " + str(len(homo_namelist)) + " homologies "
self._is_saved = False
if not note:
shutil.rmtree(folder + filename)
self._is_loaded = True
print "you now have " + str(np.count_nonzero(self.all_homoset.hashomo_matrix)) +\
" genes in total"
else:
raise UnboundLocalError("You should try load first, this object is empty")
def save(self, name, save_workspace=True, save_homo=True, add_homosets={}, cmdlinetozip="gzip"):
"""
call to save your work. you should call save on specific data structure if this is what you want to save.
Will call other object's save, will transform all the variable into dict and save the dicts
as json files. will save the df also as json files. PyCUB and homoset have
their own json file.
adding some params because else the object may be too big
Args:
name: str the name of the particular save on this session
save_workspace: bool to false not to save working_homoset
save_homo: bool to false not to save all_homoset
add_homosets: PyCUB.homoset homoset to add in addition to the regular ones
cmdlinetozip: str tells the platform how to zip on your system; uses gzip by default
but it needs to be installed
"""
filename = "utils/save/" + self.session + '/' + name + ".json"
print "writing in " + name
dictify = self._dictify(save_workspace, save_homo, add_homosets)
data = json.dumps(dictify, indent=4, separators=(',', ': '))
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(filename, 'w') as f:
f.write(data)
print "it worked !"
# now we zip to save 90% memory space
if cmdlinetozip == 'mac':
os.system("ditto -c -k --sequesterRsrc " + filename + ' ' + filename + '.zip')
os.remove(filename)
if cmdlinetozip == 'gzip':
os.system("gzip " + filename)
self._is_saved = True
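# Hedged usage sketch (assumptions: a populated PyCUB instance `cub` and gzip available on PATH):
# saving then reloading a session round-trips through _dictify()/_undictify() via json + gzip.
# >>> cub.save('checkpoint1', save_workspace=True, save_homo=True, cmdlinetozip='gzip')
# >>> fresh = PyCUB(session=cub.session)
# >>> fresh.load(session=cub.session, filename='checkpoint1')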
# PREPROCESSINGS
# CpCpGpApApTpApTpApTpTpCpCpGpApApTpApTpApTpTpCpCpGpApApTpApTpApTpTpCpCpGpApApTpApTpApTpTpTpTpCpCpGpApApTpApTpApTpTp
# GbGbCbTbTbAbTbAbTbAbAbGbGbCbTbTbAbTbAbTbAbAbGbGbCbTbTbAbTbAbTbAbAbGbGbCbTbTbAbTbAbTbAbAbAbGbGbCbTbTbAbTbAbTbAbAbAb
def get_working_homoset(self, clusternb=None, species=None, homologies=None, cleanhomo=None):
"""
create a subset of all_homoset on which you would like to do further computation
To use once you have clustered homology groups, else takes everything.
Can also be used just to get a subset of the all homosets.
Args:
clusternb: int set the cluster of the group you want to get need to be between 1 and homogroupnb
homologies: list[str] the subset as a list you want to get from all_homoset
(can be additional to a clusternb)
species: list[str] the subset as a list you want to get from all_homoset
(can be additional to a clusternb)
cleanhomo: float if the homology is only shared by less than this amount amongst the species present
in this homoset, removes them.
Returns:
a HomoSet object (see homoset.py)
Raises:
UnboundLocalError: "you have not clusterized you 'all_homoset'. you want to just use 'order_from_matrix' on it."
"""
leng = len(self.all_homoset.clusters)
if clusternb is not None and leng > 0:
# if clustering has been done
if leng == len(self.all_homoset.species_namelist):
# version by species
homo = hset.HomoSet()
ind = np.argwhere(np.asarray(self.all_homoset.clusters) == clusternb - 1)[:, 0]
species_name = [self.all_homoset.species_namelist[i] for i in ind]
homo.homodict = dict(self.all_homoset.homodict)
homo.homodict.remove(species_name)
homo.homo_namelist = self.all_homoset.homo_namelist
homo.species_namelist = species_name
elif leng == len(self.all_homoset.homodict):
# version by homologies
homo = hset.HomoSet()
ind = np.argwhere(np.asarray(self.all_homoset.clusters) == clusternb - 1)[:, 0]
if cleanhomo is not None:
perc = self.all_homoset.hashomo_matrix.sum(1).astype(float) / self.all_homoset.hashomo_matrix.shape[1]
homo_name = [self.all_homoset.homo_namelist[i] for i in ind if perc[i] > cleanhomo]
else:
homo_name = [self.all_homoset.homo_namelist[i] for i in ind]
for x in homo_name:
homo.update({x: self.all_homoset.homodict[x]})
homo.homo_namelist = homo_name
homo.species_namelist = self.all_homoset.species_namelist
else:
raise UnboundLocalError("you have not clusterized you 'all_homoset'. you want to just use" +
"'order_from_matrix' on it.")
return False
homo.datatype = self.all_homoset.datatype
if homologies is not None:
homo.homodict = {k: homo.homodict[k] for k in homologies}
if species is not None:
other = [item for item in homo.species_namelist if item not in species]
homo.remove(sepcies=other)
homo.loadhashomo()
# big mistake to call this as it removes also species from the all homology as they both share the same objects
# if cleanspecies is not None:
# homo.clean_species(thresh=cleanspecies)
# homo.loadhashomo()
homo.loadfullhomo()
homo.datatype = self.all_homoset.datatype
self.working_homoset = homo
return homo
def get_subset(self, homoset, withcopy=False, clusternb=None, species=None, homologies=None):
"""
either changes or returns a subset of the provided homoset
To use once if you want to further refine a set of homologies
Args:
homoset: PyCUB.homoset to get a subset from
withcopy: bool to true if we don't want to change the homoset object but create a copy from it
clusternb: int set the cluster of the group you want to get need to be between 1 and homogroupnb
homologies: list[str] the subset as a list or a tuple of int
species: list[str] the subset as a list, or a list of int
Returns:
a HomoSet object (see homoset.py)
"""
if withcopy:
homo = copy.deepcopy(homoset)
else:
homo = homoset.copy()
if homologies is not None:
homo.homodict = {k: homoset[k] for k in homologies} if type(homologies[0]) is str else \
{homoset.homo_namelist[k]: homoset[k] for k in homologies}
homo.homo_namelist = homo.homodict.keys()
homo.hashomo_matrix = None
homo.homo_matrix = None
homo.homo_matrixnames = None
homo.fulleng = None
homo.red_homomatrix = None
homo.homo_clusters = None
homo.averagehomo_matrix = None
homo.stats = {}
if species is not None:
other = [item for item in homoset.species_namelist if not (item in species)]
homo.remove(sepcies=other)
homo.loadhashomo()
homo.loadfullhomo()
return homo
def get_full_genomes(self, kingdom='fungi', seq='cds', avg=True, by="entropy", normalized=False):
"""
go through all full genome fasta files on the ftp server of ensemblgenomes and
download then parse them to get the full entropy of the genome.
useful for further comparison steps.
will populate the fullentropy, fullvarentropy, fullGCcount,
varGCcount of each species where the full sequence is known
Args:
kingdom: str flags the relevant kingdom of you current session [fungi,plants,bacteria, animals]
seq: str flags the type of sequence you consider the full genome is (coding or non coding or full) [cds, all, cdna]
avg: bool to true if we average over each gene or get the full dna in one go.
by: str flags what type of computation should be done [entropy,frequency]
normalized: should we normalize the entropy by length
Raises:
MemoryError: "this sequence is too long to be computed (> 1 billion bp)"
"""
def _compute_full_entropy(handle, by='entropy', avg=True, normalized=False, setnans=False):
"""
called by get_full_genomes, either calls utils.computeyun or processes the full coding sequence in one go
Private method. see 'get_full_genomes()'
"""
GCcount = []
val = []
if avg:
for record in SeqIO.parse(handle, "fasta"):
codseq = [record.seq._data[i:i + 3] for i in range(0, len(record.seq._data), 3)]
valH, _, _ = utils.computeyun(codseq, setnans=setnans, normalized=normalized, by=by)
val.append(valH)
GCcount.append(float(record.seq._data.count('G') + record.seq._data.count('C')) / len(record.seq._data))
va = np.zeros((len(val), utils.CUBD))
for i, v in enumerate(val):
va[i] = v
return va, np.array(GCcount)
else:
print "not working, overflow ..."
"""
pdb.set_trace()
c = []
amino = list(utils.amino)
codons = dict(utils.codons)
GCcount = 0.
for x, record in enumerate(SeqIO.parse(handle, "fasta")):
if len(c) > 1000000000:
raise MemoryError("this sequence is too long to be computed (> 1 billion bp)")
GCcount += (record.seq._data.count('G') + record.seq._data.count('C'))
for i in range(0, len(record.seq._data), 3):
c.append(record.seq._data[i:i + 3])
valH = np.zeros(len(amino)) if by != 'frequency' else np.zeros(59) # the number of codons usefull
utils.CUBD = len(amino) if by != 'frequency' else 59
pos = 0
GCcount = float(GCcount) / len(c)
for k, amin in enumerate(amino):
subcodons = codons[amin]
nbcod = len(subcodons) # replace Cleng
count = np.zeros(nbcod)
X = np.zeros(nbcod)
mn = np.ones(nbcod) / nbcod
for j, val in enumerate(c):
for i, cod in enumerate(codons[amin]):
if val == cod:
count[i] += 1
c.pop(j)
break
lengsubseq = count.sum() # replace subSlength
if by == 'frequency':
E = count / lengsubseq
valH[pos:pos + nbcod] = E
pos += nbcod
elif by == "entropy":
Yg = multinomial.pmf(x=count, n=lengsubseq, p=mn)
# efor part
div, i = divmod(lengsubseq, nbcod)
X[:int(i)] = np.ceil(div) + 1
X[int(i):] = np.floor(div)
Eg = multinomial.pmf(x=X, n=lengsubseq, p=mn)
# end here
valH[k] = -np.log(Yg / Eg) / lengsubseq if normalized else -np.log(Yg / Eg)
print "missed codons: "+str(len(c))
return valH, GCcount
"""
location = 'ftp.ensemblgenomes.org' if kingdom != 'vertebrate' else 'ftp.ensembl.org'
release = 'release-40/' if kingdom != 'vertebrate' else 'release-93'
ftp = FTP(location, timeout=3000)
ftp.login()
if kingdom == 'vertebrate':
kingdom = ''
ftp.cwd('pub/' + release + kingdom + '/fasta/')
data = []
ftp.retrlines('NLST', data.append)
species_namelist = self.species.keys()
for i, d in enumerate(data):
print "\rparsed " + str(i) + " over " + str(len(data)),
ftp.cwd(d)
if d[-10:] == 'collection':
subdata = []
ftp.retrlines('NLST', subdata.append)
for sub in subdata:
if sub in species_namelist and self.species[sub].fullentropy is None:
link = []
ftp.cwd(sub + '/' + seq)
ftp.retrlines('NLST', link.append)
with open("utils/data/temp.fa.gz", "wb") as file:
for i in link:
if i[-9:] == "all.fa.gz":
ftp.retrbinary("RETR " + i, file.write)
with gzip.open("utils/data/temp.fa.gz", "rt") as handle:
val, gccount = _compute_full_entropy(handle, by, avg)
self.species[sub].fullentropy = val.mean(0) if avg else val
self.species[sub].fullvarentropy = (val.var(0)**(0.5)).mean() if avg else None
self.species[sub].fullGCcount = gccount.mean() if avg else gccount
self.species[sub].varGCcount = gccount.var()**(0.5) if avg else None
print "done " + sub
ftp.cwd('../..')
os.remove("utils/data/temp.fa.gz")
else:
if d in species_namelist and self.species[d].fullentropy is None:
link = []
ftp.cwd(seq)
ftp.retrlines('NLST', link.append)
with open("utils/data/temp.fa.gz", "wb") as file:
for i in link:
if i[-9:] == "all.fa.gz":
ftp.retrbinary("RETR " + i, file.write)
break
with gzip.open("utils/data/temp.fa.gz", "rt") as handle:
val, gccount = _compute_full_entropy(handle, by, avg)
self.species[d].fullentropy = val.mean(0) if avg else val
self.species[d].fullvarentropy = (val.var(0)**(0.5)).mean() if avg else None
self.species[d].fullGCcount = gccount.mean() if avg else gccount
self.species[d].varGCcount = gccount.var()**(0.5) if avg else None
print "done " + d
ftp.cwd('..')
os.remove("utils/data/temp.fa.gz")
ftp.cwd('..')
def get_taxons(self):
"""
find the taxons of each referenced species (see PyCUB.Espece.gettaxons())
"""
for key, val in self.species.iteritems():
try:
val.gettaxons()
except:
print key + " has no referenced taxons"
print "got taxons"
def get_evolutionary_distance(self, display_tree=False, size=40):
"""
uses metadata of the ancestry tree and computes a theoretical evolutionary distance matrix between each species
can optionally take any hierarchical evolutionary file between a group of species
will populate utils.phylo_distances with a pandas.df of the phylodistance
and meandist with the average distance amongst species in the df, species are referenced
by their taxon ids. you have to have taxons in your species. will also plot the distance matrix
Args:
display_tree: bool to true to print the phylogenetic tree as a txt
(may be quite big)
size: int the x size of the plot
Raises:
EnvironmentError: "you need to have R installed to compute the distance"
"""
ncbi = NCBITaxa()
taxons = []
for key, val in self.species.iteritems():
if val.taxonid is not None and val.taxonid != '':
taxons.append(val.taxonid)
tree = ncbi.get_topology(taxons) # taxons
# finding what this tree looks like
if display_tree:
print tree.get_ascii(attributes=["sci_name", "rank"])
with open('utils/meta/metaphylo/temp_tree.phy', 'w') as f: # maybe will be newick format...
f.write(tree.write())
# """
try:
# https://stackoverflow.com/questions/19894365/running-r-script-from-python
base = importr('base')
utiles = importr('utils')
except:
print EnvironmentError("you need to have R installed to compute the distance")
return
if not rpackages.isinstalled('treeio'):
robjects.r('''
source("https://bioconductor.org/biocLite.R")
biocLite("treeio")
''')
robjects.r('''
treeText <- readLines("utils/meta/metaphylo/temp_tree.phy")
treeText <- paste0(treeText, collapse="")
library(treeio)
tree <- read.tree(text = treeText) ## load tree
distMat <- cophenetic(tree)
write.table(distMat,"utils/meta/metaphylo/phylodistMat_temp.csv")
''')
# """
df = pd.read_csv("utils/meta/metaphylo/phylodistMat_temp.csv", delim_whitespace=True)
dcol = {}
dind = {}
for name, species in self.species.iteritems():
if species.taxonid:
dind.update({int(species.taxonid): name})
dcol.update({unicode(species.taxonid): name})
df = df.rename(index=dind, columns=dcol)
utils.phylo_distances = df
utils.meandist = df.sum().sum() / (len(df)**2 - len(df))
self.plot_distances(size=size)
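# Hedged usage sketch (assumptions: taxon ids already retrieved via get_taxons(), and R with the
# treeio package installed): after this call, pairwise cophenetic distances can be looked up
# directly from the pandas frame returned by phylo_distances().
# >>> cub.get_taxons()
# >>> cub.get_evolutionary_distance(display_tree=False)
# >>> cub.phylo_distances().loc['saccharomyces_cerevisiae', 'candida_glabrata']  # hypothetical pair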
def createRefCAI(self, speciestocompare='saccharomyces_cerevisiae', kingdom='fungi', first=20):
"""
compute the CAI reference set:
we get Tobias' data to find highly expressed genes and
use them to compute the codon frequencies of the reference set, which are then used to compute
the CAI and mean CAI for each homology.
Args:
speciestocompare: str the name of the species to retrieve the genes from
kingdom: str the kingdom where we can find it
first: the number of highly expressed genes to retrieve
"""
if kingdom != 'fungi':
print "if kingdom is not fungi, need to provide another file"
return
data = pd.read_csv("utils/meta/protdata/tob_currated.csv")
homonames = data["ORF"].values
expres = data["Protein Abundance (molecules per cell)"].values
ind = expres.argsort()
highlyexpressed = [homonames[i] for i in ind[-first:]]  # argsort is ascending: take the most abundant genes
location = 'ftp.ensemblgenomes.org' if kingdom != 'vertebrate' else 'ftp.ensembl.org'
release = 'release-40/' if kingdom != 'vertebrate' else 'release-93'
ftp = FTP(location)
ftp.login()
if kingdom == 'vertebrate':
kingdom = ''
ftp.cwd('pub/' + release + kingdom + '/fasta/')
data = []
ftp.retrlines('NLST', data.append)
for d in data:
if d == speciestocompare:
ftp.cwd(d)
link = []
ftp.cwd('cds')
ftp.retrlines('NLST', link.append)
with open("utils/data/temp.fa.gz", "wb") as file:
for i in link:
if i[-9:] == "all.fa.gz":
ftp.retrbinary("RETR " + i, file.write)
codseq = []
with gzip.open("utils/data/temp.fa.gz", "rt") as handle:
for record in SeqIO.parse(handle, "fasta"):
if record.id in highlyexpressed:
codseq.extend([record.seq._data[i:i + 3] for i in range(0, len(record.seq._data), 3)])
os.remove("utils/data/temp.fa.gz")
return utils.reference_index(codseq, forcai=True)
def speciestable(self):
"""
a copy of the utils.speciestable
Returns:
a copy of the utils.speciestable (dict[int,str] mapping each PyCUB coded value to its species name)
"""
return dict(utils.speciestable) if utils.speciestable is not None else None
def phylo_distances(self):
"""
a copy of the phylodistances dataframe see (get_evolutionary_distance())
Returns:
a copy of the phylodistances dataframe see (get_evolutionary_distance())
"""
return utils.phylo_distances.copy() if utils.phylo_distances is not None else None
def compute_averages(self, homoset):
"""
compute the average entropy.
Will add the species-related averages computed from this homoset to the species
container and to the homoset; every time you compute averages from a set, the previous ones get erased!
Args:
homoset: PyCUB.homoset from which to compute the averages
"""
if homoset.homo_matrix is None:
homoset.loadfullhomo()
if homoset.hashomo_matrix is None:
homoset.loadhashomo()
# we get all CUB values pertaining to one specific species from
# the homoset with the full homo matrix
ind, counts = np.unique(homoset.homo_matrixnames, return_counts=True)
ind = homoset.homo_matrixnames.argsort()
GCmat = np.zeros((len(homoset.homo_namelist), len(homoset.species_namelist)))
for i, val in enumerate(homoset.homo_namelist):
GCmat[i, np.array(homoset[val].names)[np.invert(homoset[val].doub)]] = homoset[val].GCcount[np.invert(homoset[val].doub)]
GCmat = GCmat.sum(0) / np.count_nonzero(GCmat, 0)
for i, spece in enumerate(homoset.species_namelist):
self.species[spece].meanGChomo = GCmat[i]
pos = 0
speciestable = dict(utils.speciestable)
for i, un in enumerate(counts):
aslicetoavg = homoset.homo_matrix[ind[pos:pos + un]]
bslicetoavg = homoset.fulleng[ind[pos:pos + un]]
self.species[speciestable[i]].average_entropy = aslicetoavg.mean(axis=0)
self.species[speciestable[i]].average_size = bslicetoavg.sum(1).mean()
# variances are mean variances over all values
self.species[speciestable[i]].var_entropy = (aslicetoavg.var(axis=0)**(0.5)).mean()
pos += un
self.species[speciestable[i]].tot_homologies = un
print str(float(i) / len(speciestable)) + "% have entropy"
print "homology averages : " + str(homoset.homo_matrix.mean(axis=0))
homoset.averagehomo_matrix = np.array([homoset[homo].mean for homo in homoset.homo_namelist])
for _, val in homoset.iteritems():
val.compute_averages()
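# Hedged usage sketch: averages must be computed once per homoset before the comparison plots or
# the regressions below can run (assumption: `cub.all_homoset` was populated by get_data or load).
# >>> cub.compute_averages(cub.all_homoset)
# >>> cub.compare_species(to='avg', reducer='tsne')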
# CpCpGpApApTpApTpApTpTpCpCpGpApApTpApTpApTpTpCpCpGpApApTpApTpApTpTpCpCpGpApApTpApTpApTpTpTpTpCpCpGpApApTpApTpApTpTp
# GbGbCbTbTbAbTbAbTbAbAbGbGbCbTbTbAbTbAbTbAbAbGbGbCbTbTbAbTbAbTbAbAbGbGbCbTbTbAbTbAbTbAbAbAbGbGbCbTbTbAbTbAbTbAbAbAb
def compare_species(self, to="full", showvar=True, reducer='tsne', perplexity=40, eps=0.3, size=10, varmult=1):
"""
compare the species according to their mean CUB:
plot the mean CUB
against their full CUB, their tRNA copy numbers, and the euclidean distance of their CUB
to their phylogenetic distance matrix.
in this plot, the mean entropy value is plotted as a regular homology plot but each dot is a species
thus we can compare them together, moreover, the size of the dots informs oneself of the variance
in entropy per species. the color intensity informs on how much this is close to what is given by the
tRNA values. (additional information such as name of the species, number of tRNA values,metadata and the point
from its tRNA value is plotted when hovering the dot)
then we can also compare either the mean over homologies or the full entropy of the cdna sequence per species;
the euclidean distance amongst the species for the full entropy is also computed to see if a difference can be linked
to some evolutionary trend for one codon
Args:
size: the average size of the datapoints in the pointcloud representation of this dataset
showvar: bool to true, show the mean variance in CUB values across this homology as a variation in dot sizes
eps: float the hyperparamter of the clustering algorithm applied to this dataset
reducer: str the reducer to use 'tsne' or 'PCA'
perplexity: int the perplexity hyperparam for tSNE
Raises:
UnboundLocalError: "you need to compute the averages of the all_homoset. use PyCUB.compute_averages(homoset)"
UnboundLocalError: "you have to compute tRNA values, use PyCUB.get_tRNAcopy()"
AttributeError: "try avg or full"
"""
e_subspecies = np.zeros((len(self.species), utils.CUBD))
vare_subspecies = np.zeros((len(self.species), 1))
tRNAentropydist = np.zeros(len(self.species))
tRNA_number = np.zeros((len(self.species), 1), dtype=int)
genome_size = np.zeros((len(self.species), 1), dtype=int)
efulldiff = np.zeros((len(self.species), 1))
fullvarentropy = np.zeros((len(self.species), 1))
varGCcount = np.zeros((len(self.species), 1))
gcfulldiff = np.zeros((len(self.species), 1))
num_genes = np.zeros((len(self.species), 1), dtype=int)
meanGChomo = np.zeros((len(self.species), 1))
suff = 0
phylo_distances = self.phylo_distances()
if self.species.values()[0].average_entropy is None:
raise UnboundLocalError("you have to compute averages, use PyCUB.compute_averages(homoset)")
for i, (name, specie) in enumerate(self.species.iteritems()):
if specie.average_entropy is not None:
if np.isnan(specie.average_entropy).any():
specie.average_entropy[np.isnan(specie.average_entropy)] = 0
e_subspecies[i] = specie.average_entropy
vare_subspecies[i] = specie.var_entropy
meanGChomo[i] = specie.meanGChomo
if specie.fullentropy is not None:
if np.isnan(specie.fullentropy).any():
specie.fullentropy[np.isnan(specie.fullentropy)] = 0
if np.isinf(specie.fullentropy).any():
specie.fullentropy[np.isinf(specie.fullentropy)] = 0
if specie.average_entropy is not None:
efulldiff[i] = euclidean(specie.average_entropy, specie.fullentropy)
gcfulldiff[i] = euclidean(specie.meanGChomo, specie.fullGCcount)
varGCcount[i] = specie.varGCcount
fullvarentropy[i] = specie.fullvarentropy
if specie.copynumbers is not None:
if specie.tRNAentropy is not None:
if to == 'full' and specie.fullentropy is not None:
tRNAentropydist[i] = euclidean(specie.tRNAentropy, specie.fullentropy)
elif to == 'avg' and specie.average_entropy is not None:
tRNAentropydist[i] = euclidean(specie.tRNAentropy, specie.average_entropy)
suff += 1
# if is zero, will be black colored
if specie.copynumbers.get("datapoints", False):
tRNA_number[i] = specie.copynumbers["datapoints"]
# if is zero, will be black colored
else:
raise UnboundLocalError("you have to compute tRNA values, use PyCUB.get_tRNAcopy()")
if specie.genome_size:
genome_size[i] = specie.genome_size
if specie.num_genes:
num_genes[i] = specie.num_genes
# if is zero, will be black colored
print "we have " + str(suff) + " species with sufficient statistics in their tRNA values"
if reducer == 'tsne':
red = man.TSNE(n_components=2, perplexity=perplexity).fit_transform(e_subspecies)
elif reducer == 'pca':
red = PCA(n_components=2).fit_transform(e_subspecies)
alg = cluster.DBSCAN(eps=eps, min_samples=6, algorithm='auto', n_jobs=-1)
clusters = alg.fit_predict(e_subspecies).tolist()
colormap = list(utils.colormap)
colors = [colormap[0] if not dist else
utils.rgb2hex((126, 8, 10 + np.floor(246 * dist / tRNAentropydist.max()))) for dist in np.log(np.ma.masked_equal(tRNAentropydist, 0))]
data = dict(x=red[:, 0], y=red[:, 1],
species=self.species.keys(),
meanentropy=["%.2f" % i.mean() for i in e_subspecies],
color=colors,
vare_subspecies=vare_subspecies,
tRNA_number=np.log(tRNA_number),
recent=colors,
tRNAentropydist=np.log(np.ma.masked_equal(tRNAentropydist, 0) * 1000),
genome_size=genome_size,
num_genes=num_genes,
efulldiff=efulldiff,
gcfulldiff=gcfulldiff,
gccount=meanGChomo,
varGCcount=varGCcount,
fullvarentropy=fullvarentropy,
clusters=clusters,
size=[(size / 2) + (varmult * (val.var_entropy)) if val.var_entropy != 0 else size
for _, val in self.species.iteritems()] if showvar else size)
# compare it to the phylogenetic distance matrix and the sizes
if phylo_distances is not None:
names = list(phylo_distances.index)
distances = np.zeros(len(self.species))
for i, val in enumerate(self.species.keys()):
if val in names:
distances[i] = phylo_distances[val].values.mean()
data.update({'distances': distances})
else:
print "no phylo distances"
labe = ["show clusters", "show num_genes", "show genome_size",
"show full/mean CUB diff", "show GCcount", "show full/mean GC diff",
"show GC variance", "show distance to tRNA UB", "show tRNA number", "show avg phylodistance",
"show full phylo distance"] # 11
for i, (key, val) in enumerate(self.species[self.species.keys()[0]].metadata.iteritems()):
labe.append("show if " + key)
data.update({str(i + 11): [espe.metadata[key] if espe.metadata is not None else False for _, espe in self.species.iteritems()]})
"""
to implement full phylo distance, we need to add onHover, and put in data a dict which associate
for each species name, a species ordered list of normalized distance values from the phylodistance
dataframe
"""
source = ColumnDataSource(data=data)
output_notebook()
callback = CustomJS(args=dict(source=source), code=utils.callback_allgenes)
radio_button_group = widgets.RadioButtonGroup(
labels=labe, callback=callback, active=7)
hover = HoverTool(tooltips=[("species: ", "@species"), ("mean entr: ", "@meanentropy"),
("phylodistances: ", "@distances"), ("tRNA CN: ", "@tRNA_number"),
("dist2tRNA Bias Value: ", "@tRNAentropydist"), ("GCbias: ", "@gccount")])
p = figure(title="exploration of every homologies",
tools=[hover, WheelZoomTool(), PanTool(), SaveTool(), ResetTool()],
plot_width=800, plot_height=600)
p.circle(x='x', y='y', source=source, color='color', size='size')
save(column(radio_button_group, p), "utils/templot/species_compare.html")
show(column(radio_button_group, p))
def compute_ages(self, homoset, preserved=True, minpreserv=0.9, minsimi=0.85, redo=False):
homoset.compute_ages(preserved=preserved, minpreserv=minpreserv, minsimi=minsimi, redo=redo)
def regress_on_species(self, without=[""], full=True, onlyhomo=False, perctrain=0.8, algo="lasso",
eps=0.001, n_alphas=100):
"""
Will fit a regression curve on the CUB values of the different species according to the metadatas available for each of them.
It will try to see if there is enough information in the metadata to retrieve CUB values. and if there is,
how much for each metadata (if we constraint the number of regressors) is it better for mean homology CUB
or full genome CUB ?
or raw frequency, should we remove some data?
Args:
without: list[str] of flags [similarity_scores, KaKs_Scores, nans, lenmat, GCcount, weight,
protein_abundance, mRNA_abundance, decay_rate, cys_elements, tot_volume, mean_hydrophobicity,
glucose_cost, synthesis_steps, is_recent, meanecai]
onlyhomo: bool to true if want to use only CUB from homologies
full: bool flags to true to use full CUB values or meanCUB values, as regressee
perctrain: the percentage of training set to total set ( the rest is used as test set)
algo: str flag to lasso or nn to use either Lasso with Cross Validation, or a 2 layer neural net
eps: the eps value for the Lasso
n_alphas: the number of alphas for the lasso
Returns:
scoregenes: float, the score of the regression performed
coeffgenes: the coefficient applied to each category (for each CUB value if using full)
attrlist: the corresponding list[str] of attribute used
Raises:
UnboundLocalError: "wrong params"
"""
params = []
phylo_distances = self.phylo_distances()
espece = self.species.values()[0]
attrlist = ["average_size", "num_genes", "genome_size", "fullGCcount",
"varGCcount", "tot_homologies"]
dataset = np.zeros((len(self.species), utils.CUBD))
for i, (_, v) in enumerate(self.species.iteritems()):
if onlyhomo or v.fullentropy is None:
if v.average_entropy is not None:
dataset[i] = v.average_entropy
else:
dataset[i] = v.fullentropy
if not full:
dataset = dataset.mean(1)
for attr in attrlist:
if getattr(espece, attr) is not None and attr not in without:
arr = np.array([getattr(spece, attr) for spece in self.species.values()]).astype(float)
arr = arr / arr.max()
if not full:
print attr + ': ' + str(spearmanr(np.ma.masked_equal(arr, 0), np.ma.masked_equal(dataset, 0), axis=None))
params.append(arr)
for key in self.species.values()[0].metadata.keys():
if str(key) not in without:
attrlist.append(key)
params.append(np.array([spece.metadata.get(key, 0) if spece.metadata is not None else False
for spece in self.species.values()]).astype(int))
if phylo_distances is not None and "phylo_distances" not in without:
attrlist.append("phylo_distances")
names = list(phylo_distances.index)
arr = np.array([phylo_distances[val].values.mean() if val in names else 0 for val in self.species.keys()])
arr = arr / arr.max()
params.append(arr)
if not full:
print 'phylogenetic distances: ' + str(spearmanr(np.ma.masked_equal(arr, 0), np.ma.masked_equal(dataset, 0), axis=None))
if algo == "lasso":
# http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LassoCV.html
model = MultiTaskLassoCV(eps=eps, n_alphas=n_alphas,
alphas=None, fit_intercept=True, normalize=False,
max_iter=1000, tol=0.0001, copy_X=False, cv=None,
verbose=False, n_jobs=1, random_state=None, selection='cyclic') \
if full else LassoCV(eps=eps, n_alphas=n_alphas,
alphas=None, fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=0.0001, copy_X=False, cv=None, verbose=False, n_jobs=-1,
positive=False, random_state=None, selection='cyclic')
elif algo == "nn" and not full:
# http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Perceptron.html
model = MLPRegressor(hidden_layer_sizes=(len(attrlist), len(attrlist)), activation='relu', solver='adam', alpha=0.0001,
batch_size='auto', learning_rate='constant', learning_rate_init=0.001,
power_t=0.5, max_iter=200, shuffle=True, random_state=None, tol=0.0001,
verbose=1, warm_start=False, momentum=0.9, nesterovs_momentum=True,
early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
else:
raise UnboundLocalError("wrong params")
params = np.vstack(params).T
model.fit(params[:int(len(self.species) * perctrain)], dataset[:int(len(self.species) * perctrain)])
self.scorespecies = model.score(params[int(len(self.species) * perctrain):],
dataset[int(len(self.species) * perctrain):], sample_weight=None)
self.coeffspecies = model.coef_.tolist() if algo == "lasso" else model.coefs_
print "the R^2 score is of: " + str(self.scorespecies)
print "-------------------------------"
if algo == "lasso":
for i, val in enumerate(attrlist):
print val + ": " + str(self.coeffspecies[i])
return self.scorespecies, self.coeffspecies, attrlist
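# Hedged usage sketch (assumption: species averages and, ideally, full-genome entropies already computed):
# a LassoCV fit of mean CUB per species on the available species-level metadata.
# >>> score, coefs, attrs = cub.regress_on_species(full=False, algo='lasso', perctrain=0.8)
# >>> sorted(zip(attrs, coefs), key=lambda t: abs(t[1]), reverse=True)[:5]  # strongest regressors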
# TODO: create a model that can say if the species has a high CUB or low, given data on the species and on the protein
def compare_homologies(self, homoset, homosapiens=False, mindistance=10, preserved=True, size=6, varsize=2,
minpreserv=0.9, minsimi=0.9, showvar=True, eps=0.28, reducer='tsne', perplexity=40):
"""
finds for species with a common ancester separated by a pseudo phylogenetic distance X,
genes/functions that are novel to only a subset.
plot the two with their differences the differences between the two
also considers an homology as highly preserved if it is shared amongst most of the species
and if the average similarity score is high amongst this homology.
also shows if there is a relationship between the number of amino acids the sequence does not
encode for and the codon usage bias
We could have used the sequence dating of ensembl but it only works for homo sapiens for now
maybe use it for homosapiens later
Args:
homosapiens: bool to true if we should use homosapiens dataset on gene dates
mindistance: int the minimal phylogenetic distance between in average in this homology to consider it
highly conserved
preserved: bool to true if we should find highly preserved genes or not
size: the average size of the datapoints in the pointcloud representation of this dataset
minpreserv: float minimal percentage of homologous species that have this homology
minsimi: float minimal avg similarity between genes to consider them highly preserved
showvar: bool to true, show the mean variance in CUB values across this homology as a variation in dot sizes
eps: float the hyperparamter of the clustering algorithm applied to this dataset
homoset: PyCUB.homoset the homoset to use
reducer: str the reducer to use 'tsne' or 'PCA'
perplexity: int the perplexity hyperparam for tSNE
Raises:
UnboundLocalError: "you need to compute the averages of the all_homoset. use PyCUB.compute_averages(homoset)"
"""
if not homosapiens:
if homoset[-1].isrecent is None:
homoset.compute_ages(preserved=preserved, minpreserv=minpreserv, minsimi=minsimi)
else:
pass
# TODO: code the version for homo sapiens where we know exactly this distance with more data
# and better inference metrics
# display the differences between recent homologies and older ones
pdb.set_trace()
averagehomo_matrix = np.zeros((len(homoset), utils.CUBD))
for i, homo in enumerate(homoset.homo_namelist):
averagehomo_matrix[i] = homoset[homo].mean
if reducer == 'tsne':
red = man.TSNE(n_components=2, perplexity=perplexity).fit_transform(averagehomo_matrix)
elif reducer == 'pca':
red = PCA(n_components=2).fit_transform(averagehomo_matrix)
else:
raise AttributeError("wrong algorithm")
alg = cluster.DBSCAN(eps=eps, min_samples=7, algorithm='auto', n_jobs=-1)
clusters = alg.fit_predict(averagehomo_matrix).tolist()
n_clusters_ = len(set(clusters))
if n_clusters_ > 10:
print "ooups you have more than 10 clusters"
colormap = list(utils.colormap)
colors = [colormap[int(homoset[homo].ishighpreserved)] if not homoset[homo].isrecent else
utils.rgb2hex((126, 88, np.floor(156 * homoset[homo].isrecent))) for homo in homoset.homo_namelist]
data = dict(x=red[:, 0], y=red[:, 1],
homologies=homoset.homo_namelist,
meanentropy=["%.2f" % averagehomo_matrix[i].mean()
for i in range(len(averagehomo_matrix))],
color=colors,
recent=colors,
clusters=clusters,
size=[size + (varsize * self.all_homoset[homo].var.mean()) if self.all_homoset[homo].var is not None
else size for homo in self.all_homoset.homo_namelist] if showvar else size)
# add average of similar protein name
values = ["similarity_scores", "KaKs_Scores", "nans", "lenmat", "GCcount", "weight",
"protein_abundance", "mRNA_abundance", "decay_rate", "is_secreted", "cys_elements",
"tot_volume", "mean_hydrophobicity", "glucose_cost", "synthesis_steps", "isoelectricpoint", "meanecai", "meancai", "conservation"]
labe = ["show Recent/preserved", "showclusters", "show avg similarity_scores", "show avg KaKs_Scores", "show Nans avg",
"show avg Length", "show avg GCcount", "Show weight", "Show prot abundance", "Show mRNA abundance",
"Show half life", "Show secreted", "Show num of cys", "Show volume", "Show hydrophobicity", "show cost (glucose)",
"Show synthesis cost", "Show Pi", "Show ECAI", "Show CAI", "show amino Conservation"] # 21
templabe = labe[:2]
i = 2
for val in values[:5]:
if getattr(homoset[0], val) is not None:
data.update({val: np.array([getattr(homoset[homo], val).mean() for homo in homoset.homo_namelist])})
templabe.append(labe[i])
else:
templabe.append(" ")
i += 1
for val in values[5:]:
data.update({val: np.nan_to_num(np.array([getattr(homoset[homo], val) if getattr(homoset[homo], val) is not None else 0 for homo in homoset.homo_namelist]))})
templabe.append(labe[i])
i += 1
for k, v in data.iteritems():
if k == "conservation":
data[k] = v - v.min()  # shift so the minimum is zero
elif k == "mRNA_abundance" or k == "protein_abundance":
data[k] = np.log(1 + v)  # log-scale the abundances; reassigning v alone would not update the dict
source = ColumnDataSource(data=data)
output_notebook()
callback = CustomJS(args=dict(source=source), code=utils.callback_allhomo)
radio_button_group = widgets.RadioButtonGroup(
labels=templabe, callback=callback, active=0)
hover = HoverTool(tooltips=[("homologies: ", "@homologies"), ("avg nans: ", "@nans"), ("similarity scores: ", "@similarity_scores"),
("mRNA abundance: ", "@mRNA_abundance"), ("mean ecai: ", "@meanecai"), ("amino conservation: ", "@conservation"),
("mean_entr: ", "@meanentropy"), ("length: ", "@lengths"), ("GCcount: ", "@gc")])
p = figure(title="exploration of every homologies",
tools=[hover, WheelZoomTool(), PanTool(), SaveTool(), ResetTool()],
plot_width=800, plot_height=600)
p.circle(x='x', y='y', source=source, color='color',
size='size')
save(column(radio_button_group, p), "utils/templot/homology_compare.html")
show(column(radio_button_group, p))
def regress_on_genes(self, homoset, full=True, without=['meanecai', 'meancai'], perctrain=0.8, algo="lasso", eps=0.001, n_alphas=100):
"""
Will fit a regression curve on the CUB values of the different homologies according to the metadata available for each of them.
It will try to see if there is enough information in the metadata to retrieve CUB values, and if there is,
how much for each metadata (if we constrain the number of regressors). Is it better for entropy values, mean entropy
or ECAI values
or raw frequency, should we remove some data
Args:
without: list[str] of flags [similarity_scores, KaKs_Scores, nans, lenmat, GCcount, weight,
protein_abundance, mRNA_abundance, decay_rate, cys_elements, tot_volume, mean_hydrophobicity,
glucose_cost, synthesis_steps, is_recent, meanecai]
full: bool flags to true to use full CUB values or meanCUB values, as regressee
homoset: PyCUB.homoset the homoset to use
perctrain: the percentage of training set to total set ( the rest is used as test set)
algo: str flag to lasso or nn to use either Lasso with Cross Validation, or a 2 layer neural net
eps: the eps value for the Lasso
n_alphas: the number of alphas for the lasso
Returns:
scoregenes: float, the score of the regression performed
coeffgenes: the coefficient applied to each category (for each CUB value if using full)
attrlist: the corresponding list[str] of attribute used
Raises:
UnboundLocalError: "wrong params"
"""
params = []
dataset = np.nan_to_num(homoset.averagehomo_matrix) if full else np.nan_to_num(homoset.averagehomo_matrix).mean(1)
values = ["similarity_scores", "KaKs_Scores", "nans", "lenmat", "GCcount", "weight",
"protein_abundance", "mRNA_abundance", "decay_rate", "is_secreted", "cys_elements",
"tot_volume", "mean_hydrophobicity", "glucose_cost", "synthesis_steps",
"isoelectricpoint", "meanecai", "meancai", "conservation"]
attrlist = []
pdb.set_trace()
for val in values[:5]:
if val not in without:
if getattr(homoset[0], val) is not None:
arr = np.nan_to_num(np.array([getattr(homoset[homo], val).mean() for homo in homoset.homo_namelist])).astype(float)
arr = arr / arr.max()
if not full:
print val + ': ' + str(spearmanr(np.ma.masked_equal(arr, 0), np.ma.masked_equal(dataset,0), axis=None))
params.append(arr)
attrlist.append(val)
for val in values[5:]:
if val not in without:
if getattr(homoset[0], val) is not None:
arr = np.nan_to_num(np.array([getattr(homoset[homo], val) for homo in homoset.homo_namelist])).astype(float)
arr = arr / arr.max()
if not full:
print val + ': ' + str(spearmanr(np.ma.masked_equal(arr, 0), np.ma.masked_equal(dataset,0), axis=None))
params.append(arr)
attrlist.append(val)
if algo == "lasso":
# http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LassoCV.html
print "change"
model = MultiTaskLassoCV(eps=eps, n_alphas=n_alphas,
alphas=None, fit_intercept=True, normalize=True,
max_iter=1000, tol=0.0001, copy_X=False, cv=None,
verbose=False, n_jobs=1, random_state=None, selection='cyclic')\
if full else LassoCV(eps=eps, n_alphas=n_alphas,
alphas=None, fit_intercept=True, normalize=True, precompute='auto',
max_iter=1000, tol=0.0001, copy_X=False, cv=None, verbose=False, n_jobs=-1,
positive=False, random_state=None, selection='cyclic')
elif algo == "nn" and not full:
# http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Perceptron.html
model = MLPRegressor(hidden_layer_sizes=(len(attrlist), len(attrlist)), activation='relu', solver='adam', alpha=0.0001,
batch_size='auto', learning_rate='constant', learning_rate_init=0.001,
power_t=0.5, max_iter=200, shuffle=True, random_state=None, tol=0.0001,
verbose=1, warm_start=False, momentum=0.9, nesterovs_momentum=True,
early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
else:
raise UnboundLocalError("wrong params")
params = np.vstack(params).T
model.fit(params[:int(len(homoset.homo_namelist) * perctrain)], dataset[:int(len(homoset.homo_namelist) * perctrain)])
self.scoregenes = model.score(params[int(len(homoset.homo_namelist) * perctrain):],
dataset[int(len(homoset.homo_namelist) * perctrain):], sample_weight=None)
self.coeffgenes = model.coef_.tolist() if algo == "lasso" else model.coefs_
print "the R^2 score is of: " + str(self.scoregenes)
print "-------------------------------"
if model == "lasso":
for i, val in enumerate(attrlist):
print val + ": " + str(self.coeffgenes[i])
return self.scoregenes, self.coeffgenes, attrlist
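    # Hypothetical usage sketch (the `cub` name below is illustrative and stands for a PyCUB workspace instance;
    # it is not defined in this file):
    # score, coeffs, attrs = cub.regress_on_genes(cub.working_homoset, full=False, algo="lasso")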
def getRelation2G3DD(self, species_name='saccharomyces_cerevisiae', kingdom='fungi',
intrachromosome="utils/meta/3Dmodel/interactions_HindIII_fdr0.01_intra_cerevisiae.csv",
interchromose=["utils/meta/3Dmodel/cerevisiae_inter1.csv",
"utils/meta/3Dmodel/cerevisiae_inter2.csv",
"utils/meta/3Dmodel/cerevisiae_inter3.csv",
"utils/meta/3Dmodel/cerevisiae_inter4.csv",
"utils/meta/3Dmodel/cerevisiae_inter5.csv"], bins=2000, seq='cds', use='diament2',
euclide=False, homomean=False):
"""
https://www.nature.com/articles/ncomms6876
        retrieves the data for the species Saccharomyces cerevisiae and Schizosaccharomyces pombe
        and tests whether similarity distances of CUB (using entropy) between genes of these species are predictive
        of the closeness of the genes in the nucleus.
        Used to confirm a study published in Nature and to see whether similar results can be obtained by looking
        only at the CUB
Args:
species_name: str the name of the species to look for
kingdom: str the kingdom in which to find the species
intrachromosome: str the location of the csv interaction data for intrachromosome respecting the format
of the default file
interchromose: str the location of the csv interaction data for interchromose respecting the format
of the default file
bins: int, the number of bin to use (a power of 2)
seq: the type of sequence to compare to. (to compute the CUB from)
use: str flag different types of algorithm I have made trying to understand the thing
compute: str flag to different computation available
            euclide: bool, flag to True to compute the euclidean distance instead of the Endres-Schindelin metric
"""
# get gene distance matrix from entropy value distance or Andres Schindelin metrics
# compare to see how much the distance between one can explain the distance between another by
# regression
# retrieve the data.
intra = pd.read_csv(intrachromosome, delim_whitespace=True).drop(columns=["qvalue", "freq"])
inter = pd.concat([pd.read_csv(interchro) for interchro in interchromose]).drop(columns=[
"P value", "Q value", "sequence frequency"])
# getting all the genes
torname = {"HindIII fragment": "locus1",
"HindIII fragment.1": "locus2",
"chromosome": "chr1",
"chromosome.1": "chr2"}
inter = inter.rename(torname, axis="columns")
df = pd.concat([intra, inter])
df = df.sort_values(by=["chr1", "locus1"])
df = df.reset_index()
chrom2int = {
'I': 1,
'II': 2,
'III': 3,
'IV': 4,
'V': 5,
'VI': 6,
'VII': 7,
'VIII': 8,
'IX': 9,
'X': 10,
'XI': 11,
'XII': 12,
'XIII': 13,
'XIV': 14,
'XV': 15,
'XVI': 16,
'XVII': 17,
'XVIII': 18,
'XIX': 19,
'XX': 20,
'XXI': 21
}
location = 'ftp.ensemblgenomes.org' if kingdom != 'vertebrate' else 'ftp.ensembl.org'
release = 'release-40/' if kingdom != 'vertebrate' else 'release-93'
ftp = FTP(location)
ftp.login()
if kingdom == 'vertebrate':
kingdom = ''
ftp.cwd('pub/' + release + kingdom + '/fasta/')
data = []
ftp.retrlines('NLST', data.append)
for d in data:
if d in species_name:
ftp.cwd(d)
pdb.set_trace()
link = []
ftp.cwd(seq)
ftp.retrlines('NLST', link.append)
with open("utils/data/temp.fa.gz", "wb") as file:
for i in link:
if i[-9:] == "all.fa.gz":
ftp.retrbinary("RETR " + i, file.write)
vals = []
cufs = []
cubs = []
names = []
positions = []
nb = 0
codons = list(utils.codamino.keys())
with gzip.open("utils/data/temp.fa.gz", "rt") as handle:
for record in SeqIO.parse(handle, "fasta"):
uncounted = len(record.seq._data) - (record.seq._data.count('A') + record.seq._data.count('T') +
record.seq._data.count('C') + record.seq._data.count('G'))
if uncounted:
print "ref uncounted = " + str(uncounted)
codseq = record.seq._data.replace("Y", "T").replace("R", "G").replace("K", "G")\
.replace("M", "A").replace('S', 'C').replace("W", "A").replace("B", "C").replace("D", "T")\
.replace("H", "T").replace("V", "G").replace("N", "C")
else:
codseq = record.seq._data
codseq = [codseq[i:i + 3] for i in range(0, len(codseq), 3)]
leng = len(codseq)
CuF = np.zeros(64)
for i, val in enumerate(codons):
CuF[i] = codseq.count(val)
valH, CuB, len_i, _ = utils.computeyun(codseq, setnans=False, normalized=False,
by="entropy" + "frequency")
if self.all_homoset.datatype=="entropylocation":
valH = utils.getloc(valH, len_i)
if homomean:
if self.all_homoset.datatype!="frequency":
other = valH
else:
other = CuB
vals.append(self.all_homoset[record.id].mean if self.all_homoset.get(record.id, False) else other)
else:
vals.append(valH)
cufs.append((CuF / leng).tolist())
cubs.append(CuB)
nb += 1
server = "http://rest.ensemblgenomes.org" if kingdom != 'vertebrate' else "http://rest.ensembl.org"
names.append(record.id)
ext = "/lookup/id/" + record.id + "?expand=1"
r = requests.get(server + ext, headers={"Content-Type": "application/json"})
if not r.ok:
r.raise_for_status()
sys.exit()
print '{0} genes\r'.format(nb),
decoded = r.json()
chrom = chrom2int.get(str(decoded["seq_region_name"]), False)
if not chrom:
names.pop()
vals.pop()
cufs.pop()
cubs.pop()
continue
# we associate valH to a position retrieve the positions
positions.append([chrom, (decoded["start"] + decoded["end"]) / 2])
ind = sorted(range(len(positions)), key=lambda k: positions[k])
vals = np.array(vals)[ind]
cufs = np.array(cufs)[ind]
cubs = np.array(cubs)[ind]
positions = np.array(positions)[ind]
names = [names[i] for i in ind]
pdb.set_trace()
tempchrom = 0
tempind = 0
tempdf = df.loc[df['chr1'] == 1]
gene2pos = {}
pos2gene = {}
dists = np.zeros(len(positions))
tempdist = 1000000
for n, val in enumerate(positions):
if val[0] >= tempchrom + 1:
tempdf = df.loc[df['chr1'] == val[0]]
tempchrom = val[0]
tempind = tempdf.index[0]
maxind = tempdf.index[-1]
            # Here we could use a modified binary search instead
while abs(tempdf['locus1'][tempind] - val[1]) <= tempdist:
tempdist = abs(tempdf['locus1'][tempind] - val[1])
tempind += 1
if tempind >= maxind:
break
dists[n] = tempdist
tempind -= 1
# we found a position
tempdist = 10000000
gene2pos.update({n: [tempchrom, tempdf['locus1'][tempind]]})
if pos2gene.get((tempchrom, tempdf['locus1'][tempind]), False):
pos2gene[(tempchrom, tempdf['locus1'][tempind])].append(n)
else:
pos2gene.update({(tempchrom, tempdf['locus1'][tempind]): [n]})
# for each gene positions we look at the closest point in the contact map (list of positison)
# we create a mapping dict for that.
tempchrom = 0
print "average distance is" + str(dists.mean())
missedrelation = 0
tempdf = df.loc[df['chr1'] == 1]
dist3D = np.zeros((len(positions), len(positions)), dtype=int)
dist3D += 1000000
np.fill_diagonal(dist3D, 0)
# Doing the first one for efficiency
tempdf = df.loc[df['chr1'] == 1]
relatedto = tempdf.loc[tempdf['locus1'] == gene2pos[0][1]]
chro = list(relatedto["chr2"])
loc = list(relatedto["locus2"])
for i in range(len(chro)):
pos = pos2gene.get((chro[i], int(loc[i])), False)
if pos:
for p in pos:
dist3D[p, 0] = 1
dist3D[0, p] = 1
for p_ in pos:
dist3D[p, p_] = 1
dist3D[p_, p] = 1
else:
missedrelation += 1
n = 1
for val in positions[1:]:
dist3D[n, n - 1] = 1
dist3D[n - 1, n] = 1
if val[0] >= tempchrom + 1:
tempdf = df.loc[df['chr1'] == val[0]]
relatedto = tempdf.loc[tempdf['locus1'] == gene2pos[n][1]]
chro = list(relatedto["chr2"])
loc = list(relatedto["locus2"])
for i in range(len(chro)):
pos = pos2gene.get((chro[i], int(loc[i])), False)
if pos:
for p in pos:
dist3D[p, n] = 1
dist3D[n, p] = 1
for p_ in pos:
dist3D[p, p_] = 1
dist3D[p_, p] = 1
else:
missedrelation += 1
n = 1
for val in positions[1:]:
dist3D[n, n - 1] = 1
dist3D[n - 1, n] = 1
if val[0] >= tempchrom + 1:
tempdf = df.loc[df['chr1'] == val[0]]
relatedto = tempdf.loc[tempdf['locus1'] == gene2pos[n][1]]
chro = list(relatedto["chr2"])
loc = list(relatedto["locus2"])
for i in range(len(chro)):
pos = pos2gene.get((chro[i], int(loc[i])), False)
if pos:
for p in pos:
dist3D[p, n] = 1
dist3D[n, p] = 1
else:
missedrelation += 1
n += 1
print "got " + str(missedrelation) + " missed relation"
else:
gene2pos = {}
pos2gene = {}
dists = np.zeros(len(positions))
tempdist = 1000000
for n, val in enumerate(positions):
if val[0] >= tempchrom + 1:
tempdf = df.loc[df['chr1'] == val[0]]
tempchrom = val[0]
tempind = tempdf.index[0]
maxind = tempdf.index[-1]
                # Here we could use a modified binary search instead
while abs(tempdf['locus1'][tempind] - val[1]) <= tempdist:
tempdist = abs(tempdf['locus1'][tempind] - val[1])
tempind += 1
if tempind >= maxind:
break
dists[n] = tempdist
tempind -= 1
# we found a position
tempdist = 10000000
gene2pos.update({n: [tempchrom, tempdf['locus1'][tempind]]})
if pos2gene.get((tempchrom, tempdf['locus1'][tempind]), False):
pos2gene[(tempchrom, tempdf['locus1'][tempind])].append(n)
else:
pos2gene.update({(tempchrom, tempdf['locus1'][tempind]): [n]})
# for each gene positions we look at the closest point in the contact map (list of positison)
# we create a mapping dict for that.
tempchrom = 0
print "average distance is" + str(dists.mean())
missedrelation = 0
tempdf = df.loc[df['chr1'] == 1]
dist3D = np.zeros((len(positions), len(positions)), dtype=int)
dist3D += 1000000
np.fill_diagonal(dist3D, 0)
# Doing the first one for efficiency
tempdf = df.loc[df['chr1'] == 1]
relatedto = tempdf.loc[tempdf['locus1'] == gene2pos[0][1]]
chro = list(relatedto["chr2"])
loc = list(relatedto["locus2"])
for i in range(len(chro)):
pos = pos2gene.get((chro[i], int(loc[i])), False)
if pos:
for p in pos:
dist3D[p, 0] = 1
dist3D[0, p] = 1
else:
missedrelation += 1
n = 1
for val in positions[1:]:
dist3D[n, n - 1] = 1
dist3D[n - 1, n] = 1
if val[0] >= tempchrom + 1:
tempdf = df.loc[df['chr1'] == val[0]]
relatedto = tempdf.loc[tempdf['locus1'] == gene2pos[n][1]]
chro = list(relatedto["chr2"])
loc = list(relatedto["locus2"])
for i in range(len(chro)):
pos = pos2gene.get((chro[i], int(loc[i])), False)
if pos:
for p in pos:
dist3D[p, n] = 1
dist3D[n, p] = 1
else:
missedrelation += 1
n += 1
print "got " + str(missedrelation) + " missed relation"
shortestpath = dijkstra(dist3D, directed=False) # to set shortest as 1
# for each genes, we look if there is a contact gene in the contact map with the mapping
# if the genes are the ones next to each other, we mark them as close to each other
# true also if on different chrom
# then we compute 1hop distances on this matrix
#
# if tempchrom + 1 == val['chr1']
# val['chr1']
# for all values
# we take n subsets for which we compute the average CUB,
# then for each we compute a CUB distance value as a big distance
# matrix of size n x n n should be 32 000
# we create another distance matrix using an Andres distance metrics on the CUF values
if use != "jerem1":
distcuf = np.zeros((len(positions), len(positions)), dtype=float)
distcub = np.zeros((len(positions), len(positions)), dtype=float)
distent = np.zeros((len(positions), len(positions)), dtype=float)
# cubs = np.ma.masked_equal(cubs, 0)
j = 0
for val in vals:
i = 0
for comp in vals:
if i < j:
distent[j, i] = distent[i, j]
# distcub[j, i] = distcub[i, j]
# distcuf[j, i] = distcuf[i, j]
elif i > j:
distent[j, i] = euclidean(val, comp) if euclide else utils.endresdistance(val, comp)
# distcub[j, i] = euclidean(cubs[j], cubs[i]) if euclide else utils.endresdistance(cubs[j], cubs[i])
# distcuf[j, i] = euclidean(cufs[j], cufs[i]) if euclide else utils.endresdistance(cufs[j], cufs[i])
i += 1
j += 1
print '\rdistcomputation ' + str(j) + ' over ' + str(len(vals)),
if use == "diament1":
div, i = divmod(len(names), bins)
X = np.zeros(bins)
cuf = np.zeros((bins, bins))
cub = np.zeros((bins, bins))
ent = np.zeros((bins, bins))
dist3D = np.zeros((bins, bins), dtype=float)
X[:int(i)] = np.ceil(div) + 1
X[int(i):] = np.floor(div)
start = 0
for num, val in enumerate(X):
star = 0
for nu, va in enumerate(X):
dist3D[num, nu] = shortestpath[start:start + int(val), star:star + int(va)].mean()
cuf[num, nu] = distcuf[start:start + int(val), star:star + int(va)].mean()
cub[num, nu] = distcub[start:start + int(val), star:star + int(va)].mean()
ent[num, nu] = distent[start:start + int(val), star:star + int(va)].mean()
star += int(va)
start += int(val)
                print '\rbinning ' + str(num),
del distcuf
del distcub
del distent
del shortestpath
self.rho_ent, self.pent = spearmanr(ent, dist3D, axis=None)
self.rho_cub, self.pcub = spearmanr(cub, dist3D, axis=None)
self.rho_cuf, self.pcuf = spearmanr(cuf, dist3D, axis=None)
elif use == "diament2":
div, i = divmod(len(names)**2, bins)
X = np.zeros(bins)
cuf = np.zeros(bins)
cub = np.zeros(bins)
ent = np.zeros(bins)
dist3D = np.zeros(bins)
X[:int(i)] = np.ceil(div) + 1
X[int(i):] = np.floor(div)
distent = np.ravel(distent)
shortestpath = np.ravel(shortestpath)
#distcub = np.ravel(distcub)
#distcuf = np.ravel(distcuf)
# for CUF
ind = np.argsort(distent)
distent = distent[ind]
sortshortestpath = shortestpath[ind]
start = 0
for i, val in enumerate(X):
dist3D[i] = sortshortestpath[start:start + int(val)].mean()
ent[i] = distent[start:start + int(val)].mean()
start += int(val)
self.rho_ent, self.pent = spearmanr(ent, dist3D)
# for CUB
"""
ind = np.argsort(distcuf)
distcuf = distcuf[ind]
sortshortestpath = shortestpath[ind]
start = 0
for i, val in enumerate(X):
dist3D[i] = sortshortestpath[start:start + int(val)].mean()
cuf[i] = distcuf[start:start + int(val)].mean()
start += int(val)
self.rho_cuf, self.pcuf = spearmanr(cuf, dist3D)
# for CUF
ind = np.argsort(distcub)
distcub = distcub[ind]
sortshortestpath = shortestpath[ind]
start = 0
for i, val in enumerate(X):
dist3D[i] = sortshortestpath[start:start + int(val)].mean()
cub[i] = distcub[start:start + int(val)].mean()
start += int(val)
self.rho_cub, self.pcub = spearmanr(cub, dist3D)
"""
else:
# computation jerem 1st
div, i = divmod(len(names), bins)
X = np.zeros(bins)
cuf = np.zeros((bins, 64))
cub = np.zeros((bins, 59))
ent = np.zeros((bins, 18))
dist3D = np.zeros((bins, bins), dtype=float)
X[:int(i)] = np.ceil(div) + 1
X[int(i):] = np.floor(div)
start = 0
for num, val in enumerate(X):
ent[num] = vals[start:start + int(val)].mean(0)
cuf[num] = cufs[start:start + int(val)].mean(0)
cub[num] = cubs[start:start + int(val)].mean(0)
star = 0
for nu, va in enumerate(X):
dist3D[num, nu] = shortestpath[start:start + int(val), star:star + int(va)].mean()
star += int(va)
start += int(val)
                print '\rbinning ' + str(num),
del vals
del cufs
del shortestpath
distcuf = np.zeros((bins, bins), dtype=float)
distcub = np.zeros((bins, bins), dtype=float)
distent = np.zeros((bins, bins), dtype=float)
ent = np.ma.masked_equal(ent, 0)
cuf = np.ma.masked_equal(cuf, 0)
cub = np.ma.masked_equal(cub, 0)
j = 0
for val in ent:
i = 0
for comp in ent:
if i < j:
distent[j, i] = distent[i, j]
distcub[j, i] = distcub[i, j]
distcuf[j, i] = distcuf[i, j]
elif i > j:
distent[j, i] = euclidean(val, comp)
distcub[j, i] = utils.endresdistance(cub[j], cub[i])
distcuf[j, i] = utils.endresdistance(cuf[j], cuf[i])
i += 1
j += 1
print '\rdistcomputation ' + str(j),
self.rho_ent, self.pent = spearmanr(distent, dist3D, axis=None)
self.rho_cub, self.pcub = spearmanr(distcub, dist3D, axis=None)
self.rho_cuf, self.pcuf = spearmanr(distcuf, dist3D, axis=None)
return self.rho_ent, self.pent, self.rho_cuf, self.pcuf, self.rho_cub, self.pcub
def search(value, arr):
"""
modified binary search for closest value search in a sorted array
<NAME> @https://stackoverflow.com/questions/30245166
Args:
value: a value to look for
arr: array like, an array to skim
Returns:
the closest value present in the array
"""
if value < arr[0]:
return arr[0]
if value > arr[-1]:
return arr[-1]
lo = 0
hi = len(arr) - 1
while lo <= hi:
mid = (hi + lo) / 2
if value < arr[mid]:
hi = mid - 1
elif value > arr[mid]:
lo = mid + 1
else:
return arr[mid]
return arr[lo] if arr[lo] - value < value - arr[hi] else arr[hi]
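    # Quick illustration of search() (the list is arbitrary but must be sorted):
    # search(7, [1, 4, 6, 9])   # -> 6, the closest value present in the array
    # search(0, [1, 4, 6, 9])   # -> 1, values below the range clamp to the first element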
# we then do a correlation to the two matrices and see if they are highly correlated
# we will use the spearman's rho for each bins
def plot_distances(self, size=40):
"""
plot the phylogenetic distance matrix
Args:
size: int the x size of the plot
Raises:
UnboundLocalError: "compute the phylo distance matrix first (look at the doc)"
"""
if utils.phylo_distances is not None:
plt.figure(figsize=(size, 200))
plt.title('evolutionary distances')
plt.imshow(1. / (1 + np.array(utils.phylo_distances)))
plt.savefig("utils/templot/evolutionarydistances.pdf")
plt.show()
else:
raise UnboundLocalError("compute the phylo distance matrix first (look at the doc)")
# SPECIAL FUNCTION
def _dictify(self, save_workspace, save_homo, add_homosets):
"""
        Used by the saving function. Transforms the workspace object into a JSON-serializable dictionary,
        adding some parameters because otherwise the object may be too big
Args:
save_workspace: bool to save working_homoset
save_homo: bool to save all_homoset
add_homosets: PyCUB.homoset instances to add to this dict
Return:
A dict holding every element to be jsonized
"""
dictispecies = {}
for key, val in self.species.iteritems():
dictispecies.update({key: val._dictify()})
di = {"species": dictispecies,
"all_homoset": self.all_homoset._dictify(savehomos=True) if save_homo and
(self.all_homoset is not None) else None,
"working_homoset": self.working_homoset._dictify() if save_workspace and
(self.working_homoset is not None) else None
}
for key, val in add_homosets:
di.update({key: val})
return di
def _undictify(self, data):
"""
        Inverse of _dictify: rebuilds the workspace object from a dictionary.
        Here we don't use the other classes' undictify functions; we simply recreate the objects by passing
        the data to their init methods, which is clearer.
Args:
data: dict to undictify into the workspace object
Returns:
Other PyCUB.homosets that would have been saved as add_homosets
"""
self.species = {}
self._is_saved = False
ret = {}
if data["all_homoset"] is not None:
self.all_homoset = hset.HomoSet(data=data["all_homoset"])
for key, val in data.iteritems():
if key == 'species':
for ke, va in val.iteritems():
self.species.update({ke: spe.Espece(data=va)})
elif key == "working_homoset":
if val is not None:
self.working_homoset = hset.HomoSet(data=val)
for v in self.working_homoset.homo_namelist: # as we don't save the homologies twice here
self.working_homoset.update({v: self.all_homoset[v]})
elif key == "all_homoset":
pass
else:
ret.update({key: val})
return ret
def loadspeciestable(self):
"""
short function to retrieve the speciestable from Disk
Raises:
IOError: "no speciestable file"
"""
filename = "utils/meta/savings/speciestable.json"
if not os.path.isfile(filename):
raise IOError("no speciestable file")
with open(filename, "r") as f:
speciestable = json.loads(f.read())
print "it worked !"
utils.speciestable = {}
for key, val in speciestable.iteritems():
utils.speciestable.update({int(key): str(val)})
def savespeciestable(self):
"""
short function to put the speciestable on Disk
This is done since there may be some memory leakage, probably due to some autoreloading behavior
of the global data stored on utils.
"""
filename = "utils/meta/savings/speciestable.json"
data = json.dumps(dict(utils.speciestable), indent=4, separators=(',', ': '))
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(filename, 'w') as f:
f.write(data)
print "it worked !"
# TODO: [in the end] check that everything is imported, create a req file, check everything is saved,
# retry imports on a new machine, export doc in html and latex, redo the readme (the one for the data
# as well),
# create the file for loading in pip, put it in pip. Create a small medium article. create an html index page
# referencing the project, the documentation, the html plots, the dissertation, the article.
"""
def offload(self, var='species'):
""
offload when your file is too big (easier than using HDF or something).. for now
save and
""
print "offload when your file is too big (easier than using HDF or something).. for now"
if var == 'species'
dictispecies = {}
for key, val in self.species.iteritems():
dictispecies.update({key: val._dictify()})
filename = "utils/save/" + self.session + '/' + species + ".json"
print "writing in " + name
dictify = self._dictify(save_workspace, save_homo)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(filename, 'w') as f:
f.write(data)
print "it worked !"
self.species = 'off'
self.all_homoset = 'off'
else:
print "offload only when you have saved your object and got a working homoset"
"""
``` |
{
"source": "jkobject/common_variant_filter",
"score": 2
} |
#### File: jkobject/common_variant_filter/common_variant_filter.py
```python
import argparse
import pandas as pd
import numpy as np
GENE = 'Hugo_Symbol'
PROTEIN = 'Protein_Change'
CHROMOSOME = 'Chromosome'
ALT = 'Alteration'
START_POSITION = 'Start_position'
END_POSITION = 'End_position'
REF_ALLELE = 'Reference_Allele'
ALT_ALLELE = 'Tumor_Seq_Allele2'
REF_COUNT = 't_ref_count'
ALT_COUNT = 't_alt_count'
VAR_CLASS = 'Variant_Classification'
sample_id = 'sample_id'
maf_handle = 'maf_handle'
exac_handle = 'exac_handle'
whitelist_handle = 'whitelist_handle'
filter_syn = 'filter_syn'
min_exac_ac = 'min_exac_ac'
min_depth = 'min_depth'
boolean_filter_noncoding = 'boolean_filter_noncoding'
boolean_whitelist = 'boolean_disable_whitelist'
EXAC_CHR = 'CHROM'
EXAC_POS = 'POS'
EXAC_REF = 'REF'
EXAC_ALT = 'ALT'
EXAC_AF = 'AF'
EXAC_AC = 'AC'
EXAC_AC_AFR = 'AC_AFR'
EXAC_AC_AMR = 'AC_AMR'
EXAC_AC_EAS = 'AC_EAS'
EXAC_AC_FIN = 'AC_FIN'
EXAC_AC_NFE = 'AC_NFE'
EXAC_AC_OTH = 'AC_OTH'
EXAC_AC_SAS = 'AC_SAS'
EXAC_AN = 'AN'
EXAC_AN_AFR = 'AN_AFR'
EXAC_AN_AMR = 'AN_AMR'
EXAC_AN_EAS = 'AN_EAS'
EXAC_AN_FIN = 'AN_FIN'
EXAC_AN_NFE = 'AN_NFE'
EXAC_AN_OTH = 'AN_OTH'
EXAC_AN_SAS = 'AN_SAS'
MAPPED_GENE = 'gene'
MAPPED_CHR = 'chromosome'
MAPPED_REF = 'ref_allele'
MAPPED_ALT = 'alt_allele'
MAPPED_POS = 'start_position'
MAPPED_AA = 'protein_change'
MAPPED_VAR_CLASS = 'variant_classification'
MAPPED_REF_COUNT = 'ref_count'
MAPPED_ALT_COUNT = 'alt_count'
EXAC_COMMON = 'exac_common'
WL = 'whitelist'
DEPTH = 'read_depth'
LOW_DEPTH = 'low_read_depth'
CODING = 'coding'
COMMON = 'common_variant'
maf_column_map = {
GENE: MAPPED_GENE,
CHROMOSOME: MAPPED_CHR,
PROTEIN: MAPPED_AA,
START_POSITION: MAPPED_POS,
REF_ALLELE: MAPPED_REF,
ALT_ALLELE: MAPPED_ALT,
VAR_CLASS: MAPPED_VAR_CLASS,
REF_COUNT: MAPPED_REF_COUNT,
ALT_COUNT: MAPPED_ALT_COUNT
}
output_column_map = {v: k for k, v in maf_column_map.items()}
exac_column_map = {
EXAC_CHR: MAPPED_CHR,
EXAC_POS: MAPPED_POS,
EXAC_REF: MAPPED_REF,
EXAC_ALT: MAPPED_ALT,
EXAC_AF: 'exac_af',
EXAC_AC: 'exac_ac',
EXAC_AC_AFR: 'exac_ac_afr',
EXAC_AC_AMR: 'exac_ac_amr',
EXAC_AC_EAS: 'exac_ac_eas',
EXAC_AC_FIN: 'exac_ac_fin',
EXAC_AC_NFE: 'exac_ac_nfe',
EXAC_AC_OTH: 'exac_ac_oth',
EXAC_AC_SAS: 'exac_ac_sas',
EXAC_AN: 'exac_an',
EXAC_AN_AFR: 'exac_an_afr',
EXAC_AN_AMR: 'exac_an_amr',
EXAC_AN_EAS: 'exac_an_eas',
EXAC_AN_FIN: 'exac_an_fin',
EXAC_AN_NFE: 'exac_an_nfe',
EXAC_AN_OTH: 'exac_an_oth',
EXAC_AN_SAS: 'exac_an_sas',
}
whitelist_column_map = {0: MAPPED_CHR, 1: MAPPED_POS, 2: END_POSITION, 3:ALT}
population_keys = [EXAC_AC_AFR, EXAC_AC_AMR, EXAC_AC_EAS, EXAC_AC_FIN, EXAC_AC_NFE, EXAC_AC_OTH, EXAC_AC_SAS]
populations = [exac_column_map[x] for x in population_keys]
def check_column_names(df, map):
for column_name in map.keys():
assert column_name in df.columns, \
'Expected column %s not found among %s' % (column_name, df.columns)
def read(handle, **kwargs):
return pd.read_csv(handle, sep='\t', comment='#', dtype='object', **kwargs)
def standard_read(handle, column_map, **kwargs):
check_column_names(read(handle, nrows=3), column_map)
return read(handle, encoding='latin-1', **kwargs).rename(columns=column_map)
def apply_str(x):
try:
return x.astype(int).astype(str)
except ValueError:
return x.astype(str)
def annotate_read_depth(series_alt_count, series_ref_count):
return series_alt_count.astype(int).add(series_ref_count.astype(int))
def get_idx_low_depth(series_depth, min_depth):
return series_depth[series_depth.astype(int).lt(int(min_depth))].index
def get_idx_coding_classifications(series_classification):
coding_classifications = [
'Missense_Mutation', 'Nonsense_Mutation', 'Nonstop_Mutation', 'Splice_Site',
'Frame_Shift_Ins', 'Frame_Shift_Del', 'In_Frame_Ins', 'In_Frame_Del']
return series_classification[series_classification.isin(coding_classifications)].index
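# Minimal illustration of the depth helpers above (the counts are made up, not from a real MAF):
# depth = annotate_read_depth(pd.Series(['8', '12']), pd.Series(['22', '3']))  # -> 30 and 15
# get_idx_low_depth(depth, min_depth=20)                                       # -> index [1], the variant with depth 15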
def rename_exac_cols(df):
colmap = {}
old_columns = df.columns[df.columns.str.lower().str.contains('exac')]
new_columns = ['_'.join([col, 'previous_annotation']) for col in old_columns]
for old, new in zip(old_columns, new_columns):
colmap[old] = new
return df.rename(columns=colmap)
def write_integer(number, filename):
with open(filename, 'w') as f:
f.write('%d' % number)
def main(inputs):
df = standard_read(inputs[maf_handle], maf_column_map, low_memory=False)
df = rename_exac_cols(df)
exac = standard_read(inputs[exac_handle], exac_column_map, low_memory=False)
merge_cols = [MAPPED_CHR, MAPPED_POS, MAPPED_REF, MAPPED_ALT]
df = df.merge(exac, on=merge_cols, how='left')
df.loc[:, populations] = df.loc[:, populations].fillna(0.0)
df.loc[:, LOW_DEPTH] = np.nan
df.loc[:, CODING] = np.nan
df.loc[:, WL] = np.nan
df.loc[:, EXAC_COMMON] = 0.0
idx_original = df.index
df[MAPPED_ALT_COUNT] = df[MAPPED_ALT_COUNT].fillna(0.0)
df[MAPPED_REF_COUNT] = df[MAPPED_REF_COUNT].fillna(0.0)
df[DEPTH] = annotate_read_depth(df[MAPPED_ALT_COUNT], df[MAPPED_REF_COUNT])
df.loc[:, LOW_DEPTH] = 0.0
idx_read_depth = get_idx_low_depth(df[DEPTH], inputs[min_depth])
df.loc[:, CODING] = 0.0
idx_coding = get_idx_coding_classifications(df[MAPPED_VAR_CLASS])
idx_noncoding = idx_original.difference(idx_coding)
if not inputs[boolean_whitelist]:
df.loc[:, WL] = 0.0
        whitelist = read(inputs[whitelist_handle], header=None).rename(columns=whitelist_column_map)  # use the function argument, not the global; header=None since the BED file has no header row
df[whitelist_column_map[3]] = df[MAPPED_GENE].astype(str) + ':' + \
df[MAPPED_AA].str.split('p.', expand=True).loc[:, 1].astype(str)
df[whitelist_column_map[3]] = df[whitelist_column_map[3]].fillna('')
idx_whitelist = df[df[whitelist_column_map[3]].isin(whitelist[whitelist_column_map[3]])].index
else:
idx_whitelist = pd.DataFrame().index
idx_common_exac = df[(df.loc[:, populations].astype(float) > float(inputs[min_exac_ac])).sum(axis=1) != 0].index
df.loc[idx_read_depth, LOW_DEPTH] = 1.0
df.loc[idx_coding, CODING] = 1.0
df.loc[idx_whitelist, WL] = 1.0
df.loc[idx_common_exac, EXAC_COMMON] = 1.0
df[COMMON] = 0
idx_common = idx_common_exac.difference(idx_whitelist)
df.loc[idx_common, COMMON] = 1
    idx_reject = idx_read_depth.union(idx_common)
if inputs[boolean_filter_noncoding]:
idx_reject = idx_reject.union(idx_noncoding)
idx_pass = idx_original.difference(idx_reject)
df.drop(whitelist_column_map[3], axis=1, inplace=True)
df = df.rename(columns=output_column_map)
df_pass = df.loc[idx_pass, :]
df_reject = df.loc[idx_reject, :]
outname = ''.join([inputs[sample_id], '.common_variant_filter.pass.maf'])
df_pass.to_csv(outname, sep='\t', index=False)
outname = ''.join([inputs[sample_id], '.common_variant_filter.reject.maf'])
df_reject.to_csv(outname, sep='\t', index=False)
write_integer(np.int(df.shape[0]), 'considered.txt')
write_integer(np.int(df_pass.shape[0]), 'passed.txt')
write_integer(np.int(df_reject.shape[0]), 'rejected.txt')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--id', type=str, required=True, help='Sample ID')
parser.add_argument('--maf', type=str, required=True, help='MAF to annotate and filter')
parser.add_argument('--min_exac_ac', type=int, required=False, default=10,
help='Minimum allele count across any population to filter')
parser.add_argument('--min_filter_depth', type=int, required=False, default=0,
help='Minimum coverage of variant to not be filtered')
parser.add_argument('--filter_noncoding', action='store_true', required=False, default=False,
help='Filters non-coding variants')
parser.add_argument('--disable_wl', action='store_true', required=False, default=False,
help='Will filter variants in whitelist if enabled')
args = parser.parse_args()
inputs_dict = {
sample_id: args.id,
maf_handle: args.maf,
min_exac_ac: args.min_exac_ac,
min_depth: args.min_filter_depth,
boolean_filter_noncoding: args.filter_noncoding,
boolean_whitelist: args.disable_wl,
exac_handle: 'datasources/exac.expanded.r1.txt',
whitelist_handle: 'datasources/known_somatic_sites.bed'
}
print('Common variant filter')
print(inputs_dict)
main(inputs_dict)
``` |
{
"source": "jkobject/JKBio",
"score": 2
} |
#### File: JKBio/utils/helper.py
```python
from __future__ import print_function
from matplotlib import pyplot as plt
import json
import os
import sys
import string
import subprocess
import pdb
import ipdb
import pandas as pd
import numpy as np
import itertools
import random
from taigapy import TaigaClient
tc = TaigaClient()
rename_mut = {'contig': 'chr', 'position': 'pos', 'Reference_Allele': 'ref', 'ref_allele': 'ref', 'alt_allele': 'alt',
'Chromosome': 'chr', 'End_postition': 'end', 'Start_position': 'pos', 'Tumor_Seq_Allele1': "alt"}
def fileToList(filename):
"""
loads an input file with a\\n b\\n.. into a list [a,b,..]
"""
with open(filename) as f:
return [val[:-1] for val in f.readlines()]
def listToFile(l, filename):
"""
loads a list with [a,b,..] into an input file a\\n b\\n..
"""
with open(filename, 'w') as f:
for item in l:
f.write("%s\n" % item)
def dictToFile(d, filename):
"""
turn a dict into a json file
"""
with open(filename, 'w') as json_file:
json.dump(d, json_file)
def fileToDict(filename):
"""
loads a json file into a python dict
"""
with open(filename) as f:
data = json.load(f)
return data
def batchMove(l, pattern=['*.', '.*'], folder='', add=''):
"""
moves a set of files l into a folder:
Args:
-----
l: file list
pattern: if files are a set of patterns to match
folder: folder to move file into
add: some additional mv parameters
"""
for val in l:
cmd = 'mv '
if add:
cmd += add + ' '
if '*.' in pattern:
cmd += '*'
cmd += val
if '.*' in pattern:
cmd += '*'
cmd += " " + folder
res = os.system(cmd)
if res != 0:
raise Exception("Leave command pressed or command failed")
def batchRename(dt, folder='', sudo=False, doAll=False, add='', dryrun=False):
"""
Given a dict renames corresponding files in a folder
Args:
----
dt: dict(currentName:newName) renaming dictionnary
folder: folder to look into
add: some additional mv parameters
"""
cmd = 'ls -R ' + folder if doAll else 'ls ' + folder
files = os.popen(cmd).read().split('\n')
if doAll:
prep=''
f = []
for val in files:
if len(val)==0:
prep=''
continue
if val[0]=='.' and len(val)>3:
prep=val[:-1]
continue
if "." in val:
f.append(prep+"/"+val)
files = f
for k, val in dt.items():
for f in files:
if k in f:
cmd = 'sudo mv ' if sudo else 'mv '
if add:
cmd += add + ' '
if not doAll:
cmd += folder
cmd += f
cmd += ' '
if not doAll:
cmd += folder
cmd += f.replace(k, val)
if dryrun:
print(cmd)
else:
res = os.system(cmd)
if res != 0:
raise Exception("Leave command pressed or command failed")
def grouped(iterable, n):
"""
iterate over element of list 2 at a time python
s -> (s0,s1,s2,...sn-1), (sn,sn+1,sn+2,...s2n-1), (s2n,s2n+1,s2n+2,...s3n-1), ...
"""
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
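# Quick illustration of grouped() (the values are arbitrary):
# list(grouped([1, 2, 3, 4, 5, 6], 2))  # -> [(1, 2), (3, 4), (5, 6)]
# a trailing incomplete chunk is still yielded, e.g. grouped([1, 2, 3], 2) ends with (3,)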
def overlap(interval1, interval2):
"""
Given [0, 4] and [1, 10] returns [1, 4]
Given [0, 4] and [8, 10] returns False
"""
if interval2[0] <= interval1[0] <= interval2[1]:
start = interval1[0]
elif interval1[0] <= interval2[0] <= interval1[1]:
start = interval2[0]
else:
return False
if interval2[0] <= interval1[1] <= interval2[1]:
end = interval1[1]
elif interval1[0] <= interval2[1] <= interval1[1]:
end = interval2[1]
else:
return False
return (start, end)
def union(interval1, interval2):
"""
Given [0, 4] and [1, 10] returns [0, 10]
Given [0, 4] and [8, 10] returns False
"""
if interval1[0] <= interval2[0] <= interval1[1]:
start = interval1[0]
end = interval1[1] if interval2[1] <= interval1[1] else interval2[1]
elif interval1[0] <= interval2[1] <= interval1[1]:
start = interval2[0] if interval2[0] <= interval1[0] else interval1[0]
end = interval1[1]
else:
return False
return (start, end)
def nans(df): return df[df.isnull().any(axis=1)]
def createFoldersFor(filepath):
"""
will recursively create folders if needed until having all the folders required to save the file in this filepath
"""
prevval = ''
for val in filepath.split('/')[:-1]:
prevval += val + '/'
if not os.path.exists(prevval):
os.mkdir(prevval)
def randomString(stringLength=6, stype='all', withdigits=True):
"""
Generate a random string of letters and digits
Args:
-----
stringLength: the amount of char
stype: one of lowercase, uppercase, all
withdigits: digits allowed in the string?
Returns:
-------
the string
"""
    if stype == 'lowercase':
        lettersAndDigits = string.ascii_lowercase
    elif stype == 'uppercase':
        lettersAndDigits = string.ascii_uppercase
    else:
        lettersAndDigits = string.ascii_letters
if withdigits:
lettersAndDigits += string.digits
return ''.join(random.choice(lettersAndDigits) for i in range(stringLength))
def pdDo(df, op="mean", of="value1", over="value2"):
    """
    apply a function to a panda dataframe WIP
    groups the rows by the 'over' column and aggregates the 'of' column with op (one of mean/sum/max/min)
    """
    df = df.sort_values(by=over)
    index = []
    ret = []
    data = [df[of].iloc[0]]
    prev = df[over].iloc[0]
    for k, val in df.iloc[1:].iterrows():
        if val[over] == prev:
            data.append(val[of])
        else:
            if op == "mean":
                ret.append(np.mean(data))
            elif op == "sum":
                ret.append(np.sum(data))
            elif op == "max":
                ret.append(np.max(data))
            elif op == "min":
                ret.append(np.min(data))
            index.append(k)
            data = [val[of]]
            prev = val[over]
    return index, ret
def parrun(cmds, cores, add=[]):
"""
runs a set of commands in parallel using the "&" command
Args:
-----
cmds: the list of commands
cores: number of parallel execution
add: an additional list(len(cmds)) of command to run in parallel at the end of each parallel run
"""
count = 0
exe = ''
if len(add) != 0 and len(add) != len(cmds):
raise ValueError("we would want them to be the same size")
else:
addexe = ''
for i, cmd in enumerate(cmds):
count += 1
exe += cmd
if len(add) != 0:
addexe += add[i]
if count < cores and i < len(cmds) - 1:
exe += ' & '
if len(add) != 0:
addexe += ' & '
else:
count = 0
res = subprocess.run(exe, capture_output=True, shell=True)
if res.returncode != 0:
raise ValueError('issue with the command: ' + str(res.stderr))
exe = ''
if len(add) != 0:
res = subprocess.run(addexe, capture_output=True, shell=True)
if res.returncode != 0:
raise ValueError(
'issue with the command: ' + str(res.stderr))
addexe = ''
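# Hypothetical usage sketch (the commands are placeholders):
# parrun(["gzip -k a.fastq", "gzip -k b.fastq", "gzip -k c.fastq"], cores=2)
# launches the first two commands in one shell call (joined with '&'), then runs the third one separately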
def askif(quest):
"""
asks a y/n question to the user about something and returns true or false given his answer
"""
print(quest)
inp = input()
if inp in ['yes', 'y', 'Y', 'YES', 'oui', 'si']:
return 1
elif inp in ['n', 'no', 'nope', 'non', 'N']:
return 0
else:
return askif('you need to answer by yes or no')
def inttodate(i, lim=1965, unknown='U', sep='-', order="asc", startsatyear=0):
"""
transforms an int representing days into a date
Args:
----
i: the int
lim: the limited year below which we have a mistake
unknown: what to return when unknown (date is bellow the limited year)
sep: the sep between your date (e.g. /, -, ...)
order: if 'asc', do d,m,y else do y,m,d
startsatyear: when is the year to start counting for this int
Returns:
the date or unknown
"""
a = int(i // 365)
if a > lim:
a = str(a + startsatyear)
r = i % 365
m = str(int(r // 32)) if int(r // 32) > 0 else str(1)
r = r % 32
d = str(int(r)) if int(r) > 0 else str(1)
else:
return unknown
return d + sep + m + sep + a if order == "asc" else a + sep + m + sep + d
def datetoint(dt, split='-', unknown='U', order="des"):
"""
same as inttodate but in the opposite way;
starts at 0y,0m,0d
dt: the date string
split: the splitter in the string (e.g. /,-,...)
unknown: maybe the some dates are 'U' or 0 and the program will output 0 for unknown instead of crashing
order: if 'asc', do d,m,y else do y,m,d
Returns:
the date
"""
arr = np.array(dt[0].split(split) if dt[0] !=
unknown else [0, 0, 0]).astype(int)
if len(dt) > 1:
for val in dt[1:]:
arr = np.vstack(
(arr, np.array(val.split(split) if val != unknown and val.count(split) == 2 else [0, 0, 0]).astype(int)))
arr = arr.T
res = arr[2] * 365 + arr[1] * 31 + \
arr[0] if order == "asc" else arr[0] * 365 + arr[1] * 31 + arr[2]
return [res] if type(res) is np.int64 else res
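# Quick illustration of the two date helpers (the value is arbitrary):
# inttodate(730855)                       # -> '29-3-2002' with the defaults (asc order, startsatyear=0)
# datetoint(['29-3-2002'], order="asc")   # -> [730852]; note inttodate uses 32-day months while datetoint uses 31,
#                                         #    so the round trip is only approximate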
def showcount(i, size):
"""
pretty print of i/size%, to put in a for loop
"""
print(str(1 + int(100 * (i / size))) + '%', end='\r')
def combin(n, k):
"""
Nombre de combinaisons de n objets pris k a k
outputs the number of comabination of n object taken k at a time
"""
if k > n // 2:
k = n - k
x = 1
y = 1
i = n - k + 1
while i <= n:
x = (x * i) // y
y += 1
i += 1
return x
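# Sanity-check examples:
# combin(5, 2)    # -> 10
# combin(52, 5)   # -> 2598960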
def dups(lst):
"""
shows the duplicates in a list
"""
seen = set()
# adds all elements it doesn't know yet to seen and all other to seen_twice
seen_twice = set(x for x in lst if x in seen or seen.add(x))
# turn the set into a list (as requested)
return list(seen_twice)
def makeCombinations(size, proba):
"""
produces probability of X event happening at the same time
pretty usefull for cobinding analysis. wil compute it
given binomial probabilities of each event occuring and the number of trials
Args:
-----
size: int number of trials
proba: list[float] probabilities of each event occuring
"""
sums = {i:0 for i in range(1,size)}
for i in range(size-1,0,-1):
print(i)
if sums[i]> 0:
continue
print(combin(size+3,i))
v=0
for j in itertools.combinations(proba, i):
v+=np.prod(j)
sums[i] = v
for i in range(size-1,0,-1):
for j in range(i+1,size):
icomb = combin(j,i)
sums[i] -= icomb*sums[j]
sums[0] = 1-sum(list(sums.values()))
return sums
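# Hypothetical usage sketch (the probabilities are illustrative):
# makeCombinations(4, [0.5, 0.2, 0.1]) returns a dict mapping "number of events co-occurring" -> probability,
# with key 0 holding the leftover probability so that the values sum to 1.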
def closest(lst, K):
"""
returns the index of the value closest to K in a lst
"""
return lst[min(range(len(lst)), key = lambda i: abs(lst[i]-K))]
def compareDfs(df1, df2):
"""
compares df1 to df2
shows col difference, index difference, nans & 0s differences
"""
nmissmatchCols = set(df1.columns)-set(df2.columns)
omissmatchCols = set(df2.columns)-set(df1.columns)
nmissmatchInds = set(df1.index)-set(df2.index)
omissmatchInds = set(df2.index)-set(df1.index)
newNAs = df1.isna().sum().sum() - df2.isna().sum().sum()
new0s = (df1 == 0).sum().sum() - (df2 == 0).sum().sum()
print('FOUND missmatch Columns IN df1: ' + str(nmissmatchCols))
print('FOUND missmatch Columns NOT IN df1: ' + str(omissmatchCols))
print('FOUND missmatch Index IN df1: ' + str(nmissmatchInds))
print('FOUND missmatch Index NOT IN df1: ' + str(omissmatchInds))
print('FOUND new NAs in df1: ' + str(newNAs))
print('FOUND new 0s in df1: ' + str(new0s))
return nmissmatchCols, omissmatchCols, nmissmatchInds, omissmatchInds, newNAs, new0s
def stringifydict(res):
"""
"""
a = {}
for k,v in res.items():
if type(v) is dict:
a[k] = stringifydict(v)
else:
a[str(k)] = v
return a
def list_to_str(l):
"""
convert a list into a string in a better way than original python
"""
return str(l).replace("'","").replace(",","")[1:-1]
``` |
{
"source": "jkocherhans/maillib",
"score": 3
} |
#### File: maillib/tests/subclassing.py
```python
from maillib import Message
from StringIO import StringIO
import unittest
import email
RAW_MESSAGE = """\
Date: Sun, 25 Oct 2009 20:27:58 -0500
Subject: Test
From: <NAME> <<EMAIL>>
To: <NAME> <<EMAIL>>
Test.
"""
class TestMessage(Message):
pass
class ConstructorsTestCase(unittest.TestCase):
"""
Make sure the constructors work for subclasses.
"""
def test_from_file(self):
msg = TestMessage.from_file(StringIO(RAW_MESSAGE))
self.assertTrue(isinstance(msg, TestMessage))
def test_from_string(self):
msg = TestMessage.from_string(RAW_MESSAGE)
self.assertTrue(isinstance(msg, TestMessage))
def test_from_message(self):
msg = TestMessage.from_message(email.message_from_string(RAW_MESSAGE))
self.assertTrue(isinstance(msg, TestMessage))
```
#### File: maillib/maillib/utils.py
```python
import re
def normalize_subject(subject):
"""
Strips any leading Re or Fwd from the subject, and returns it. This is
sometimes useful for grouping messages.
"""
return re.sub(r'(?i)(re:|fw:|fwd:)\s+', '', subject)
def extract_list_id(value):
"""
Extracts and returns the first things that looks like a list-id from a
message header value.
"""
match = re.search(r'<([^>]*?)>', value)
if match is None:
return value
return match.group(1)
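# Quick illustrations (the example strings are made up):
# normalize_subject('Re: Fwd: meeting notes')            # -> 'meeting notes'
# extract_list_id('Dev list <dev.lists.example.org>')    # -> 'dev.lists.example.org'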
``` |
{
"source": "jkochNU/scqubits",
"score": 2
} |
#### File: scqubits/tests/conftest.py
```python
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pytest
import scqubits
import scqubits.settings
import scqubits.utils.plotting as plot
from scqubits.core.constants import FileType
from scqubits.core.storage import SpectrumData
from scqubits.settings import IN_IPYTHON
if not IN_IPYTHON:
matplotlib.use('Agg')
scqubits.settings.FILE_FORMAT = FileType.h5
TESTDIR, _ = os.path.split(scqubits.tests.__file__)
DATADIR = os.path.join(TESTDIR, 'data', '')
class BaseTest:
"""Used as base class for pytests of qubit classes"""
qbt = None
@pytest.fixture(autouse=True)
def set_tmpdir(self, request):
"""Pytest fixture that provides a temporary directory for writing test files"""
setattr(self, 'tmpdir', request.getfixturevalue('tmpdir'))
@classmethod
def teardown_class(cls):
plt.close('all')
def set_params(self, h5file_root):
"""Read and store parameters from open h5 file
Parameters
----------
h5file_root: h5py.Group
handle to root group in open h5 file
"""
h5params = h5file_root.attrs
for paramname in h5params.keys():
paramvalue = h5params[paramname]
if isinstance(paramvalue, (int, float, np.number)):
setattr(self.qbt, paramname, h5params[paramname])
def eigenvals(self, evals_reference):
evals_count = len(evals_reference)
evals_tst = self.qbt.eigenvals(evals_count=evals_count, filename=self.tmpdir + 'test')
assert np.allclose(evals_reference, evals_tst)
def eigenvecs(self, evecs_reference):
evals_count = evecs_reference.shape[1]
_, evecs_tst = self.qbt.eigensys(evals_count=evals_count, filename=self.tmpdir + 'test')
assert np.allclose(np.abs(evecs_reference), np.abs(evecs_tst))
def plot_evals_vs_paramvals(self, param_name, param_list):
self.qbt.plot_evals_vs_paramvals(param_name, param_list, evals_count=5, subtract_ground=True,
filename=self.tmpdir + 'test')
def get_spectrum_vs_paramvals(self, param_name, param_list, evals_reference, evecs_reference):
evals_count = len(evals_reference[0])
calculated_spectrum = self.qbt.get_spectrum_vs_paramvals(param_name, param_list, evals_count=evals_count,
subtract_ground=False, get_eigenstates=True)
calculated_spectrum.filewrite(filename=self.tmpdir + 'test')
assert np.allclose(evals_reference, calculated_spectrum.energy_table)
assert np.allclose(np.abs(evecs_reference), np.abs(calculated_spectrum.state_table), atol=1e-07)
def matrixelement_table(self, op, matelem_reference):
evals_count = len(matelem_reference)
calculated_matrix = self.qbt.matrixelement_table(op, evecs=None, evals_count=evals_count,
filename=self.tmpdir + 'test')
assert np.allclose(np.abs(matelem_reference), np.abs(calculated_matrix))
def plot_matrixelements(self, op, evals_count=7):
self.qbt.plot_matrixelements(op, evecs=None, evals_count=evals_count)
def print_matrixelements(self, op):
mat_data = self.qbt.matrixelement_table(op)
plot.print_matrix(abs(mat_data))
def plot_matelem_vs_paramvals(self, op, param_name, param_list, select_elems):
self.qbt.plot_matelem_vs_paramvals(op, param_name, param_list, select_elems=select_elems,
filename=self.tmpdir + 'test')
class StandardTests(BaseTest):
@classmethod
def setup_class(cls):
cls.qbt = None
cls.qbt_type = None
cls.file_str = ''
cls.op1_str = ''
cls.op2_str = ''
cls.param_name = ''
cls.param_list = None
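    # Hypothetical subclass sketch (the file_str/operator/parameter choices are made up for illustration;
    # the real test classes ship with their own reference data files):
    # class TestTransmon(StandardTests):
    #     @classmethod
    #     def setup_class(cls):
    #         cls.qbt_type = scqubits.Transmon
    #         cls.file_str = 'transmon'
    #         cls.op1_str = 'n_operator'
    #         cls.op2_str = 'n_operator'
    #         cls.param_name = 'ng'
    #         cls.param_list = np.linspace(-2, 2, 100)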
def test_eigenvals(self):
testname = self.file_str + '_1'
specdata = SpectrumData.create_from_file(DATADIR + testname)
self.qbt = self.qbt_type.create_from_dict(specdata._get_metadata_dict())
evals_reference = specdata.energy_table
return self.eigenvals(evals_reference)
def test_eigenvecs(self):
testname = self.file_str + '_2'
specdata = SpectrumData.create_from_file(DATADIR + testname)
self.qbt = self.qbt_type.create_from_dict(specdata._get_metadata_dict())
evecs_reference = specdata.state_table
return self.eigenvecs(evecs_reference)
def test_plot_wavefunction(self):
testname = self.file_str + '_1'
specdata = SpectrumData.create_from_file(DATADIR + testname)
self.qbt = self.qbt_type.create_from_dict(specdata._get_metadata_dict())
self.qbt.plot_wavefunction(esys=None, which=5, mode='real')
self.qbt.plot_wavefunction(esys=None, which=9, mode='abs_sqr')
def test_plot_evals_vs_paramvals(self):
testname = self.file_str + '_1'
specdata = SpectrumData.create_from_file(DATADIR + testname)
self.qbt = self.qbt_type.create_from_dict(specdata._get_metadata_dict())
return self.plot_evals_vs_paramvals(self.param_name, self.param_list)
def test_get_spectrum_vs_paramvals(self):
testname = self.file_str + '_4'
specdata = SpectrumData.create_from_file(DATADIR + testname)
self.qbt = self.qbt_type.create_from_dict(specdata._get_metadata_dict())
self.param_list = specdata.param_vals
evecs_reference = specdata.state_table
evals_reference = specdata.energy_table
return self.get_spectrum_vs_paramvals(self.param_name, self.param_list, evals_reference, evecs_reference)
def test_matrixelement_table(self):
testname = self.file_str + '_5'
specdata = SpectrumData.create_from_file(DATADIR + testname)
self.qbt = self.qbt_type.create_from_dict(specdata._get_metadata_dict())
matelem_reference = specdata.matrixelem_table
return self.matrixelement_table(self.op1_str, matelem_reference)
def test_plot_matrixelements(self):
testname = self.file_str + '_1'
specdata = SpectrumData.create_from_file(DATADIR + testname)
self.qbt = self.qbt_type.create_from_dict(specdata._get_metadata_dict())
self.plot_matrixelements(self.op1_str, evals_count=10)
def test_print_matrixelements(self):
testname = self.file_str + '_1'
specdata = SpectrumData.create_from_file(DATADIR + testname)
self.qbt = self.qbt_type.create_from_dict(specdata._get_metadata_dict())
self.print_matrixelements(self.op2_str)
def test_plot_matelem_vs_paramvals(self):
testname = self.file_str + '_1'
specdata = SpectrumData.create_from_file(DATADIR + testname)
self.qbt = self.qbt_type.create_from_dict(specdata._get_metadata_dict())
self.plot_matelem_vs_paramvals(self.op1_str, self.param_name, self.param_list,
select_elems=[(0, 0), (1, 4), (1, 0)])
def test_plot_potential(self):
testname = self.file_str + '_1'
specdata = SpectrumData.create_from_file(DATADIR + testname)
self.qbt = self.qbt_type.create_from_dict(specdata._get_metadata_dict())
if 'plot_potential' not in dir(self.qbt):
pytest.skip('This is expected, no reason for concern.')
self.qbt.plot_potential()
```
#### File: scqubits/tests/test_explorer.py
```python
import numpy as np
import scqubits as qubit
import scqubits.core.sweep_generators as swp
from scqubits import InteractionTerm, ParameterSweep, Explorer
def test_explorer():
qbt = qubit.Fluxonium(
EJ=2.55,
EC=0.72,
EL=0.12,
flux=0.0,
cutoff=110,
truncated_dim=9
)
osc = qubit.Oscillator(
E_osc=4.0,
truncated_dim=5
)
hilbertspace = qubit.HilbertSpace([qbt, osc])
interaction = InteractionTerm(
g_strength=0.2,
hilbertspace=hilbertspace,
op1=qbt.n_operator(),
subsys1=qbt,
op2=osc.creation_operator() + osc.annihilation_operator(),
subsys2=osc
)
interaction_list = [interaction]
hilbertspace.interaction_list = interaction_list
param_name = r'$\Phi_{ext}/\Phi_0$'
param_vals = np.linspace(-0.5, 0.5, 100)
subsys_update_list = [qbt]
def update_hilbertspace(param_val):
qbt.flux = param_val
sweep = ParameterSweep(
param_name=param_name,
param_vals=param_vals,
evals_count=10,
hilbertspace=hilbertspace,
subsys_update_list=subsys_update_list,
update_hilbertspace=update_hilbertspace,
)
swp.generate_chi_sweep(sweep)
swp.generate_charge_matrixelem_sweep(sweep)
explorer = Explorer(
sweep=sweep,
evals_count=10
)
explorer.interact()
``` |
{
"source": "jkocontreras/drawbotscripts",
"score": 3
} |
#### File: drawbotscripts/sineWave_animations/sineWave_360.py
```python
ph = 350 # pageheight
pw = ph * 2 # pagewidth
steps = 180 # how many pages / steps to draw
dot_s = 8 # size of the dots at the end of the radius
radius = ph/2 * .75
angle = 2*pi/steps
gap = radius / (steps - 1) * 2 # used to draw the dots of the sine wave
sin_vals = [sin(i*angle) for i in range(steps)] # calculate and store the sine values once
def base_page(i):
'''basic stuff that gets drawn and used on every page'''
newPage(pw, ph)
fill(1)
rect(0, 0, pw, ph)
fill(0)
translate(0, ph/2)
fontSize(8)
for j in range(-10, 11, 2):
text('%.1f' % (j/10), (pw/2, radius/10 * j), align = 'right')
fill(.5)
rect(ph/2 * .25, -ph/2, 1, ph)
rect(ph/2 * .25 + 2 * radius, -ph/2, 1, ph)
# drawing of the sine wave
with savedState():
translate((ph - radius *2)/2, 0)
for st in range(steps):
sin_val = sin_vals[ (st+i+1) % steps]
oval(st * gap - 2, sin_val * radius - 2, 4, 4)
translate(ph + ph/2, 0)
fill(None)
stroke(.5)
strokeWidth(1)
oval(-radius, -radius, radius*2, radius*2)
line((-ph/2 * 3, 0), (ph/2, 0))
line((0, -ph/2), (0, ph/2))
fontSize(14)
for i in range(steps):
base_page(i)
cos_val = cos(i * angle)
sin_val = sin(i * angle)
x = cos_val * radius
y = sin_val * radius
stroke(.75)
line((x, max(0, y)),(x, -radius))
line((max(x, 0), y),(- pw/2 + radius, y))
strokeWidth(2)
# radius
stroke(0, 0, 1)
line((0, 0),(x, y))
# cosine distance
stroke(1, 0, 0)
line((0, 0),(x, 0))
# sine distance
stroke(0, .5, 0)
line((0, 0),(0, y))
stroke(None)
fill(0, 0, 1, .2)
pth = BezierPath()
pth.moveTo((0, 0))
pth.lineTo((radius/4, 0))
pth.arc((0, 0), radius/4, 0, degrees(i * angle), clockwise = False)
pth.closePath()
drawPath(pth)
fill(0)
text('cos(θ)', (x/2, -18), align = 'center')
text('sin(θ)', (8, y/2 + 4))
text('%d°' % (i * degrees(angle)), (radius/4 + 4, 8))
text('%.3f' % cos_val, (x, -radius - 16), align = 'center')
text('%.3f' % sin_val, (- pw/2 + radius - 4, y), align = 'right')
fill(.5)
oval(-dot_s/2, -dot_s/2, dot_s, dot_s)
oval(x-dot_s/2, y-dot_s/2, dot_s, dot_s)
# saveImage('sine.gif')
```
#### File: drawbotscripts/vera molnar/lines_rotating.py
```python
pw = ph = 500
amount = 24
st_w = 4
cell_s = pw / (amount + 2)
line_cap = (st_w/2)/sqrt(2)
possible_vectors = [
[(-1, -1), (1, 1)],
[(-1, 1), (1, -1)],
[( 0, 1), (0, -1)],
[(-1, 0), (1, 0)] ]
# --------------
# function(s)
def rnd(x, val): return round(x, val)
def rnd_p(p, val=3): return rnd(p[0], val), rnd(p[1], val)
def a_line(pos, s):
global visited
x, y = pos
selected = choice(possible_vectors)
p1_, p2_ = [rnd_p((x + x_off * s/2, y + y_off * s/2)) for x_off, y_off in selected]
p1, p2 = p1_, p2_
if p1_ in visited and 0 not in selected[0]:
p1 = p1[0] + selected[0][0]*line_cap, p1[1] + selected[0][1]*line_cap
if p2_ in visited and 0 not in selected[1]:
p2 = p2[0] + selected[1][0]*line_cap, p2[1] + selected[1][1]*line_cap
line(p1, p2)
stroke(0)
for p in [p1_, p2_]:
visited[p] = visited.get(p, True)
# --------------
# drawings
newPage(pw, ph)
translate(cell_s * 1.5, cell_s * 1.5)
fill(None)
stroke(0)
strokeWidth(st_w)
visited = {}
for x in range(amount):
for y in range(amount):
a_line((x * cell_s, y * cell_s), cell_s)
# saveImage('lines_rotating.jpg')
```
#### File: drawbotscripts/vera molnar/random_grids.py
```python
import random
# ----------------------
# settings
pw = ph = 500
cell_a = 10 # amount of cells
sbdvs = 3 # subdivisions
gap = pw /(cell_a * sbdvs + cell_a + 1)
cell_s = sbdvs * gap
points = [(x * gap, y * gap) for x in range(sbdvs+1) for y in range(sbdvs+1) ]
# ----------------------
# function(s)
def a_grid_cell(pos, s, points, amount = len(points)):
random.shuffle(points)
points = random.sample( points, amount )
with savedState():
translate(x * (cell_s + gap), y * (cell_s + gap))
polygon(*points, close=False)
# ----------------------
# drawing
newPage(pw, ph)
rect(0, 0, pw, ph)
translate(gap, gap)
fill(None)
strokeWidth(1)
stroke(1)
lineCap('round')
lineJoin('round')
for x in range( cell_a ):
for y in range( cell_a ):
a_grid_cell((x * cell_s, y * cell_s), cell_s, points, y + 3)
# saveImage('random_grids.jpg')
```
#### File: drawbotscripts/vera molnar/rotating_Y.py
```python
pw = ph = 500
amount = 5
stroke_w = 20
margin = stroke_w * sqrt(2)
y_size = (pw - 2*margin) / (amount)
# ----------------------
# function(s)
def y_shape(pos, s, rot ):
x, y = pos
with savedState():
translate(x + s/2, y + s/2)
rotate(rot)
for i in range(3):
line( (0, 0), ( 0, -s/2) )
line( (0, 0), ( s/2, s/2) )
line( (0, 0), (-s/2, s/2) )
# ----------------------
# drawing
newPage(pw, ph)
translate(margin, margin)
fill(None)
strokeWidth( stroke_w )
stroke(0, 0.65, 0)
for x in range( amount ):
for y in range( amount ):
# rot = choice([0, 90, 180, 270])
rot = 90 - x * 90 - y * 90
y_shape( (x * y_size, y * y_size), y_size, rot)
# saveImage('rotating_Y.jpg')
``` |
{
"source": "jkodner05/LowResPOS",
"score": 3
} |
#### File: jkodner05/LowResPOS/readers.py
```python
import re, unicodedata
from collections import defaultdict
START = "#START"
STOP = "#STOP"
WSJ_FINDWORD = re.compile(r"\(([\w\d\.,\?!]+)\s([A-Za-z\d'-\.,\?!]+?)\)")
def tag_word(wordtup, keeptag):
if keeptag:
return unicodedata.normalize('NFC',(wordtup[0] + "_" + wordtup[1]).decode("utf-8"))
else:
return unicodedata.normalize('NFC',wordtup[1].decode("utf-8"))
def check_tokenlimit(maxtokens, numtokens):
if maxtokens and numtokens > maxtokens:
print "REACHED TOKEN LIMIT"
print "Num Tokens:\t", numtokens
return True
return False
def read_turkishtsfile(filename, tags, freqs, contexts, numtokens, maxtokens):
findword = re.compile(r"([^\s]+)_([^\s]+)")
with open(filename, "r") as f:
prevprev = START
prev = START
for line in f:
if len(freqs) >= 50000:
break
words = findword.findall(line.lower())
for wordtup in words:
if "//" in wordtup[0]:
continue
word = tag_word((wordtup[1],wordtup[0]),False).lower()
numtokens += 1
if check_tokenlimit(maxtokens, numtokens):
return tags, freqs, contexts, numtokens
# if word == u"\u0130mparatorlu\u011fu'nu":
# print "GOT IT"
if word not in tags:
tags[word] = {}
if wordtup[1] not in tags[word]:
tags[word][wordtup[1]] = 0
tags[word][wordtup[1]] += 1
freqs[word] += 1
contexts[prev].add((prevprev,word))
#print word, wordtup[1]
#print prevprev, prev, word
# print prevprev.encode("utf-8"), prev.encode("utf-8"), word.encode("utf-8"), "\t", prev.encode("utf-8"), contexts[prev]
prevprev = prev
prev = word
contexts[prev].add((prevprev,STOP))
prevprev = START
prev = START
# print set([tag for tagdict in tags.values() for tag in tagdict])
print len(freqs), sum(freqs.values())
return tags, freqs, contexts, numtokens
def read_ctbfile(filename, tags, freqs, contexts, numtokens, maxtokens):
findword = re.compile(r"([^\s]+)_([^\s]+)")
with open(filename, "r") as f:
prevprev = START
prev = START
for line in f:
if "</S>" in line or "<S ID" in line or "<P>" in line or "</P>" in line or "HEADER>" in line or "DATE>" in line or "BODY>" in line or "DOCID>" in line or "HEADLINE>" in line or "DOC>" in line or "TEXT>" in line:
continue
# print "\n", line.strip()
words = findword.findall(line.lower())
for wordtup in words:
if wordtup[1].lower() == "-none-":
continue
word = tag_word((wordtup[1],wordtup[0]),False).lower()
numtokens += 1
if check_tokenlimit(maxtokens, numtokens):
return tags, freqs, contexts, numtokens
if word not in tags:
tags[word] = {}
if wordtup[1] not in tags[word]:
tags[word][wordtup[1]] = 0
tags[word][wordtup[1]] += 1
freqs[word] += 1
contexts[prev].add((prevprev,word))
# print prevprev, prev, word
# print prevprev.encode("utf-8"), prev.encode("utf-8"), word.encode("utf-8"), "\t", prev.encode("utf-8"), contexts[prev]
prevprev = prev
prev = word
contexts[prev].add((prevprev,STOP))
prevprev = START
prev = START
# print set([tag for tagdict in tags.values() for tag in tagdict])
return tags, freqs, contexts, numtokens
def read_ctbfile_eval(filename, numtokens, maxtokens):
findword = re.compile(r"([^\s]+)_([^\s]+)")
pospairs = []
with open(filename, "r") as f:
for line in f:
if "</S>" in line or "<S ID" in line or "<P>" in line or "</P>" in line or "HEADER>" in line or "DATE>" in line or "BODY>" in line or "DOCID>" in line or "HEADLINE>" in line or "DOC>" in line or "TEXT>" in line:
continue
words = findword.findall(line.lower())
if not words:
continue
pospairs.extend([(tag_word((wordtup[1],wordtup[0]),False),wordtup[1]) for wordtup in words if wordtup[1].lower() != "-none-"])
numtokens += len(words)
#try:
# pospairs.extend([(tag_word(wordtup,True).decode("utf-8"),CHILDES_TAGMAP[wordtup[0]]) for wordtup in words])
#except KeyError:
# pospairs.extend([(tag_word(wordtup,True).decode("utf-8"),"SKIP") for wordtup in words])
return pospairs, numtokens
def read_turkishtsfile_eval(filename, numtokens, maxtokens):
findword = re.compile(r"([^\s]+)_([^\s]+)")
pospairs = []
types = set([])
with open(filename, "r") as f:
for line in f:
words = findword.findall(line.lower())
if not words:
continue
if len(types) >= 50000:
break
for wordtup in words:
types.add(wordtup[0])
pospairs.extend([(tag_word((wordtup[1],wordtup[0]),False).lower(),wordtup[1]) for wordtup in words if wordtup[1].lower() != "-none-"])
numtokens += len(words)
#try:
# pospairs.extend([(tag_word(wordtup,True).decode("utf-8"),CHILDES_TAGMAP[wordtup[0]]) for wordtup in words])
#except KeyError:
# pospairs.extend([(tag_word(wordtup,True).decode("utf-8"),"SKIP") for wordtup in words])
return pospairs, numtokens
def read_wsjfile(filename, tags, freqs, contexts, numtokens, maxtokens):
# findword = re.compile(r"\((\w+)\s([a-z'-]+)\)")
findword = WSJ_FINDWORD
with open(filename, "r") as f:
prevprev = START
prev = START
savedend = False
for line in f:
savedend = False
if not line.strip():
savedend = True
# if line.strip() == "(. .) ))":
contexts[prev].add((prevprev,STOP))
# print prevprev, prev, STOP, "\t", prev, contexts[prev]
prevprev = START
prev = START
words = findword.findall(line.lower())
for i, wordtup in enumerate(words):
word = tag_word(wordtup,False).lower()
if ")" in word:
print word, line
numtokens += 1
if check_tokenlimit(maxtokens, numtokens):
return tags, freqs, contexts, numtokens
if word not in tags:
tags[word] = {}
if wordtup[0] not in tags[word]:
tags[word][wordtup[0]] = 0
tags[word][wordtup[0]] += 1
freqs[word] += 1
contexts[prev].add((prevprev,word))
# print prevprev, prev, word, "\t", prev, contexts[prev]
prevprev = prev
prev = word
if not savedend:
contexts[prev].add((prevprev,STOP))
prevprev = START
prev = START
# print set([tag for tagdict in tags.values() for tag in tagdict])
return tags, freqs, contexts, numtokens
def read_wsjfile_eval(filename, numtokens, maxtokens):
findword = WSJ_FINDWORD
pospairs = []
with open(filename, "r") as f:
for line in f:
words = findword.findall(line.lower())
for wordtup in words:
word = wordtup[1]
if ")" in word:
print word
if not words:
continue
pospairs.extend([(tag_word(wordtup,False),wordtup[0]) for wordtup in words])
numtokens += len(words)
#try:
# pospairs.extend([(tag_word(wordtup,True).decode("utf-8"),CHILDES_TAGMAP[wordtup[0]]) for wordtup in words])
#except KeyError:
# pospairs.extend([(tag_word(wordtup,True).decode("utf-8"),"SKIP") for wordtup in words])
return pospairs, numtokens
def read_childesbrownfile_eval(filename, numtokens, maxtokens):
findword = re.compile(r"([A-Za-z:]+)\|([^\s]+)")
pospairs = []
with open(filename, "r") as f:
for line in f:
if line[0:5] != "%mor:":
continue
words = findword.findall(line.lower())
pospairs.extend([(tag_word(wordtup,False),wordtup[0]) for wordtup in words])
numtokens += len(words)
#try:
# pospairs.extend([(tag_word(wordtup,True).decode("utf-8"),CHILDES_TAGMAP[wordtup[0]]) for wordtup in words])
#except KeyError:
# pospairs.extend([(tag_word(wordtup,True).decode("utf-8"),"SKIP") for wordtup in words])
return pospairs, numtokens
def read_childesbrownfile(filename, tags, freqs, contexts, numtokens, maxtokens):
findword = re.compile(r"([A-Za-z:]+)\|([^\s]+)")
with open(filename, "r") as f:
prevprev = START
prev = START
for line in f:
if line[0:5] != "%mor:":
continue
words = findword.findall(line.lower())
for wordtup in words:
word = tag_word(wordtup,False)
numtokens += 1
if check_tokenlimit(maxtokens, numtokens):
return tags, freqs, contexts, numtokens
if word not in tags:
tags[word] = {}
if wordtup[0] not in tags[word]:
tags[word][wordtup[0]] = 0
tags[word][wordtup[0]] += 1
#try:
# if CHILDES_TAGMAP[wordtup[0]] == "SKIP":
# prevprev = prev
# prev = word
# continue
# tags[word][CHILDES_TAGMAP[wordtup[0]]] += 1
#except KeyError:
# prevprev = prev
# prev = word
# continue
# tags[word]["SKIP"] += 1
freqs[word] += 1
contexts[prev].add((prevprev,word))
# print prevprev, prev, word, "\t", prev, contexts[prev]
prevprev = prev
prev = word
contexts[prev].add((prevprev,STOP))
return tags, freqs, contexts, numtokens
def read_conlluniversalfile_eval(filename, numtokens, maxtokens):
findword = re.compile(r"^\d+\t(.+?)\t.+?\t(\w+)")
pospairs = []
with open(filename, "r") as f:
for line in f:
words = findword.findall(line.lower())
if not words:
continue
pospairs.extend([(tag_word((wordtup[1],wordtup[0]),False).lower(),wordtup[1]) for wordtup in words])
numtokens += len(words)
#try:
# pospairs.extend([(tag_word(wordtup,True).decode("utf-8"),CHILDES_TAGMAP[wordtup[0]]) for wordtup in words])
#except KeyError:
# pospairs.extend([(tag_word(wordtup,True).decode("utf-8"),"SKIP") for wordtup in words])
return pospairs, numtokens
def read_conlluniversalfile(filename, tags, freqs, contexts, numtokens, maxtokens):
findword = re.compile(r"^\d+\t(.+?)\t.+?\t(\w+)")
# findword = re.compile(r"\d+\t(.+?)\t.+?\t(\w+)\t(\w+)")
with open(filename, "r") as f:
prevprev = START
prev = START
ended = False
for line in f:
if not line.strip():
ended = True
contexts[prev].add((prevprev,STOP))
# print prevprev, prev, STOP, "\t", prev, contexts[prev]
prevprev = START
prev = START
continue
ended = False
words = findword.findall(line.lower())
for wordtup in words:
word = tag_word((wordtup[1],wordtup[0]),False).lower()
numtokens += 1
if check_tokenlimit(maxtokens, numtokens):
return tags, freqs, contexts, numtokens
if word not in tags:
tags[word] = defaultdict(int)
tags[word][wordtup[1]] += 1
freqs[word] += 1
contexts[prev].add((prevprev,word))
# print prevprev, prev, word, "\t", prev, contexts[prev]
prevprev = prev
prev = word
contexts[prev].add((prevprev,STOP))
prevprev = START
prev = START
return tags, freqs, contexts, numtokens
def read_lctlfile(filename, tags, freqs, contexts, numtokens, maxtokens):
findword = re.compile(r"""pos="(\w+)">(.+?)</""")
with open(filename, "r") as f:
prevprev = START
prev = START
for line in f:
if "</SEG>" in line:
contexts[prev].add((prevprev,STOP))
# print prevprev, prev, STOP, "\t", prev, contexts[prev]
prevprev = START
prev = START
continue
elif "<TOKEN id=" not in line:
continue
words = findword.findall(line.lower())
for wordtup in words:
word = tag_word((wordtup[1],wordtup[0]),False)
numtokens += 1
if check_tokenlimit(maxtokens, numtokens):
return tags, freqs, contexts, numtokens
freqs[word] += 1
if word not in tags:
tags[word] = defaultdict(int)
tags[word][wordtup[0]] += 1
contexts[prev].add((prevprev,word))
# print prevprev, prev, word, "\t", prev, contexts[prev]
prevprev = prev
prev = word
return tags, freqs, contexts, numtokens
def read_Xu_morphfile(filename):
roots_by_word = {}
suffixes_by_root = defaultdict(lambda : set([]))
with open(filename, "r") as f:
for line in f:
components = unicodedata.normalize('NFC',line.decode("utf-8")).split("\t")
word = components[0]
segmentation = components[1].split(" ")
root = segmentation[0]
finalsuff = segmentation[-1]
roots_by_word[word] = root
suffixes_by_root[root].add(finalsuff)
suffixes_by_root = dict(suffixes_by_root)
return roots_by_word, suffixes_by_root
``` |
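The CoNLL-U readers above lean entirely on the `^\d+\t(.+?)\t.+?\t(\w+)` pattern to pull the word form and POS column out of each token line. A minimal sketch of what that extraction yields, using two fabricated token lines (not from any corpus):
```python
# Illustrative only: the two CoNLL-U token lines below are fabricated.
# This mirrors the capture groups used by read_conlluniversalfile().
import re

findword = re.compile(r"^\d+\t(.+?)\t.+?\t(\w+)")

lines = [
    "1\tdogs\tdog\tNOUN\tNNS\t_\t2\tnsubj\t_\t_",
    "2\tbark\tbark\tVERB\tVBP\t_\t0\troot\t_\t_",
]
for line in lines:
    for form, pos in findword.findall(line.lower()):
        print((form, pos))   # ('dogs', 'noun') then ('bark', 'verb')
```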
{
"source": "jkoelker/holland",
"score": 2
} |
#### File: holland-commvault/holland_commvault/commvault.py
```python
import holland.core
import sys, os
import logging
import resource
from holland.core.util.bootstrap import bootstrap
from holland.commands.backup import Backup
from holland.core.command import run
from holland.core.cmdshell import HOLLAND_VERSION
from holland.core.util.fmt import format_loglevel
from argparse import ArgumentParser, Action
# The janky arguments Commvault throws at us
# http://documentation.commvault.com/commvault/release_8_0_0/books_online_1/english_us/features/pre_post/prepost_process.htm
# http://documentation.commvault.com/commvault/release_7_0_0/books_online_1/english_us/features/pre_post/prepost_process.htm
#CV_ARGS = ("-bkplevel",
# "-attempt",
# "-status",
# "-job",
# "-vm"
# "-cn")
class ArgList(Action):
def __call__(self, parser, namespace, value, option_string=None):
arg_list = [x.strip() for x in value.split(',')]
setattr(namespace, self.dest, arg_list)
def main():
# For some reason (take a wild guess) Commvault has decided that
# their long options will take the form of '-option' not the standard
# '--option'.
# Always set HOME to '/root', as the commvault environment is bare
os.environ['HOME'] = '/root'
os.environ['TMPDIR'] = '/tmp'
# ensure we do not inherit commvault's LD_LIBRARY_PATH
os.environ.pop('LD_LIBRARY_PATH', None)
argv = sys.argv[1:]
parser = ArgumentParser()
parser.add_argument("--config-file", "-c", metavar="<file>",
help="Read configuration from the given file")
parser.add_argument("--log-level", "-l", type='choice',
choices=['critical','error','warning','info',
'debug'],
help="Specify the log level."
)
parser.add_argument("--quiet", "-q", action="store_true",
help="Don't log to console")
parser.add_argument("--verbose", "-v", action="store_true",
help="Verbose output")
parser.add_argument("--bksets", "-b", metavar="<bkset>,<bkset>...",
help="only run the specified backupset",
default=[], action=ArgList)
parser.add_argument("-bkplevel", type=int)
parser.add_argument("-attempt", type=int)
parser.add_argument("-status", type=int)
parser.add_argument("-job", type=int)
parser.add_argument("-vm")
parser.add_argument("-cn")
parser.set_defaults(
config_file=os.getenv('HOLLAND_CONFIG') or '/etc/holland/holland.conf',
verbose=False,
)
args, largs = parser.parse_known_args(argv)
bootstrap(args)
logging.info("Holland (commvault agent) %s started with pid %d",
HOLLAND_VERSION, os.getpid())
# Commvault usually runs with a very low default limit for nofile
# so a best effort is taken to raise that here.
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (262144, 262144))
logging.debug("(Adjusted ulimit -n (RLIMIT_NOFILE) to %d)", 262144)
except (ValueError, resource.error), exc:
logging.debug("Failed to raise RLIMIT_NOFILE: %s", exc)
if args.log_level:
args.log_level = format_loglevel(args.log_level)
if run(['backup'] + args.bksets):
return 1
else:
return 0
```
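The only non-obvious piece above is the `ArgList` action, which folds Commvault's comma-separated `-b` value into a Python list. A standalone sketch of that behavior (the backupset names are invented):
```python
# Minimal sketch of the ArgList action in isolation; 'default' and
# 'weekly' are hypothetical backupset names.
from argparse import ArgumentParser, Action

class ArgList(Action):
    def __call__(self, parser, namespace, value, option_string=None):
        setattr(namespace, self.dest, [x.strip() for x in value.split(',')])

parser = ArgumentParser()
parser.add_argument('--bksets', '-b', default=[], action=ArgList)
args = parser.parse_args(['--bksets', 'default, weekly'])
print(args.bksets)   # ['default', 'weekly']
```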
#### File: core/backup/legacy.py
```python
import os
import sys
import time
import pprint
import logging
from holland.core.exceptions import BackupError
from holland.core.util.path import disk_free, directory_size
from holland.core.util.fmt import format_interval, format_bytes
from holland.core.plugin import load_backup_plugin, PluginLoadError
from holland.core.config import load_backupset_config
from holland.core.spool import spool
LOGGER = logging.getLogger(__name__)
def load_plugin(cfg):
provider = cfg.lookup('holland:backup.plugin')
LOGGER.info("Loading Backup Plugin '%s'", provider)
if not provider:
raise IOError("No provider defined")
try:
plugincls = load_first_entrypoint("holland.backup", provider)
except PluginLoadError, e:
raise LookupError("Failed to load plugin %r: %s" % (provider, e))
if not plugincls:
raise LookupError("Plugin %r not found" % ('holland.backup.' + provider))
return plugincls
def _find_existing_parent(path):
path = os.path.abspath(path)
while not os.path.exists(path):
path, _ = os.path.split(path)
if _ == '':
break
return path
def verify_space(required_space, target_directory):
available_space = disk_free(_find_existing_parent(target_directory))
if required_space >= available_space:
LOGGER.error("Insufficient Disk Space. Required: %s Available: %s",
format_bytes(required_space),
format_bytes(available_space))
raise BackupError("%s required but only %s available on %s" % \
(format_bytes(required_space),
format_bytes(available_space),
target_directory))
def purge_old_backups(backupset, backups_to_keep=1, exclude=()):
assert backups_to_keep > 0
LOGGER.info("Purging old backups from backupset '%s'", backupset)
backupset = spool.find_backupset(backupset)
if not backupset:
backups = []
else:
backups = [bk for bk in backupset.list_backups(reverse=True)
if bk not in exclude]
# Make sure we keep holland:backup.backups-to-keep
LOGGER.info("Found %d backups. Keeping %d", len(backups), backups_to_keep)
purge_list = []
for backup in backups:
if backup.config.get('holland:backup',{}).get('stop-time', 0) == 0:
LOGGER.debug("Purging broken backup")
purge_list.insert(0, backup)
elif backups_to_keep == 0:
LOGGER.debug("Purging old backup")
purge_list.insert(0, backup)
else:
LOGGER.debug("Retaining backup %s", backup.name)
backups_to_keep -= 1
if not purge_list:
LOGGER.info("No backups to purge")
else:
for backup in purge_list:
LOGGER.info("Purging %s", backup.name)
backup.purge()
def backup(backupset_name, dry_run=False, skip_purge=False):
# May raise a ConfigError if not backupset is found
LOGGER.info("Loading config for backupset %s", backupset_name)
try:
backupset_cfg = load_backupset_config(backupset_name)
except IOError, e:
LOGGER.error("Failed to load backupset %s: %s", backupset_name, e)
raise BackupError("Aborting due to previous errors.")
except SyntaxError, e:
LOGGER.error("Failed to load backupset config %r [%s]. %s",
backupset_name,
e.config.filename,
e
)
LOGGER.error("Bad line appears to be '%s'", e.line)
raise BackupError("Aborting due to previous errors.")
# May raise a PluginError if the plugin could not be loaded
LOGGER.info("Loading plugin %s", backupset_cfg.lookup('holland:backup.plugin'))
try:
plugincls = load_backup_plugin(backupset_cfg.lookup('holland:backup.plugin'))
except PluginLoadError, e:
LOGGER.error("Failed to load plugin %s: %s",
backupset_cfg.lookup('holland:backup.plugin'), e)
raise BackupError(e)
# Possible IOError here if we cannot write to spool
# Don't create the directory in dry-run mode
backup_job = spool.add_backup(backupset_name)
LOGGER.info("Prepared backup spool %s", backup_job.path)
# Always merge in the backupset config to the backup-local config
LOGGER.debug("Merging backupset config into local backup.conf config")
backup_job.config.merge(backupset_cfg)
backup_job.validate_config()
# Plugin may fail to initialize due to programming error
LOGGER.debug("Initializing backup plugin instance")
try:
plugin = plugincls(backupset_name, backup_job.config, backup_job.path, dry_run)
except Exception, e:
LOGGER.debug("Failed to instantiate backup plugin %s: %s",
backupset_cfg.lookup('holland:backup.plugin'),
e, exc_info=True)
raise BackupError("Failed to initialize backup plugin %s: %s" %
(backupset_cfg.lookup('holland:backup.plugin'), e))
# Plugin may raise exception due to programming error, be careful
estimated_size = plugin.estimate_backup_size()
estimate_factor = backup_job.config['holland:backup']['estimated-size-factor']
adjusted_estimate = estimate_factor*estimated_size
LOGGER.info("Estimated Backup Size: %s",
format_bytes(estimated_size)
)
if adjusted_estimate != estimated_size:
LOGGER.info("Using estimated-size-factor=%.2f and adjusting estimate to %s",
estimate_factor,
format_bytes(adjusted_estimate)
)
# Save the estimated size in the backup.conf
backup_job.config['holland:backup']['estimated-size'] = estimated_size
try:
verify_space(adjusted_estimate, backup_job.path)
except BackupError, exc:
if not dry_run:
raise
if not dry_run:
LOGGER.info("Purging old backup jobs")
purge_old_backups(backupset_name,
backup_job.config.lookup('holland:backup.backups-to-keep'),
exclude=[backup_job])
# Start backup
backup_job.config['holland:backup']['start-time'] = time.time()
# initialize spool directory
if not dry_run:
backup_job.prepare()
exc = None
try:
LOGGER.info("Starting backup[%s] via plugin %s",
backup_job.name,
backupset_cfg.lookup('holland:backup.plugin'))
plugin.backup()
except KeyboardInterrupt:
exc = BackupError("Interrupted")
except Exception, exc:
if not isinstance(exc, BackupError):
LOGGER.debug("Unexpected exception when running backups.", exc_info=True)
exc = BackupError(exc)
backup_job.config['holland:backup']['stop-time'] = time.time()
backup_interval = (backup_job.config['holland:backup']['stop-time'] -
backup_job.config['holland:backup']['start-time'])
if dry_run:
LOGGER.info("Dry-run completed in %s",
format_interval(backup_interval))
else:
LOGGER.info("Backup completed in %s",
format_interval(backup_interval))
if not dry_run and exc is None:
final_size = directory_size(backup_job.path)
LOGGER.info("Final on-disk backup size: %s %.2f%% of estimated size %s",
format_bytes(final_size),
estimated_size and 100*(float(final_size)/estimated_size) or 0.0,
format_bytes(estimated_size))
backup_job.config['holland:backup']['on-disk-size'] = final_size
LOGGER.debug("Flushing backup job")
backup_job.flush()
if exc is not None:
if backup_job.config['holland:backup']['auto-purge-failures'] is True:
LOGGER.warning("Purging this backup (%s) due to failure", backup_job.name)
backup_job.purge()
raise
```
#### File: core/command/command.py
```python
import os
import sys
import re
import optparse
import textwrap
import logging
from types import StringTypes
from inspect import getargspec, getdoc
import logging
from holland.core.util.template import Template
LOGGER = logging.getLogger(__name__)
def option(*args, **kwargs):
return optparse.make_option(*args, **kwargs)
class StopOptionProcessing(Exception):
pass
class _OptionParserEx(optparse.OptionParser):
def __init__(self, **kwargs):
optparse.OptionParser.__init__(self, **kwargs)
#self.remove_option('--help')
def error(self, msg):
raise optparse.OptParseError(msg)
def exit(self, status=0, msg=None):
if not status:
raise StopOptionProcessing(msg)
else:
# TODO: don't lose status info here
raise optparse.OptParseError(msg)
option = optparse.make_option
class Command(object):
"""Base Command class for implementing pluggable
commands.
User commands typically inherit this class and
implement an appropriate run(self, cmdname, opts, [args...])
and this parent class will discover the acceptable arguments
based on the run() method signature
"""
name = None
aliases = [
]
options = [
]
description = ''
def __init__(self):
help_fmt = optparse.IndentedHelpFormatter()
self.optparser = _OptionParserEx(prog=self.name,
add_help_option=False,
formatter=help_fmt)
self.optparser.add_option('--help', '-h', action='store_true',
help='show this help message and exit')
self.optparser.add_options(self.options)
def format_cmd_options(self):
"""
Format the options this command supports
Default behavior is to delegate to format_option_help() of
the associated OptionParser instance for this command
"""
return self.optparser.format_option_help()
def format_arg(self, arg):
"""
Format an individual argument this command supports
"""
return arg.replace('_', '-')
def format_varargs(self, arg):
"""
Format how variable arguments (\*args) are displayed
"""
return '[%s...]' % self.format_arg(arg)
def _check_argspec(self, args, varargs, varkw, defaults):
for arg in args:
if not isinstance(arg, StringTypes):
raise AssertionError('Tuple arguments are not supported')
if varkw:
raise AssertionError('Keyword arguments are not supported')
def format_cmd_args(self):
"""
Format all the arguments accepted by this command
Defers to self.format_arg and self.format_varargs
"""
args, varargs, varkw, defaults = getargspec(self.run)
self._check_argspec(args, varargs, varkw, defaults)
args = args[3:]
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i in range(len(args)):
spec = self.format_arg(args[i])
if defaults and i >= firstdefault:
spec = '[' + spec + ']'
specs.append(spec)
if varargs is not None:
specs.append(self.format_varargs(varargs))
return ' '.join(specs)
def usage(self):
"""
Format this command's usage string
"""
tpl = Template("Usage: ${cmd_name} ${options}${cmd_args}")
return tpl.safe_substitute(cmd_name=self.name,
options=self.options and "[options] " or "",
cmd_args=self.format_cmd_args())
def reformat_paragraphs(self, str):
from textwrap import wrap
paragraphs = []
buffer = ''
for line in str.splitlines():
if not line and buffer:
paragraphs.append("\n".join(wrap(buffer, 65)))
buffer = ''
else:
buffer += line
if buffer:
paragraphs.append(buffer)
return "\n\n".join(paragraphs)
def help(self):
"""
Format this command's help output
Default is to use the class' docstring as a
template and interpolate the name, options and
arguments
"""
usage_str = getdoc(self) or ''
usage_str = self.reformat_paragraphs(usage_str)
cmd_name = self.name
cmd_opts = self.format_cmd_options()
cmd_args = self.format_cmd_args()
help_str = Template(usage_str).safe_substitute(cmd_name=cmd_name,
cmd_option_list=cmd_opts,
cmd_args=cmd_args,
cmd_usage=self.usage()
).rstrip()
return re.sub(r'\n\n+', r'\n\n', help_str)
def parse_args(self, argv):
"""
Parse the options for this command
"""
self.optparser.prog = argv.pop(0)
opts, args = self.optparser.parse_args(argv)
return opts, args
def dispatch(self, argv):
"""
Dispatch arguments to this command
Parses the arguments through this command's
option parser and delegates to self.run(\*args)
"""
run_args, run_varargs, run_varkw, run_defaults = getargspec(self.run)
try:
opts, args = self.parse_args(argv)
except StopOptionProcessing, e:
return 1
except optparse.OptParseError, e:
print >>sys.stderr, self.usage()
print
print >>sys.stderr, "%s: error: %s" % (self.name, e)
return 1
if opts.help:
print self.help()
return os.EX_USAGE
cmd_name = self.optparser.prog
if len(args) > len(run_args[3:]) and not run_varargs:
print >>sys.stderr, "Error: %s only accepts %d arguments but %d were provided" % ( (self.name, len(run_args[3:]), len(args)))
print self.help()
return os.EX_USAGE
num_req = len(run_defaults or []) or 0
if len(args) < len(run_args[3:-num_req or None]):
print >>sys.stderr, "Failed: %s requires %d arguments required, %d provided" % (cmd_name,len(run_args[3:-num_req or None]), len(args))
print self.help()
return os.EX_USAGE
try:
return self.run(self.optparser.prog, opts, *args)
except KeyboardInterrupt:
raise
except Exception, e:
LOGGER.error("Uncaught exception while running command '%s': %r", cmd_name, e, exc_info=True)
return os.EX_SOFTWARE
def run(self, cmd, opts, *args):
"""
This should be overridden by subclasses
"""
pass
def __cmp__(self, other):
"""
Sort this commmand alphabetically
"""
# Useful for sorting a list of commands alphabetically
return cmp(self.name, other.name)
```
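Because `Command` derives its usage string and argument checks from the signature of `run()`, a subclass only has to declare a name, any options, and the `run()` method itself. A hypothetical subclass, purely for illustration (there is no 'greet' command in holland):
```python
# Hypothetical Command subclass; 'greet' is not a real holland command.
class Greet(Command):
    name = 'greet'
    aliases = ['hello']
    options = [option('--shout', action='store_true',
                      help='uppercase the greeting')]
    description = 'Print a greeting'

    def run(self, cmd, opts, who='world'):
        msg = 'hello %s' % who
        if opts.shout:
            msg = msg.upper()
        print(msg)
        return 0

# Greet().dispatch(['greet', '--shout', 'holland']) prints "HELLO HOLLAND"
# and returns 0; the optional 'who' argument shows up as "[who]" in usage().
```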
#### File: core/command/__init__.py
```python
import os
import sys
import logging
from command import Command, option, StopOptionProcessing
from holland.core.plugin import get_commands
__all__ = [
'Command',
'option',
'StopOptionProcessing',
'run'
]
LOGGER = logging.getLogger(__name__)
def run(args=None):
if args is None:
args = sys.argv[1:]
# Run the requested command
commands = get_commands()
if not args:
args = ['help']
command_name = args[0]
if command_name not in commands:
print >>sys.stderr, "No such command: %r" % command_name
return os.EX_UNAVAILABLE
else:
cmdobj = commands[command_name]()
try:
return cmdobj.dispatch(args)
except KeyboardInterrupt:
LOGGER.info("Interrupt")
return os.EX_SOFTWARE
except Exception, e:
LOGGER.debug("Command %r failed: %r", exc_info=True)
print >>sys.stderr, "Command %r failed: %r" % (command_name, e)
return os.EX_SOFTWARE
```
#### File: core/util/path.py
```python
import os
import sys
import stat
import time
import logging
LOG = logging.getLogger(__name__)
def ensure_dir(dir_path):
"""
Ensure a directory path exists (by creating it if it doesn't).
"""
if not os.path.exists(dir_path):
try:
os.makedirs(dir_path)
LOG.debug("created directory %s" % dir_path)
return True
except OSError, e:
# FIX ME: Need error codes/etc so this will exit(<code>) or raise
# an appropriate holland exception
LOG.error("os.makedirs(%s): %s" % (dir_path, e))
raise
return False
def protected_path(path):
"""
Take a path, and if the file/dir exists, pass back a protected path
(suffixed).
Returns:
string = new file path
Example:
>>> mypath = '/tmp'
>>> new_path = helpers.protected_path(mypath)
>>> new_path
'/tmp.0'
"""
log = logging.getLogger(__name__)
safety = 0
safe_path = path
while True:
if os.path.exists(safe_path):
safe_path = "%s.%s" % (path, safety)
else:
break
safety = safety + 1
return safe_path
def format_bytes(bytes, precision=2):
"""
Format an integer number of bytes to a human
readable string.
If bytes is negative, this method raises ArithmeticError
"""
import math
if bytes < 0:
raise ArithmeticError("Only Positive Integers Allowed")
if bytes != 0:
exponent = math.floor(math.log(bytes, 1024))
else:
exponent = 0
return "%.*f%s" % (
precision,
bytes / (1024 ** exponent),
['B','KB','MB','GB','TB','PB','EB','ZB','YB'][int(exponent)]
)
def normpath(path):
from os.path import normpath, abspath
return abspath(normpath(path))
def relpath(path, start=os.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = [x for x in os.path.abspath(start).split(os.sep) if x]
path_list = [x for x in os.path.abspath(path).split(os.sep) if x]
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.curdir
return os.path.join(*rel_list)
def getmount(path):
"""Return the mount point of a path
:param path: path to find the mountpoint for
:returns: str mounpoint path
"""
path = os.path.realpath(path)
while path != os.path.sep:
if os.path.ismount(path):
return path
path = os.path.abspath(os.path.join(path, os.pardir))
return path
def disk_capacity(target_path):
"""Find the total capacity of the filesystem that target_path is on
:returns: integer number of bytes
"""
path = getmount(target_path)
info = os.statvfs(path)
return info.f_frsize*info.f_blocks
def disk_free(target_path):
"""
Find the amount of space free on a given path
Path must exist.
This method does not take quotas into account; it returns the size
in bytes potentially available to a non-privileged user.
"""
path = getmount(target_path)
info = os.statvfs(path)
return info.f_frsize*info.f_bavail
def directory_size(path):
"""
Find the size of all files in a directory, recursively
Returns the size in bytes on success
"""
from os.path import join, getsize
result = 0
for root, dirs, files in os.walk(path):
for name in files:
try:
sz = getsize(join(root,name))
result = result + sz
except OSError, exc:
pass
return result
```
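Since `format_bytes` and `relpath` are pure helpers, a couple of concrete values make the contract clear (assuming the functions above are in scope; the relpath output is shown for a POSIX layout):
```python
# Values follow directly from the implementations above.
print(format_bytes(0))               # '0.00B'
print(format_bytes(1536))            # '1.50KB'
print(format_bytes(5 * 10 ** 6))     # '4.77MB'
print(relpath('/tmp/a/b', '/tmp'))   # 'a/b'
```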
#### File: mysqldump/mock/popen.py
```python
from mocker import *
def _debug_wait(*args, **kwargs):
print "Waiting(args=%r, kwargs=%r)" % (args, kwargs)
return 0
def mock_subprocess(mocker):
popen = mocker.replace('subprocess.Popen')
pid = popen(ARGS, KWARGS)
mocker.count(min=0,max=None)
pid.poll()
mocker.count(min=0,max=None)
mocker.result(0)
pid.wait()
mocker.count(min=0,max=None)
mocker.result(0)
foo = pid.returncode
mocker.count(min=0,max=None)
mocker.result(0)
mock_subprocess_stdin(mocker, pid)
mock_subprocess_stdout(mocker, pid)
mock_subprocess_stderr(mocker, pid)
def mock_subprocess_stdin(mocker, pid):
# mock stdin, stdout, stderr as iterate file-like objects
pid.stdin.write(ANY)
mocker.count(min=0,max=None)
mocker.call(lambda s: len(s))
pid.stdin.close()
mocker.count(min=0,max=None)
def mock_subprocess_stdout(mocker, pid):
pid.stdout.read(ANY)
mocker.count(min=0, max=None)
mocker.result('')
iter(pid.stdout)
mocker.count(min=0, max=None)
mocker.generate('')
pid.stdout.fileno()
mocker.count(min=0,max=None)
mocker.result(-1)
pid.stdout.close()
mocker.count(min=0,max=None)
mocker.result(-1)
def mock_subprocess_stderr(mocker, pid):
pid.stderr.read(ANY)
mocker.count(min=0, max=None)
mocker.result('')
iter(pid.stderr)
mocker.count(min=0, max=None)
mocker.generate('')
pid.stderr.fileno()
mocker.count(min=0,max=None)
mocker.result(-1)
```
#### File: mysqldump/mysql/option.py
```python
import os
import re
import codecs
import logging
from holland.backup.mysqldump.util import INIConfig, BasicConfig
from holland.backup.mysqldump.util.config import update_config
from holland.backup.mysqldump.util.ini import ParsingError
LOG = logging.getLogger(__name__)
def merge_options(path,
*defaults_files,
**kwargs):
defaults_config = INIConfig()
defaults_config._new_namespace('client')
for config in defaults_files:
_my_config = load_options(config)
update_config(defaults_config, _my_config)
for key in ('user', 'password', 'socket', 'host', 'port'):
if kwargs.get(key) is not None:
defaults_config['client'][key] = kwargs[key]
write_options(defaults_config, path)
def load_options(filename):
"""Load mysql option file from filename"""
filename = os.path.abspath(os.path.expanduser(filename))
cfg = INIConfig()
try:
cfg._readfp(open(filename, 'r'))
except ParsingError, exc:
LOG.debug("Skipping unparsable lines")
for lineno, line in exc.errors:
LOG.debug("Ignored line %d: %s", lineno, line.rstrip())
return client_sections(cfg)
def unquote(value):
"""Remove quotes from a string."""
if len(value) > 1 and value[0] == '"' and value[-1] == '"':
value = value[1:-1]
# substitute meta characters per:
# http://dev.mysql.com/doc/refman/5.0/en/option-files.html
MYSQL_META = {
'b' : "\b",
't' : "\t",
'n' : "\n",
'r' : "\r",
'\\': "\\",
's' : " ",
'"' : '"',
}
return re.sub(r'\\(["btnr\\s])',
lambda m: MYSQL_META[m.group(1)],
value)
def quote(value):
"""Added quotes around a value"""
return '"' + value.replace('"', '\\"') + '"'
def client_sections(config):
"""Create a copy of config with only valid client auth sections
This includes [client], [mysql] and [holland] with only options
related to mysql authentication.
"""
clean_cfg = INIConfig()
clean_cfg._new_namespace('client')
valid_sections = ['client', 'mysql', 'holland']
for section in valid_sections:
if section in config:
clean_section = client_keys(config[section])
update_config(clean_cfg.client, clean_section)
return clean_cfg
def client_keys(config):
"""Create a copy of option_section with non-authentication options
stripped out.
Authentication options supported are:
user, password, host, port, and socket
"""
clean_namespace = BasicConfig()
update_config(clean_namespace, config)
valid_keys = ['user', 'password', 'host', 'port', 'socket']
for key in config:
if key not in valid_keys:
del clean_namespace[key]
else:
clean_namespace[key] = unquote(config[key])
return clean_namespace
def write_options(config, filename):
quoted_config = INIConfig()
update_config(quoted_config, config)
for section in config:
for key in config[section]:
if '"' in config[section][key]:
config[section][key] = quote(config[section][key])
if isinstance(filename, basestring):
filename = codecs.open(filename, 'w', 'utf8')
data = unicode(config)
print >>filename, data
filename.close()
```
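The quote/unquote pair implements the MySQL option-file escaping referenced in the comment above; a quick round-trip shows the effect (assuming the functions above are in scope):
```python
# unquote() strips surrounding quotes and expands \t, \n, \s and friends;
# quote() escapes embedded double quotes on the way back out.
print(unquote('"se\\tcret"'))   # 'se<TAB>cret' -- the \t becomes a real tab
print(quote('pass"word'))       # prints "pass\"word" with the quote escaped
```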
#### File: tests/util/__init__.py
```python
import unittest, doctest
import test_ini
import test_misc
import test_fuzz
import test_compat
import test_unicode
from holland.backup.mysqldump.util import config
from holland.backup.mysqldump.util import ini
class suite(unittest.TestSuite):
def __init__(self):
unittest.TestSuite.__init__(self, [
doctest.DocTestSuite(config),
doctest.DocTestSuite(ini),
test_ini.suite(),
test_misc.suite(),
test_fuzz.suite(),
test_compat.suite(),
test_unicode.suite(),
])
```
#### File: holland/lib/compression.py
```python
import os
import logging
import errno
import subprocess
import which
import shlex
from tempfile import TemporaryFile
LOG = logging.getLogger(__name__)
#: This is a simple table of method_name : (command, extension)
#: mappings.
COMPRESSION_METHODS = {
'gzip' : ('gzip', '.gz'),
'gzip-rsyncable' : ('gzip --rsyncable', '.gz'),
'pigz' : ('pigz', '.gz'),
'bzip2' : ('bzip2', '.bz2'),
'pbzip2': ('pbzip2', '.bz2'),
'lzop' : ('lzop', '.lzo'),
'lzma' : ('xz', '.xz'),
'gpg' : ('gpg -e --batch --no-tty', '.gpg'),
}
def lookup_compression(method):
"""
Looks up the passed compression method in supported COMPRESSION_METHODS
and returns a tuple in the form of ('command_name', 'file_extension').
Arguments:
method -- A string identifier of the compression method (i.e. 'gzip').
"""
try:
cmd, ext = COMPRESSION_METHODS[method]
argv = shlex.split(cmd)
try:
return [which.which(argv[0])] + argv[1:], ext
except which.WhichError, e:
raise OSError("No command found for compression method '%s'" %
method)
except KeyError:
raise OSError("Unsupported compression method '%s'" % method)
class CompressionInput(object):
"""
Class to create a compressed file descriptor for reading. Functions like
a standard file descriptor such as from open().
"""
def __init__(self, path, mode, argv, bufsize=1024*1024):
self.fileobj = open(path, 'r')
self.pid = subprocess.Popen(argv + ['--decompress'],
stdin=self.fileobj.fileno(),
stdout=subprocess.PIPE,
bufsize=bufsize)
self.fd = self.pid.stdout.fileno()
self.name = path
self.closed = False
def fileno(self):
return self.fd
def read(self, size):
return os.read(self.fd, size)
def next(self):
return self.pid.stdout.next()
def __iter__(self):
return iter(self.pid.stdout)
def close(self):
import signal
os.kill(self.pid.pid, signal.SIGTERM)
self.fileobj.close()
self.pid.stdout.close()
self.pid.wait()
self.closed = True
class CompressionOutput(object):
"""
Class to create a compressed file descriptor for writing. Functions like
a standard file descriptor such as from open().
"""
def __init__(self, path, mode, argv, level, inline):
self.argv = argv
self.level = level
self.inline = inline
if not inline:
self.fileobj = open(os.path.splitext(path)[0], mode)
self.fd = self.fileobj.fileno()
else:
self.fileobj = open(path, 'w')
if level:
if "gpg" in argv[0]:
argv += ['-z%d' % level]
else:
argv += ['-%d' % level]
LOG.debug("* Executing: %s", subprocess.list2cmdline(argv))
self.stderr = TemporaryFile()
self.pid = subprocess.Popen(argv,
stdin=subprocess.PIPE,
stdout=self.fileobj.fileno(),
stderr=self.stderr)
self.fd = self.pid.stdin.fileno()
self.name = path
self.closed = False
def fileno(self):
return self.fd
def write(self, data):
return os.write(self.fd, data)
def close(self):
self.closed = True
if not self.inline:
argv = list(self.argv)
if self.level:
if "gpg" in argv[0]:
argv += ['-z%d' % self.level, '-']
else:
argv += ['-%d' % self.level, '-']
self.fileobj.close()
self.fileobj = open(self.fileobj.name, 'r')
cmp_f = open(self.name, 'w')
LOG.debug("Running %r < %r[%d] > %r[%d]",
argv, self.fileobj.name, self.fileobj.fileno(),
cmp_f.name, cmp_f.fileno())
pid = subprocess.Popen(argv,
stdin=self.fileobj.fileno(),
stdout=cmp_f.fileno())
status = pid.wait()
os.unlink(self.fileobj.name)
else:
self.pid.stdin.close()
status = self.pid.wait()
stderr = self.stderr
stderr.flush()
stderr.seek(0)
try:
if status != 0:
for line in stderr:
if not line.strip(): continue
LOG.error("%s: %s", self.argv[0], line.rstrip())
raise IOError(errno.EPIPE,
"Compression program '%s' exited with status %d" %
(self.argv[0], status))
else:
for line in stderr:
if not line.strip(): continue
LOG.info("%s: %s", self.argv[0], line.rstrip())
finally:
stderr.close()
def stream_info(path, method=None, level=None):
"""
Determine compression command, and compressed path based on original path
and compression method. If method is not passed, or level is 0 the
original path is returned.
Arguments:
path -- Path to file to compress/decompress
method -- Compression method (i.e. 'gzip', 'bzip2', 'pbzip2', 'lzop')
level -- Compression level (0-9)
"""
if not method or level == 0:
return path
argv, ext = lookup_compression(method)
if not argv:
raise IOError("Unknown compression method '%s'" % argv[0])
if not path.endswith(ext):
path += ext
return argv, path
def _parse_args(value):
"""Convert a cmdline string to a list"""
if isinstance(value, unicode):
value = value.encode('utf8')
return shlex.split(value)
def open_stream(path,
mode,
method=None,
level=None,
inline=True,
extra_args=None):
"""
Opens a compressed data stream, and returns a file descriptor type object
that acts much like os.open() does. If no method is passed, or the
compression level is 0, simply returns a file descriptor from open().
Arguments:
mode -- File access mode (i.e. 'r' or 'w')
method -- Compression method (i.e. 'gzip', 'bzip2', 'pbzip2', 'lzop')
level -- Compression level
inline -- Boolean whether to compress inline, or after the file is written.
"""
if not method or method == 'none' or level == 0:
return open(path, mode)
else:
argv, path = stream_info(path, method)
if extra_args:
argv += _parse_args(extra_args)
if mode == 'r':
return CompressionInput(path, mode, argv=argv)
elif mode == 'w':
return CompressionOutput(path, mode, argv=argv, level=level,
inline=inline)
else:
raise IOError("invalid mode: %s" % mode)
```
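A minimal usage sketch for `open_stream`, assuming this module is importable and a `gzip` binary is on the PATH; the file path and compression level are arbitrary choices for the example:
```python
# Hypothetical usage; /tmp/example.sql is a made-up path. Writing with
# method='gzip' appends the extension, so the data lands in
# /tmp/example.sql.gz via a piped 'gzip -6' process.
out = open_stream('/tmp/example.sql', 'w', method='gzip', level=6)
out.write('CREATE TABLE example (id INT);\n')
out.close()

# Reading wraps the file in CompressionInput, which pipes it through
# 'gzip --decompress' and exposes a read()/iteration interface.
inp = open_stream('/tmp/example.sql.gz', 'r', method='gzip')
print(inp.read(1024))
inp.close()
```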
#### File: holland.lib.common/tests/test_archive.py
```python
import os
import shutil
import tempfile
from nose.tools import *
from holland.lib.archive import *
def setup_func():
global tmpdir
tmpdir = tempfile.mkdtemp()
def teardown_func():
global tmpdir
shutil.rmtree(tmpdir)
@with_setup(setup_func, teardown_func)
def test_dir_archive():
global tmpdir
axv = DirArchive(os.path.join(tmpdir, 'dir'))
name_list = []
for num in xrange(1, 16):
fd, filename = tempfile.mkstemp(dir=tmpdir)
os.close(fd)
basename = os.path.basename(filename)
axv.add_file(filename, basename)
name_list.append(basename)
for name in axv.list():
ok_(name in name_list)
@with_setup(setup_func, teardown_func)
def test_tar_archive():
global tmpdir
axv = TarArchive(os.path.join(tmpdir, 'tar'))
name_list = []
for num in xrange(1, 16):
fd, filename = tempfile.mkstemp(dir=tmpdir)
os.close(fd)
basename = os.path.basename(filename)
axv.add_file(filename, basename)
name_list.append(basename)
for name in axv.list():
ok_(name in name_list)
@with_setup(setup_func, teardown_func)
def test_zip_archive():
global tmpdir
axv = ZipArchive(os.path.join(tmpdir, 'zip'))
name_list = []
for num in xrange(1, 16):
fd, filename = tempfile.mkstemp(dir=tmpdir)
os.close(fd)
basename = os.path.basename(filename)
axv.add_file(filename, basename)
name_list.append(basename)
for name in axv.list():
ok_(name in name_list)
```
#### File: holland/tests/deprecrated_test_sysutils_helper.py
```python
import sys, os
import platform
import unittest
import tempfile
import string
import random
import logging
import logging.handlers as handlers
import md5
from shutil import rmtree
# FIXME: This used to test holland.helpers which was
# a collection of utility methods. These have
# since been forked off into various plugins
# or merged into holland.core.util. This
# should be updated to test holland.core.util
# and any other tests added to the appropriate
# plugin egg's test suite.
#class TestSysUtilsHelper(unittest.TestCase):
# hack: disabling test until I fix
# many of these functions have been shuffled around
# into individual plugins and need merged into those
# test cases
class Test(object):
"""
A test class for testing the sysutils helper
"""
def setUp(self):
self.log = logging.getLogger('holland')
file = logging.FileHandler(filename='/dev/null')
self.log.addHandler(file)
def test_ensure_dir(self):
# No arguments
self.assertRaises(TypeError, h.ensure_dir);
# Directory that already exists
self.assertEqual(h.ensure_dir('/tmp'), True)
# File that already exists
self.assertEqual(h.ensure_dir('/dev/null'), True)
# Directory that does not exist
self.assertEqual(h.ensure_dir('/tmp/testdir'), True)
# Directory that cannot be created
self.assertRaises(OSError, h.ensure_dir, '/dev/null/dir')
# Cleanup
os.rmdir('/tmp/testdir')
def test_protected_path(self):
# file
fd,file_path = tempfile.mkstemp(prefix='holland-test-')
safe_path = h.protected_path(file_path)
expected_path = "%s.0" % file_path
self.assertEquals(safe_path == expected_path, True)
# dir
dir_path = tempfile.mkdtemp(prefix='holland-test-')
safe_path = h.protected_path(dir_path)
expected_path = "%s.0" % dir_path
self.assertEquals(safe_path == expected_path, True)
# clean up
os.remove(file_path)
rmtree(dir_path)
def test_get_compression_stream(self):
for c_mode in ['gzip', 'bzip2']:
fd,file_path = tempfile.mkstemp(prefix='holland-test-')
dir_path = tempfile.mkdtemp(prefix='holland-test-dir')
file_path = os.path.realpath(file_path)
os.remove(file_path)
dir_path = os.path.realpath(dir_path)
data = ''
for i in xrange(1024**2):
data = data + random.choice(string.letters)
stream = h.get_compression_stream(output_path=file_path, mode=c_mode)
stream.write(data)
stream.close()
new_file_path = h.decompress_path(
source_path=file_path, dest_dir=dir_path, mode=c_mode
)
f = open(new_file_path, 'r')
a = md5.new(f.read()).digest()
b = md5.new(data).digest()
self.assertEqual(a == b, True)
f.close()
# clean up
os.remove(new_file_path)
rmtree(dir_path)
def test_compress_path(self):
# Test to see if a file can be gzipped and ungzipped
# (and it returns the same md5sum)
fd,file_path = tempfile.mkstemp(prefix='holland-test-')
dir_path = tempfile.mkdtemp(prefix='holland-test-dir')
file_path = os.path.realpath(file_path)
dir_path = os.path.realpath(dir_path)
# Create and compress the file
handle = os.fdopen(fd, 'w')
for i in xrange(1024**2):
handle.write(random.choice(string.letters))
handle.close()
comp_path = h.compress_path(
source_path = file_path, dest_dir = dir_path,
remove_source = False, mode = 'gzip'
)
self.assertEqual(comp_path != None, True)
# Uncompress the file and compare to original
uncomp_path = h.decompress_path(
source_path = comp_path, dest_dir = dir_path,
remove_source = False, mode = 'gzip'
)
self.assertEqual(uncomp_path != None, True)
original_file = file(file_path)
uncompressed_file = file(uncomp_path)
a = md5.new(original_file.read()).digest()
b = md5.new(uncompressed_file.read()).digest()
self.assertEqual(a == b, True)
# Platform-specific tests
# FIX ME:
# Tests are incomplete and have not been tested on Linux platform
def test_mount_info(self):
self.assertRaises(TypeError, h.mount_info)
if platform.system() != 'Linux':
print "Skipping Test For This Platform (%s)" % platform.system()
return False
def test_which(self):
# No arguments given
self.assertRaises(TypeError, h.which)
if platform.system() == 'Windows':
print "Skipping Test For This Platform (%s)" % platform.system()
return False
# Common utility test
self.assertEqual(h.which('ls'), '/bin/ls')
# Not found test
self.assertRaises(OSError, h.which, 'notacommand')
# FIX ME: Incomplete Test
def test_relpath(self):
# No arguments given
self.assertRaises(TypeError, h.relpath)
if platform.system() == 'Windows':
print "Skipping Test For This Platform (%s)" % platform.system()
return False
# Same Path
self.assertEqual(h.relpath('test', 'test'), '')
# Empty Path
self.assertEqual(h.relpath('', ''), '')
# Sub-Path
self.assertEqual(h.relpath('/tmp/test', '/test'), None)
# End of platform-specific tests
def test_format_bytes(self):
# No arguments given
self.assertRaises(TypeError, h.format_bytes)
# 0 bytes
self.assertEqual(h.format_bytes(0), '0.00B')
# 1b
self.assertEqual(h.format_bytes(1), '1.00B')
# 1KiB
self.assertEqual(h.format_bytes(1024), '1.00KiB')
# Remaing test for other units.
# Note the + 2 since we ran the '1b' and '1KiB' tests above
# and these were taken from the array in the original function
units = ['MiB','GiB','TiB','PiB','EiB','ZiB','YiB']
for unit in units:
power = units.index(unit) + 2
self.assertEqual(h.format_bytes(1024**power),
'1.00' + unit)
# Negative Bytes
self.assertRaises(ArithmeticError, h.format_bytes, -1);
def tearDown(self):
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestSysUtilsHelper))
return suite
if __name__ == '__main__':
unittest.main()
unittest.TextTestRunner(verbosity=3).run(suite())
``` |
{
"source": "jkoelker/investing",
"score": 3
} |
#### File: picloud/magicformula/predictng.py
```python
import argparse
import sys
import eoddata
import numpy as np
import pandas as pd
import twitter
EXCLUDED_SECTORS = ('Finance - Savings and Loans',
'Closed-End Investment Bond Funds',
'Banks and Finance')
EXCLUDED_INDUSTRIES = ('Power Generation', # Utilities
'Electric Utilities: Central',
'Savings Institutions', # Finantial
'Accident &Health Insurance',
'Finance Companies',
'Finance/Investors Services',
'Commercial Banks',
'Closed-End Fund - Foreign',
'Closed-End Fund - Equity',
'Water Supply',
'Finance: Consumer Services',
'Major Banks',
'Investment Bankers/Brokers/Service',
'Diversified Financial Services',
'Savings & Loans',
'Credit Services',
'Diversified Investments',
'Financial Services',
'Banks',
'Regional - Pacific Banks',
'Regional - Mid-Atlantic Banks',
'Real Estate Investment Trusts',
'Investment Managers',
'Life Insurance',
'Property-Casualty Insurers',
'Closed-End Fund - Debt',
'Specialty Insurers',
'Oil & Gas Production', # Oil and gass
'Integrated oil Companies',
'Oil Refining/Marketing',
'Oil/Gas Transmission',
'Oilfield Services/Equipment',
'Independent Oil & Gas',
'Natural Gas Distribution')
def publish_to_twitter(df, prefix='MF', api=None, **kwargs):
if api is None:
api = twitter.Api(**kwargs)
msg = ' '.join(['$%s' % s for s in df.T.index])
msg = '%s: %s' % (prefix, msg)
if len(msg) > 140:
return publish_to_twitter(df[:-1], prefix, api, **kwargs)
return api.PostUpdate(msg)
def rank_stocks(df):
df['roc_rank'] = df['roc'].rank(method='max', ascending=0)
df['yield_rank'] = df['yield'].rank(method='max', ascending=0)
df['pe_rank'] = df['pe'].rank(method='min', ascending=1)
return df.sort_index(by=['pe_rank', 'roc_rank'],
ascending=[1, 1])
def get_stocks(eod_kwargs):
client = eoddata.Client(**eod_kwargs)
fundamentals = {}
for exchange in ('NYSE', 'NASDAQ'):
fundamentals.update(client.fundamentals(exchange))
df = pd.DataFrame(fundamentals).T
for col in ('market_cap', 'pt_b', 'ebitda', 'pe', 'yield'):
df[col] = df[col].astype(np.float64)
df = df[df['market_cap'] >= 50000000]
df = df[df['pe'] > 5]
df = df[df['pe'] < 25]
df = df[df['yield'] > 6]
df = df[df['sector'].map(lambda x: x not in EXCLUDED_SECTORS)]
df = df[df['industry'].map(lambda x: x not in EXCLUDED_INDUSTRIES)]
df = df[df['description'].map(lambda x: 'energy' not in x.lower())]
df = df[df['description'].map(lambda x: 'financ' not in x.lower())]
df = df[df['description'].map(lambda x: 'invest' not in x.lower())]
df = df[df['description'].map(lambda x: 'bank' not in x.lower())]
df = df[df['description'].map(lambda x: 'banc' not in x.lower())]
df = df[df['description'].map(lambda x: 'equity' not in x.lower())]
df = df[df['description'].map(lambda x: not x.lower().endswith(' ads'))]
df = df[df['description'].map(lambda x: not x.lower().endswith(' adr'))]
df = df[df['description'].map(lambda x: not x.lower().endswith(' s.a.'))]
df['assets'] = df['market_cap'] / df['pt_b']
df['roc'] = df['ebitda'] / df['assets']
df = df[df['roc'] > 0]
return df
def predict(num_stocks, eod_kwargs, twitter_kwargs):
stocks = get_stocks(eod_kwargs)
rank = rank_stocks(stocks)
return publish_to_twitter(rank[:num_stocks].T, **twitter_kwargs)
def main():
description = 'Run MagicFormula Prediction'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-k', '--consumer-key',
required=True,
help='Twitter application consumer key')
parser.add_argument('-s', '--consumer-secret',
required=True,
help='Twitter application consumer secret')
parser.add_argument('-K', '--access-token-key',
required=True,
help='Twitter User access token key')
parser.add_argument('-S', '--access-token-secret',
required=True,
help='Twitter User access token secret')
parser.add_argument('-n', '--num_stocks',
default=15,
type=int,
help='Number of stocks to publish')
parser.add_argument('-u', '--user',
required=True,
help='EOD Data User')
parser.add_argument('-p', '--password',
required=True,
help='EOD Data password')
args = parser.parse_args()
eod_kwargs = {'username': args.user,
'password': args.password}
twitter_kwargs = {'consumer_key': args.consumer_key,
'consumer_secret': args.consumer_secret,
'access_token_key': args.access_token_key,
'access_token_secret': args.access_token_secret}
if predict(args.num_stocks, eod_kwargs, twitter_kwargs):
return 0
return 1
if __name__ == '__main__':
sys.exit(main())
``` |
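The interesting step above is `rank_stocks`, which orders candidates by P/E first and uses return on capital to break ties. A toy DataFrame with fabricated numbers shows the effect (this assumes a pandas old enough to support `sort_index(by=...)`, as the module itself does):
```python
# Fabricated figures purely to illustrate rank_stocks(); not real quotes.
import pandas as pd

df = pd.DataFrame({'roc':   [0.30, 0.10, 0.20],
                   'yield': [7.0, 6.5, 8.0],
                   'pe':    [8.0, 12.0, 8.0]},
                  index=['AAA', 'BBB', 'CCC'])

ranked = rank_stocks(df)
# AAA and CCC tie on pe_rank (both 8.0 -> rank 1.0 with method='min'),
# so the roc_rank tie-breaker puts AAA (higher roc) ahead of CCC.
print(ranked.index.tolist())   # ['AAA', 'CCC', 'BBB']
```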
{
"source": "jkoelker/powerlinex-segment-weechat-remote",
"score": 3
} |
#### File: jkoelker/powerlinex-segment-weechat-remote/hotlist2jsonfile.py
```python
import json
import os
import weechat
SCRIPT_NAME = 'hotlist2jsonfile'
SCRIPT_AUTHOR = '<NAME>'
SCRIPT_VERSION = '0.0.1'
SCRIPT_LICENSE = 'GPL'
SCRIPT_DESC = 'hotlist2jsonfile: writeout the hotlist to a file as json'
SETTINGS = {'output_file': '%h/hotlist.json'}
def write_file(data):
filename = weechat.config_get_plugin('output_file')
if not filename:
return
weechat_dir = weechat.info_get('weechat_dir', '')
filename = os.path.expanduser(filename.replace('%h', weechat_dir))
with open(filename, 'w') as f:
f.write(json.dumps(data))
def hotlist_changed(data, signal, signal_data):
hotlist = weechat.infolist_get('hotlist', '', '')
data = []
while weechat.infolist_next(hotlist):
priority = weechat.infolist_integer(hotlist, 'priority')
plugin_name = weechat.infolist_string(hotlist, 'plugin_name')
buffer_name = weechat.infolist_string(hotlist, 'buffer_name')
buffer_number = weechat.infolist_integer(hotlist, 'buffer_number')
low_messages = weechat.infolist_integer(hotlist, 'count_00')
channel_messages = weechat.infolist_integer(hotlist, 'count_01')
private_messages = weechat.infolist_integer(hotlist, 'count_02')
highlight_messages = weechat.infolist_integer(hotlist, 'count_03')
buffer_pointer = weechat.infolist_pointer(hotlist, 'buffer_pointer')
short_name = weechat.buffer_get_string(buffer_pointer, 'short_name')
data.append({'priority': priority,
'plugin_name': plugin_name,
'buffer_name': buffer_name,
'buffer_number': buffer_number,
'low_messages': low_messages,
'channel_messages': channel_messages,
'private_messages': private_messages,
'highlight_messages': highlight_messages,
0: low_messages,
1: channel_messages,
2: private_messages,
3: highlight_messages,
'short_name': short_name})
weechat.infolist_free(hotlist)
write_file({'hotlist': data})
return weechat.WEECHAT_RC_OK
if __name__ == '__main__':
if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,
SCRIPT_LICENSE, SCRIPT_DESC, '', ''):
for opt, val in SETTINGS.iteritems():
if not weechat.config_is_set_plugin(opt):
weechat.config_set_plugin(opt, val)
weechat.hook_signal('hotlist_changed', 'hotlist_changed', '')
# vim:set shiftwidth=4 tabstop=4 softtabstop=4 expandtab textwidth=80:
``` |
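For whatever reads the file on the Powerline side, the payload written by `write_file` has this shape; the buffer shown here is invented, and note that `json.dumps` turns the integer count keys 0-3 into the strings "0"-"3":
```python
# Illustrative output_file contents (values invented):
{
    "hotlist": [
        {
            "priority": 3,
            "plugin_name": "irc",
            "buffer_name": "irc.libera.#weechat",
            "buffer_number": 4,
            "low_messages": 0,
            "channel_messages": 2,
            "private_messages": 0,
            "highlight_messages": 1,
            "0": 0, "1": 2, "2": 0, "3": 1,
            "short_name": "#weechat"
        }
    ]
}
```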
{
"source": "jkoelker/python-tradeking",
"score": 2
} |
#### File: python-tradeking/tradeking/api.py
```python
import urllib.parse
import requests_oauthlib as roauth
import pandas as pd
from tradeking import utils
BASE_URL = 'https://api.tradeking.com/v1'
_DATE_KEYS = ('date', 'datetime', 'divexdate', 'divpaydt', 'timestamp',
'pr_date', 'wk52hidate', 'wk52lodate', 'xdate')
_FLOAT_KEYS = ('ask', 'bid', 'chg', 'cl', 'div', 'dollar_value', 'eps',
'hi', 'iad', 'idelta', 'igamma', 'imp_volatility', 'irho',
'itheta', 'ivega', 'last', 'lo', 'opn', 'opt_val', 'pchg',
'pcls', 'pe', 'phi', 'plo', 'popn', 'pr_adp_100', 'pr_adp_200',
'pr_adp_50', 'prbook', 'prchg', 'strikeprice', 'volatility12',
'vwap', 'wk52hi', 'wk52lo', 'yield')
_INT_KEYS = ('asksz', 'basis', 'bidsz', 'bidtick', 'days_to_expiration',
'incr_vl', 'openinterest', 'pr_openinterest', 'prem_mult', 'pvol',
'sho', 'tr_num', 'vl', 'xday', 'xmonth', 'xyear')
def _quotes_to_df(quotes):
if not isinstance(quotes, list):
quotes = [quotes]
df = pd.DataFrame.from_records(quotes, index='symbol')
for col in df.keys().intersection(_DATE_KEYS):
kwargs = {}
if col == 'timestamp':
kwargs['unit'] = 's'
try:
df[col] = pd.to_datetime(df[col], **kwargs)
except ValueError:
pass
for col in df.keys().intersection(_INT_KEYS):
cleaned = df[col].str.replace(r'[$,%]', '')
df[col] = cleaned.astype('int', errors='ignore')
for col in df.keys().intersection(_FLOAT_KEYS):
cleaned = df[col].str.replace(r'[$,%]', '')
df[col] = cleaned.astype('float', errors='ignore')
return df
# TODO(jkoelker) Would be nice to do a proper DSL
class OptionQuery(object):
FIELDS = ('strikeprice', 'xdate', 'xmonth', 'xyear', 'put_call', 'unique')
OPS = {'<': 'lt', 'lt': 'lt',
'>': 'gt', 'gt': 'gt',
'>=': 'gte', 'gte': 'gte',
'<=': 'lte', 'lte': 'lte',
'=': 'eq', '==': 'eq', 'eq': 'eq'}
def __init__(self, query):
if isinstance(query, str):
query = [query]
self._query = []
for part in query:
field, op, value = part.split()
field = field.lower()
if field not in self.FIELDS or op not in self.OPS:
continue
if field == 'xdate':
value = pd.to_datetime(value).strftime('%Y%m%d')
self._query.append((field, self.OPS[op], value))
def __str__(self):
return ' AND '.join(['%s-%s:%s' % (field, op, value)
for field, op, value in self._query])
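# Illustrative: OptionQuery(['strikeprice >= 100', 'put_call = put'])
# renders via __str__ as 'strikeprice-gte:100 AND put_call-eq:put',
# which is the query format the options/search endpoint expects.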
class API(object):
def __init__(self, consumer_key, consumer_secret,
oauth_token, oauth_secret):
self._api = roauth.OAuth1Session(client_key=consumer_key,
client_secret=consumer_secret,
resource_owner_key=oauth_token,
resource_owner_secret=oauth_secret)
def join(self, *paths, **kwargs):
if len(paths) == 1:
paths = paths[0]
if kwargs.get('clean', True):
paths = [p.rstrip('/') for p in paths]
return '/'.join(paths)
def request(self, method, url, format='json', decode=True, **kwargs):
if format:
url = '.'.join((url, format))
r = self._api.request(method, url, **kwargs)
if decode:
r = r.json()
return r
def get(self, url, format='json', decode=True, **kwargs):
return self.request('GET', url=url, format=format, decode=decode,
**kwargs)
def post(self, url, format='json', decode=True, **kwargs):
return self.request('POST', url=url, format=format, decode=decode,
**kwargs)
class Account(object):
def __init__(self, api, account_id):
self._api = api
self.account_id = account_id
def _get(self, what=None, **kwargs):
params = [BASE_URL, 'accounts', self.account_id]
if what is not None:
params.append(what)
path = self._api.join(params)
return self._api.get(path, **kwargs)
def _balances(self, **kwargs):
return self._get('balances', **kwargs)
def _history(self, date_range='all', transactions='all', **kwargs):
params = {'range': date_range, 'transactions': transactions}
return self._get('history', params=params, **kwargs)
def _holdings(self, **kwargs):
return self._get('holdings', **kwargs)
def _orders(self, **kwargs):
return self._get('orders', **kwargs)
@property
def balances(self):
r = self._balances()
return r['response']['accountbalance']
def history(self, date_range='all', transactions='all'):
r = self._history(date_range=date_range, transactions=transactions)
return r['response']['transactions']['transaction']
@property
def holdings(self):
r = self._holdings()
return r['response']['accountholdings']['holding']
# TODO(jkoelker)
def order(self, order, preview=True):
pass
@property
def orders(self):
r = self._orders()
return r['response']['orderstatus']
class News(object):
def __init__(self, api):
self._api = api
def _article(self, article_id, **kwargs):
path = self._api.join(BASE_URL, 'market', 'news', article_id)
return self._api.get(path, **kwargs)
def _search(self, keywords=None, symbols=None, maxhits=None,
startdate=None, enddate=None, **kwargs):
if not keywords and not symbols:
raise ValueError('Either keywords or symbols are required')
data = {}
if keywords:
if isinstance(keywords, str):
keywords = [keywords]
data['keywords'] = ','.join(keywords)
if symbols:
if isinstance(symbols, str):
symbols = [symbols]
data['symbols'] = ','.join(symbols)
if maxhits:
data['maxhits'] = maxhits
# TODO(jkoelker) calculate enddate to be now()
if (not startdate and enddate) or (not enddate and startdate):
            raise ValueError('Both startdate and enddate are required if one '
'is specified')
if startdate and enddate:
data['startdate'] = startdate
data['enddate'] = enddate
path = self._api.join(BASE_URL, 'market', 'news', 'search')
return self._api.post(path, data=data, **kwargs)
def article(self, article_id):
r = self._article(article_id=article_id)
return r['response']['article']
def search(self, keywords=None, symbols=None, maxhits=None, startdate=None,
enddate=None):
r = self._search(keywords=keywords, symbols=symbols, maxhits=maxhits,
startdate=startdate, enddate=enddate)
return r['response']['articles']['article']
class Options(object):
def __init__(self, api, market):
self._api = api
self._market = market
symbol = staticmethod(utils.option_symbol)
symbols = staticmethod(utils.option_symbols)
decode = staticmethod(utils.parse_option_symbol)
def _expirations(self, symbol, **kwargs):
params = {'symbol': symbol}
path = self._api.join(BASE_URL, 'market', 'options', 'expirations')
return self._api.get(path, params=params, **kwargs)
def _search(self, symbol, query, fields=None, query_is_prepared=False,
**kwargs):
if not isinstance(query, OptionQuery) and not query_is_prepared:
query = OptionQuery(query)
data = {'symbol': symbol, 'query': query}
if fields is not None:
data['fids'] = ','.join(fields)
path = self._api.join(BASE_URL, 'market', 'options', 'search')
return self._api.post(path, data=data, **kwargs)
def _strikes(self, symbol, **kwargs):
params = {'symbol': symbol}
path = self._api.join(BASE_URL, 'market', 'options', 'strikes')
return self._api.get(path, params=params, **kwargs)
def expirations(self, symbol):
r = self._expirations(symbol=symbol)
expirations = r['response']['expirationdates']['date']
return pd.to_datetime(pd.Series(expirations))
def search(self, symbol, query, fields=None):
r = self._search(symbol=symbol, query=query, fields=fields)
return _quotes_to_df(r['response']['quotes']['quote'])
def strikes(self, symbol):
r = self._strikes(symbol=symbol)
strikes = r['response']['prices']['price']
return pd.Series(strikes, dtype=float)
def quote(self, symbol, strikes=None, expirations=None, calls=True,
puts=True, fields=None):
if strikes is None:
strikes = self.strikes(symbol)
if expirations is None:
expirations = self.expirations(symbol)
symbols = utils.option_symbols(symbol, expirations, strikes, calls,
puts)
return self._market.quotes(symbols=symbols, fields=fields)
class Market(object):
def __init__(self, api):
self._api = api
self.news = News(self._api)
self.options = Options(self._api, self)
def _clock(self, **kwargs):
path = self._api.join(BASE_URL, 'market', 'clock')
return self._api.get(path, **kwargs)
def _quotes(self, symbols, fields=None, **kwargs):
if isinstance(symbols, (list, tuple)):
symbols = ','.join(symbols)
params = {'symbols': symbols}
if fields is not None:
params['fids'] = ','.join(fields)
path = self._api.join(BASE_URL, 'market', 'ext', 'quotes')
return self._api.post(path, data=params, **kwargs)
def _toplist(self, list_type='toppctgainers', **kwargs):
path = self._api.join(BASE_URL, 'market', 'toplists', list_type)
return self._api.get(path, **kwargs)
@property
def clock(self):
r = self._clock()
r = r['response']
del r['@id']
return r
def quotes(self, symbols, fields=None):
r = self._quotes(symbols=symbols, fields=fields)
return _quotes_to_df(r['response']['quotes']['quote'])
def toplist(self, list_type='toppctgainers'):
r = self._toplist(list_type=list_type)
return _quotes_to_df(r['response']['quotes']['quote'])
# TODO(jkoelker) market/timesales
# TODO(jkoelker) market/quotes (iterator)
class TradeKing(object):
def __init__(self, consumer_key, consumer_secret,
oauth_token, oauth_secret):
self._api = API(consumer_key=consumer_key,
consumer_secret=consumer_secret,
oauth_token=oauth_token,
oauth_secret=oauth_secret)
self.market = Market(self._api)
def _accounts(self, **kwargs):
        path = self._api.join(BASE_URL, 'accounts')
return self._api.get(path, **kwargs)
def account(self, account_id):
return Account(self._api, account_id)
# TODO(jkoelker) member/profile
# TODO(jkoelker) utility/status
# TODO(jkoelker) utility/version
# TODO(jkoelker) watchlists
```
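A minimal usage sketch for the client above. It assumes the module is importable as `tradeking.api`; the credentials, symbols, and account number are placeholders rather than values from the original repository.

```python
from tradeking.api import TradeKing, OptionQuery

# Placeholder OAuth credentials issued by TradeKing.
tk = TradeKing(consumer_key='CONSUMER_KEY',
               consumer_secret='CONSUMER_SECRET',
               oauth_token='OAUTH_TOKEN',
               oauth_secret='OAUTH_SECRET')

# Market data: the clock dict and a quotes DataFrame indexed by symbol.
print(tk.market.clock)
quotes = tk.market.quotes(['AAPL', 'MSFT'], fields=['ask', 'bid', 'last'])

# Option chain search using the simple query DSL above.
query = OptionQuery(['xdate >= 2015-01-17', 'put_call = call'])
chain = tk.market.options.search('AAPL', query)

# Account data requires the TradeKing account id.
balances = tk.account('12345678').balances
```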
#### File: python-tradeking/tradeking/orders.py
```python
import functools
from lxml import etree
BUY_TO_COVER = '5'
OPTION_CALL = 'OC'
OPTION_PUT = 'OP'
OPEN = 'O'
CLOSE = 'C'
STOCK = 'CS'
OPTION = 'OPT'
BUY = '1'
SELL = '2'
SELL_SHORT = '5'
DAY = '0'
GTC = '1'
MOC = '2'
MARKET = '1'
LIMIT = '2'
STOP = '3'
STOP_LIMIT = '4'
TRAILING_STOP = 'P'
PRICE = '0'
BASIS = '1'
def Order(account, security_type, security, quantity, time_in_force=GTC,
order_type=MARKET, side=BUY, trailing_stop_offset=None,
trailing_stop_offset_type=PRICE, trailing_stop_peg_type='1'):
fixml = etree.Element("FIXML",
xmlns="http://www.fixprotocol.org/FIXML-5-0-SP2")
order = etree.Element("Order",
TmInForce=str(time_in_force),
Typ=str(order_type),
Side=str(side),
Acct=str(account))
instrument = etree.Element("Instrmt",
SecTyp=str(security_type),
Sym=str(security))
order_quantity = etree.Element("OrdQty",
Qty=str(quantity))
if trailing_stop_offset is not None:
        order.set('ExecInst', 'a')
peg_instruction = etree.Element('PegInstr',
OfstTyp=str(trailing_stop_offset_type),
PegPxType=str(trailing_stop_peg_type),
OfstVal=str(trailing_stop_offset))
order.append(peg_instruction)
order.append(instrument)
order.append(order_quantity)
fixml.append(order)
return fixml
Buy = functools.partial(Order, side=BUY)
Sell = functools.partial(Order, side=SELL)
Short = functools.partial(Order, side=SELL_SHORT)
#<FIXML xmlns="http://www.fixprotocol.org/FIXML-5-0-SP2">
# <Order TmInForce="0" Typ="1" Side="1" Acct="12345678">
# <Instrmt SecTyp="CS" Sym="F"/>
# <OrdQty Qty="1"/>
# </Order>
#</FIXML>
```
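A short sketch of building an order document with the helpers above; the account number is a placeholder. A day market buy produces FIXML equivalent to the commented sample at the end of the module.

```python
from lxml import etree

from tradeking import orders

# Day market order to buy one share of F in a placeholder account.
fixml = orders.Buy(account='12345678',
                   security_type=orders.STOCK,
                   security='F',
                   quantity=1,
                   time_in_force=orders.DAY,
                   order_type=orders.MARKET)
print(etree.tostring(fixml, pretty_print=True).decode())
```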
#### File: python-tradeking/tradeking/utils.py
```python
import itertools
import time
import pandas as pd
CALL = 'C'
PUT = 'P'
LONG = 'L'
SHORT = 'S'
class Price(int):
BASE = 1000.0
def __new__(cls, value=0):
return int.__new__(cls, cls.encode(value))
def __str__(self):
return self.__repr__()
def __repr__(self):
return self._decode().__repr__()
@classmethod
def encode(cls, value):
return int(value * cls.BASE)
@classmethod
def decode(cls, value):
return float(value) / cls.BASE
def _decode(self):
return self.decode(self.real)
def option_symbol(underlying, expiration, call_put, strike):
'''Format an option symbol from its component parts.'''
call_put = call_put.upper()
if call_put not in (CALL, PUT):
raise ValueError("call_put value not one of ('%s', '%s'): %s" %
(CALL, PUT, call_put))
expiration = pd.to_datetime(expiration).strftime('%y%m%d')
strike = str(Price.encode(strike)).rstrip('L')
strike = ('0' * (8 - len(strike))) + strike
return '%s%s%s%s' % (underlying, expiration, call_put, strike)
def option_symbols(underlying, expirations, strikes, calls=True, puts=True):
'''Generate a list of option symbols for expirations and strikes.'''
if not calls and not puts:
raise ValueError('Either calls or puts must be true')
call_put = ''
if calls:
call_put = call_put + CALL
if puts:
call_put = call_put + PUT
return [option_symbol(*args) for args in
itertools.product([underlying], expirations, call_put, strikes)]
def parse_option_symbol(symbol):
'''
Parse an option symbol into its component parts.
returns (Underlying, Expiration, C/P, strike)
'''
strike = Price.decode(symbol[-8:])
call_put = symbol[-9:-8].upper()
expiration = pd.to_datetime(symbol[-15:-9])
underlying = symbol[:-15].upper()
return underlying, expiration, call_put, strike
#
# © 2011 <NAME>, MIT License
#
class cached_property(object):
'''
Decorator for read-only properties evaluated only once within TTL period.
    It can be used to create a cached property like this::
import random
# the class containing the property must be a new-style class
class MyClass(object):
# create property whose value is cached for ten minutes
@cached_property(ttl=600)
def randint(self):
# will only be evaluated every 10 min. at maximum.
return random.randint(0, 100)
The value is cached in the '_cache' attribute of the object instance that
has the property getter method wrapped by this decorator. The '_cache'
attribute value is a dictionary which has a key for every property of the
object which is wrapped by this decorator. Each entry in the cache is
created only when the property is accessed for the first time and is a
two-element tuple with the last computed property value and the last time
it was updated in seconds since the epoch.
The default time-to-live (TTL) is 300 seconds (5 minutes). Set the TTL to
zero for the cached value to never expire.
To expire a cached property value manually just do::
del instance._cache[<property name>]
'''
def __init__(self, ttl=300):
self.ttl = ttl
def __call__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
self.__module__ = fget.__module__
return self
def __get__(self, inst, owner):
now = time.time()
try:
value, last_update = inst._cache[self.__name__]
if self.ttl > 0 and now - last_update > self.ttl:
raise AttributeError
except (KeyError, AttributeError):
value = self.fget(inst)
try:
cache = inst._cache
except AttributeError:
cache = inst._cache = {}
cache[self.__name__] = (value, now)
return value
``` |
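A quick sketch of the symbol helpers; the underlying, expiration, and strikes are arbitrary examples.

```python
from tradeking import utils

# Underlying + yymmdd expiration + C/P + strike scaled by Price.BASE.
sym = utils.option_symbol('AAPL', '2015-01-17', 'C', 500)
print(sym)  # AAPL150117C00500000

# Round-trips back into (underlying, expiration, call/put, strike).
print(utils.parse_option_symbol(sym))

# Cartesian product of expirations and strikes, calls and puts by default.
syms = utils.option_symbols('AAPL', ['2015-01-17'], [490, 500, 510])
```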
{
"source": "jkoelker/quark",
"score": 2
} |
#### File: quark/api/auth.py
```python
import webob.dec
import webob.exc
from neutron import context
from neutron.openstack.common import log as logging
from neutron import wsgi
LOG = logging.getLogger(__name__)
class NoAuthMiddleware(wsgi.Middleware):
"""Return a fake token if one isn't specified."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if 'X-Auth-Token' not in req.headers:
user_id = req.headers.get('X-Auth-User', 'admin')
project_id = req.headers.get('X-Auth-Project-Id', 'admin')
res = webob.Response()
res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id)
res.content_type = 'text/plain'
res.status = '204'
return res
token = req.headers['X-Auth-Token']
user_id, _sep, project_id = token.partition(':')
project_id = project_id or user_id
ctx = context.Context(user_id,
project_id,
is_admin=True)
req.environ['neutron.context'] = ctx
return self.application
```
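A rough smoke test of the middleware above (not part of the original repository). It assumes `webob` and the neutron packages imported by the module are installed, and that the module is importable as `quark.api.auth`.

```python
import webob

from quark.api.auth import NoAuthMiddleware

def trivial_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'ok']

mw = NoAuthMiddleware(trivial_app)

# Without a token, the middleware answers itself with a generated fake token.
res = webob.Request.blank('/v2.0/networks').get_response(mw)
assert res.headers['X-Auth-Token'] == 'admin:admin'

# With a token, an admin context is injected and the request reaches the app.
req = webob.Request.blank('/v2.0/networks',
                          headers={'X-Auth-Token': 'user1:tenant1'})
res = req.get_response(mw)
assert req.environ['neutron.context'].tenant_id == 'tenant1'
```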
#### File: quark/db/api.py
```python
import datetime
import inspect
from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils
from neutron.openstack.common import uuidutils
from sqlalchemy import event
from sqlalchemy import func as sql_func
from sqlalchemy import and_, orm, or_
from quark.db import models
from quark import network_strategy
STRATEGY = network_strategy.STRATEGY
LOG = logging.getLogger(__name__)
ONE = "one"
ALL = "all"
# NOTE(jkoelker) init event listener that will ensure id is filled in
# on object creation (prior to commit).
def _perhaps_generate_id(target, args, kwargs):
if hasattr(target, 'id') and target.id is None:
target.id = uuidutils.generate_uuid()
# NOTE(jkoelker) Register the event on all models that have ids
for _name, klass in inspect.getmembers(models, inspect.isclass):
if klass is models.HasId:
continue
if models.HasId in klass.mro():
event.listen(klass, "init", _perhaps_generate_id)
def _listify(filters):
for key in ["name", "network_id", "id", "device_id", "tenant_id",
"mac_address", "shared", "version"]:
if key in filters:
if not filters[key]:
continue
listified = filters[key]
if not isinstance(listified, list):
listified = [listified]
filters[key] = listified
def _model_query(context, model, filters, fields=None):
filters = filters or {}
model_filters = []
if filters.get("name"):
model_filters.append(model.name.in_(filters["name"]))
if filters.get("network_id"):
model_filters.append(model.network_id.in_(filters["network_id"]))
if filters.get("mac_address"):
model_filters.append(model.mac_address.in_(filters["mac_address"]))
if filters.get("id"):
model_filters.append(model.id.in_(filters["id"]))
if filters.get("reuse_after"):
reuse_after = filters["reuse_after"]
reuse = (timeutils.utcnow() -
datetime.timedelta(seconds=reuse_after))
model_filters.append(model.deallocated_at <= reuse)
if filters.get("subnet_id"):
model_filters.append(model.subnet_id ==
filters["subnet_id"])
if filters.get("deallocated"):
model_filters.append(model.deallocated == filters["deallocated"])
if filters.get("_deallocated") is not None:
if filters.get("_deallocated"):
model_filters.append(model._deallocated == 1)
else:
model_filters.append(model._deallocated != 1)
if filters.get("address"):
model_filters.append(model.address == filters["address"])
if filters.get("version"):
model_filters.append(model.version.in_(filters["version"]))
if filters.get("ip_version"):
model_filters.append(model.ip_version == filters["ip_version"])
if filters.get("ip_address"):
model_filters.append(model.address == int(filters["ip_address"]))
if filters.get("mac_address_range_id"):
model_filters.append(model.mac_address_range_id ==
filters["mac_address_range_id"])
if filters.get("cidr"):
model_filters.append(model.cidr == filters["cidr"])
# Inject the tenant id if none is set. We don't need unqualified queries.
# This works even when a non-shared, other-tenant owned network is passed
# in because the authZ checks that happen in Neutron above us yank it back
# out of the result set.
if not filters and not context.is_admin:
filters["tenant_id"] = [context.tenant_id]
if filters.get("tenant_id"):
model_filters.append(model.tenant_id.in_(filters["tenant_id"]))
return model_filters
def scoped(f):
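    # NOTE: wraps the *_find helpers below. Callers may pass scope=ONE for the
    # first match, scope=ALL for a list of all matches, or omit scope to get
    # the raw query back; list-valued filters are normalized via _listify().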
def wrapped(*args, **kwargs):
scope = None
if "scope" in kwargs:
scope = kwargs.pop("scope")
if scope not in [None, ALL, ONE]:
raise Exception("Invalid scope")
_listify(kwargs)
res = f(*args, **kwargs)
if not res:
return
if "order_by" in kwargs:
res = res.order_by(kwargs["order_by"])
if scope == ALL:
if isinstance(res, list):
return res
return res.all()
elif scope == ONE:
if isinstance(res, list):
return res[0]
return res.first()
return res
return wrapped
@scoped
def port_find(context, **filters):
query = context.session.query(models.Port).\
options(orm.joinedload(models.Port.ip_addresses))
model_filters = _model_query(context, models.Port, filters)
if filters.get("ip_address_id"):
model_filters.append(models.Port.ip_addresses.any(
models.IPAddress.id.in_(filters["ip_address_id"])))
if filters.get("device_id"):
model_filters.append(models.Port.device_id.in_(filters["device_id"]))
return query.filter(*model_filters)
def port_count_all(context, **filters):
query = context.session.query(sql_func.count(models.Port.id))
model_filters = _model_query(context, models.Port, filters)
return query.filter(*model_filters).scalar()
def port_create(context, **port_dict):
port = models.Port()
port.update(port_dict)
port["tenant_id"] = context.tenant_id
if "addresses" in port_dict:
port["ip_addresses"].extend(port_dict["addresses"])
context.session.add(port)
return port
def port_update(context, port, **kwargs):
if "addresses" in kwargs:
port["ip_addresses"] = kwargs.pop("addresses")
port.update(kwargs)
context.session.add(port)
return port
def port_delete(context, port):
context.session.delete(port)
def ip_address_update(context, address, **kwargs):
address.update(kwargs)
context.session.add(address)
return address
def ip_address_create(context, **address_dict):
ip_address = models.IPAddress()
address = address_dict.pop("address")
ip_address.update(address_dict)
ip_address["address"] = int(address)
ip_address["address_readable"] = str(address)
ip_address["tenant_id"] = context.tenant_id
ip_address["_deallocated"] = 0
ip_address["allocated_at"] = timeutils.utcnow()
context.session.add(ip_address)
return ip_address
@scoped
def ip_address_find(context, lock_mode=False, **filters):
query = context.session.query(models.IPAddress)
ip_shared = filters.pop("shared", None)
if ip_shared is not None:
cnt = sql_func.count(models.port_ip_association_table.c.port_id)
stmt = context.session.query(models.IPAddress,
cnt.label("ports_count"))
if lock_mode:
stmt = stmt.with_lockmode("update")
stmt = stmt.outerjoin(models.port_ip_association_table)
stmt = stmt.group_by(models.IPAddress).subquery()
query = query.outerjoin(stmt, stmt.c.id == models.IPAddress.id)
#!@# HACK(amir): replace once attributes are configured in ip address
# extension correctly
if "True" in ip_shared:
query = query.filter(stmt.c.ports_count > 1)
else:
query = query.filter(stmt.c.ports_count <= 1)
model_filters = _model_query(context, models.IPAddress, filters)
if filters.get("device_id"):
model_filters.append(models.IPAddress.ports.any(
models.Port.device_id.in_(filters["device_id"])))
return query.filter(*model_filters)
@scoped
def mac_address_find(context, lock_mode=False, **filters):
query = context.session.query(models.MacAddress)
if lock_mode:
        query = query.with_lockmode("update")
model_filters = _model_query(context, models.MacAddress, filters)
return query.filter(*model_filters)
def mac_address_range_find_allocation_counts(context, address=None):
query = context.session.query(models.MacAddressRange,
sql_func.count(models.MacAddress.address).
label("count")).with_lockmode("update")
query = query.outerjoin(models.MacAddress)
query = query.group_by(models.MacAddressRange)
query = query.order_by("count DESC")
if address:
query = query.filter(models.MacAddressRange.last_address >= address)
query = query.filter(models.MacAddressRange.first_address <= address)
return query
@scoped
def mac_address_range_find(context, **filters):
query = context.session.query(models.MacAddressRange)
model_filters = _model_query(context, models.MacAddressRange, filters)
return query.filter(*model_filters)
def mac_address_range_create(context, **range_dict):
new_range = models.MacAddressRange()
new_range.update(range_dict)
context.session.add(new_range)
return new_range
def mac_address_range_delete(context, mac_address_range):
context.session.delete(mac_address_range)
def mac_address_update(context, mac, **kwargs):
mac.update(kwargs)
context.session.add(mac)
return mac
def mac_address_create(context, **mac_dict):
mac_address = models.MacAddress()
mac_address.update(mac_dict)
mac_address["tenant_id"] = context.tenant_id
mac_address["deallocated"] = False
mac_address["deallocated_at"] = None
context.session.add(mac_address)
return mac_address
@scoped
def network_find(context, fields=None, **filters):
ids = []
defaults = []
if "id" in filters:
ids, defaults = STRATEGY.split_network_ids(context, filters["id"])
if ids:
filters["id"] = ids
else:
filters.pop("id")
if "shared" in filters:
if True in filters["shared"]:
defaults = STRATEGY.get_assignable_networks(context)
if ids:
defaults = [net for net in ids if net in defaults]
filters.pop("id")
if not defaults:
return []
filters.pop("shared")
return _network_find(context, fields, defaults=defaults, **filters)
def _network_find(context, fields, defaults=None, **filters):
query = context.session.query(models.Network)
model_filters = _model_query(context, models.Network, filters, query)
if defaults:
if filters:
query = query.filter(or_(models.Network.id.in_(defaults),
and_(*model_filters)))
else:
query = query.filter(models.Network.id.in_(defaults))
else:
query = query.filter(*model_filters)
return query
def network_find_all(context, fields=None, **filters):
return network_find(context, fields, **filters).all()
def network_create(context, **network):
new_net = models.Network()
new_net.update(network)
context.session.add(new_net)
return new_net
def network_update(context, network, **kwargs):
network.update(kwargs)
context.session.add(network)
return network
def network_count_all(context):
query = context.session.query(sql_func.count(models.Network.id))
return query.filter(models.Network.tenant_id == context.tenant_id).\
scalar()
def network_delete(context, network):
context.session.delete(network)
def subnet_find_allocation_counts(context, net_id, **filters):
query = context.session.query(models.Subnet,
sql_func.count(models.IPAddress.address).
label("count")).with_lockmode('update')
query = query.outerjoin(models.Subnet.allocated_ips)
query = query.group_by(models.Subnet)
query = query.order_by("count DESC")
query = query.filter(models.Subnet.network_id == net_id)
if "ip_version" in filters:
query = query.filter(models.Subnet.ip_version == filters["ip_version"])
return query
@scoped
def subnet_find(context, **filters):
if "shared" in filters and True in filters["shared"]:
return []
query = context.session.query(models.Subnet).\
options(orm.joinedload(models.Subnet.routes))
model_filters = _model_query(context, models.Subnet, filters)
return query.filter(*model_filters)
def subnet_count_all(context, **filters):
query = context.session.query(sql_func.count(models.Subnet.id))
if filters.get("network_id"):
query = query.filter(
models.Subnet.network_id == filters["network_id"])
    query = query.filter(models.Subnet.tenant_id == context.tenant_id)
return query.scalar()
def subnet_delete(context, subnet):
context.session.delete(subnet)
def subnet_create(context, **subnet_dict):
subnet = models.Subnet()
subnet.update(subnet_dict)
subnet["tenant_id"] = context.tenant_id
context.session.add(subnet)
return subnet
def subnet_update(context, subnet, **kwargs):
subnet.update(kwargs)
context.session.add(subnet)
return subnet
@scoped
def route_find(context, fields=None, **filters):
query = context.session.query(models.Route)
model_filters = _model_query(context, models.Route, filters)
return query.filter(*model_filters)
def route_create(context, **route_dict):
new_route = models.Route()
new_route.update(route_dict)
new_route["tenant_id"] = context.tenant_id
context.session.add(new_route)
return new_route
def route_update(context, route, **kwargs):
route.update(kwargs)
context.session.add(route)
return route
def route_delete(context, route):
context.session.delete(route)
def dns_create(context, **dns_dict):
dns_nameserver = models.DNSNameserver()
ip = dns_dict.pop("ip")
dns_nameserver.update(dns_dict)
dns_nameserver["ip"] = int(ip)
dns_nameserver["tenant_id"] = context.tenant_id
context.session.add(dns_nameserver)
return dns_nameserver
def dns_delete(context, dns):
context.session.delete(dns)
@scoped
def security_group_find(context, **filters):
query = context.session.query(models.SecurityGroup).\
options(orm.joinedload(models.SecurityGroup.rules))
model_filters = _model_query(context, models.SecurityGroup, filters)
return query.filter(*model_filters)
def security_group_create(context, **sec_group_dict):
new_group = models.SecurityGroup()
new_group.update(sec_group_dict)
new_group["tenant_id"] = context.tenant_id
context.session.add(new_group)
return new_group
def security_group_update(context, group, **kwargs):
group.update(kwargs)
context.session.add(group)
return group
def security_group_delete(context, group):
context.session.delete(group)
@scoped
def security_group_rule_find(context, **filters):
query = context.session.query(models.SecurityGroupRule)
model_filters = _model_query(context, models.SecurityGroupRule, filters)
return query.filter(*model_filters)
def security_group_rule_create(context, **rule_dict):
new_rule = models.SecurityGroupRule()
new_rule.update(rule_dict)
new_rule.group_id = rule_dict['security_group_id']
new_rule.tenant_id = rule_dict['tenant_id']
context.session.add(new_rule)
return new_rule
def security_group_rule_delete(context, rule):
context.session.delete(rule)
def ip_policy_create(context, **ip_policy_dict):
new_policy = models.IPPolicy()
ranges = ip_policy_dict.pop("exclude")
for arange in ranges:
new_policy["exclude"].append(models.IPPolicyRange(
offset=arange["offset"],
length=arange["length"]))
new_policy.update(ip_policy_dict)
new_policy["tenant_id"] = context.tenant_id
context.session.add(new_policy)
return new_policy
@scoped
def ip_policy_find(context, **filters):
query = context.session.query(models.IPPolicy)
model_filters = _model_query(context, models.IPPolicy, filters)
return query.filter(*model_filters)
def ip_policy_update(context, ip_policy, **ip_policy_dict):
ranges = ip_policy_dict.pop("exclude", [])
if ranges:
ip_policy["exclude"] = []
for arange in ranges:
ip_policy["exclude"].append(models.IPPolicyRange(
offset=arange["offset"],
length=arange["length"]))
ip_policy.update(ip_policy_dict)
context.session.add(ip_policy)
return ip_policy
def ip_policy_delete(context, ip_policy):
context.session.delete(ip_policy)
```
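A rough illustration of how the scoped finders above are meant to be called. The identifiers are placeholders, and `context` stands for a live neutron context with an open database session.

```python
from quark.db import api as db_api

# scope=ONE returns query.first(), scope=ALL returns query.all(); with no
# scope the raw SQLAlchemy query is returned for further chaining.
port = db_api.port_find(context, id='port-uuid', scope=db_api.ONE)
nets = db_api.network_find(context, tenant_id='tenant-a', scope=db_api.ALL)

# Filters may be scalars or lists; _listify() normalizes the common keys.
stale_macs = db_api.mac_address_find(context, deallocated=True,
                                     reuse_after=300, scope=db_api.ALL)
```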
#### File: quark/drivers/nvp_driver.py
```python
from oslo.config import cfg
import aiclib
from neutron.extensions import securitygroup as sg_ext
from neutron.openstack.common import log as logging
from quark.drivers import base
from quark import exceptions
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
nvp_opts = [
cfg.IntOpt('max_ports_per_switch',
default=0,
               help=_('Maximum number of NVP ports on an NVP lswitch')),
cfg.StrOpt('default_tz',
help=_('The default transport zone UUID')),
cfg.MultiStrOpt('controller_connection',
default=[],
help=_('NVP Controller connection string')),
cfg.IntOpt('max_rules_per_group',
default=30,
               help=_('Maximum size of NVP SecurityRule list per group')),
cfg.IntOpt('max_rules_per_port',
default=30,
help=_('Maximum rules per NVP lport across all groups')),
]
physical_net_type_map = {
"stt": "stt",
"gre": "gre",
"flat": "bridge",
"bridge": "bridge",
"vlan": "bridge",
"local": "local"
}
CONF.register_opts(nvp_opts, "NVP")
def _tag_roll(tags):
return [{'scope': k, 'tag': v} for k, v in tags]
def _tag_unroll(tags):
return dict((t['scope'], t['tag']) for t in tags)
class NVPDriver(base.BaseDriver):
def __init__(self):
self.nvp_connections = []
self.conn_index = 0
self.limits = {'max_ports_per_switch': 0,
'max_rules_per_group': 0,
'max_rules_per_port': 0}
super(NVPDriver, self).__init__()
@classmethod
def get_name(klass):
return "NVP"
def load_config(self):
#NOTE(mdietz): What does default_tz actually mean?
# We don't have one default.
default_tz = CONF.NVP.default_tz
LOG.info("Loading NVP settings " + str(default_tz))
connections = CONF.NVP.controller_connection
self.limits.update({
'max_ports_per_switch': CONF.NVP.max_ports_per_switch,
'max_rules_per_group': CONF.NVP.max_rules_per_group,
'max_rules_per_port': CONF.NVP.max_rules_per_port})
LOG.info("Loading NVP settings " + str(connections))
for conn in connections:
(ip, port, user, pw, req_timeout,
http_timeout, retries, redirects) = conn.split(":")
self.nvp_connections.append(dict(ip_address=ip,
port=port,
username=user,
password=pw,
req_timeout=req_timeout,
http_timeout=http_timeout,
retries=retries,
redirects=redirects,
default_tz=default_tz))
def get_connection(self):
conn = self.nvp_connections[self.conn_index]
if "connection" not in conn:
scheme = conn["port"] == "443" and "https" or "http"
uri = "%s://%s:%s" % (scheme, conn["ip_address"], conn["port"])
user = conn['username']
passwd = conn['password']
conn["connection"] = aiclib.nvp.Connection(uri,
username=user,
                                                       password=passwd)
return conn["connection"]
def create_network(self, context, network_name, tags=None,
network_id=None, **kwargs):
return self._lswitch_create(context, network_name, tags,
network_id, **kwargs)
def delete_network(self, context, network_id):
lswitches = self._lswitches_for_network(context, network_id).results()
connection = self.get_connection()
for switch in lswitches["results"]:
LOG.debug("Deleting lswitch %s" % switch["uuid"])
connection.lswitch(switch["uuid"]).delete()
def _collect_lswitch_info(self, lswitch, get_status):
info = {
'port_isolation_enabled': lswitch['port_isolation_enabled'],
'display_name': lswitch['display_name'],
'uuid': lswitch['uuid'],
'transport_zones': lswitch['transport_zones'],
}
info.update(_tag_unroll(lswitch['tags']))
if get_status:
status = lswitch.pop('_relations')['LogicalSwitchStatus']
info.update({
'lport_stats': {
'fabric_up': status['lport_fabric_up_count'],
'admin_up': status['lport_admin_up_count'],
'link_up': status['lport_link_up_count'],
'count': status['lport_count'],
}, 'fabric_status': status['fabric_status'],
})
return info
def diag_network(self, context, network_id, get_status):
switches = self._lswitch_status_query(context, network_id)['results']
return {'logical_switches': [self._collect_lswitch_info(s, get_status)
for s in switches]}
def create_port(self, context, network_id, port_id,
status=True, security_groups=[], allowed_pairs=[]):
tenant_id = context.tenant_id
lswitch = self._create_or_choose_lswitch(context, network_id)
connection = self.get_connection()
port = connection.lswitch_port(lswitch)
port.admin_status_enabled(status)
port.allowed_address_pairs(allowed_pairs)
nvp_group_ids = self._get_security_groups_for_port(context,
security_groups)
port.security_profiles(nvp_group_ids)
tags = [dict(tag=network_id, scope="neutron_net_id"),
dict(tag=port_id, scope="neutron_port_id"),
dict(tag=tenant_id, scope="os_tid")]
LOG.debug("Creating port on switch %s" % lswitch)
port.tags(tags)
res = port.create()
res["lswitch"] = lswitch
return res
def update_port(self, context, port_id, status=True,
security_groups=[], allowed_pairs=[]):
connection = self.get_connection()
lswitch_id = self._lswitch_from_port(context, port_id)
port = connection.lswitch_port(lswitch_id, port_id)
nvp_group_ids = self._get_security_groups_for_port(context,
security_groups)
if nvp_group_ids:
port.security_profiles(nvp_group_ids)
if allowed_pairs:
port.allowed_address_pairs(allowed_pairs)
port.admin_status_enabled(status)
return port.update()
def delete_port(self, context, port_id, **kwargs):
connection = self.get_connection()
lswitch_uuid = kwargs.get('lswitch_uuid', None)
if not lswitch_uuid:
lswitch_uuid = self._lswitch_from_port(context, port_id)
LOG.debug("Deleting port %s from lswitch %s" % (port_id, lswitch_uuid))
connection.lswitch_port(lswitch_uuid, port_id).delete()
def _collect_lport_info(self, lport, get_status):
info = {
'mirror_targets': lport['mirror_targets'],
'display_name': lport['display_name'],
'portno': lport['portno'],
'allowed_address_pairs': lport['allowed_address_pairs'],
'nvp_security_groups': lport['security_profiles'],
'uuid': lport['uuid'],
'admin_status_enabled': lport['admin_status_enabled'],
'queue_uuid': lport['queue_uuid'],
}
if get_status:
stats = lport['statistics']
status = lport['status']
lswitch = {
'uuid': status['lswitch']['uuid'],
'display_name': status['lswitch']['display_name'],
}
lswitch.update(_tag_unroll(status['lswitch']['tags']))
info.update({
'statistics': {
                    'received': {
'packets': stats['rx_packets'],
'bytes': stats['rx_bytes'],
'errors': stats['rx_errors']
},
'transmitted': {
'packets': stats['tx_packets'],
'bytes': stats['tx_bytes'],
'errors': stats['tx_errors']
},
},
'status': {
'link_status_up': status['link_status_up'],
'admin_status_up': status['admin_status_up'],
'fabric_status_up': status['fabric_status_up'],
},
'lswitch': lswitch,
})
info.update(_tag_unroll(lport['tags']))
return info
def diag_port(self, context, port_id, get_status=False):
connection = self.get_connection()
lswitch_uuid = self._lswitch_from_port(context, port_id)
lswitch_port = connection.lswitch_port(lswitch_uuid, port_id)
query = lswitch_port.query()
query.relations("LogicalPortAttachment")
results = query.results()
if results['result_count'] == 0:
return {'lport': "Logical port not found."}
config = results['results'][0]
relations = config.pop('_relations')
config['attachment'] = relations['LogicalPortAttachment']['type']
if get_status:
config['status'] = lswitch_port.status()
config['statistics'] = lswitch_port.statistics()
return {'lport': self._collect_lport_info(config, get_status)}
def _get_network_details(self, context, network_id, switches):
name, phys_net, phys_type, segment_id = None, None, None, None
for res in switches["results"]:
name = res["display_name"]
for zone in res["transport_zones"]:
phys_net = zone["zone_uuid"]
phys_type = zone["transport_type"]
if "binding_config" in zone:
binding = zone["binding_config"]
segment_id = binding["vlan_translation"][0]["transport"]
break
return dict(network_name=name, phys_net=phys_net,
phys_type=phys_type, segment_id=segment_id)
return {}
def create_security_group(self, context, group_name, **group):
tenant_id = context.tenant_id
connection = self.get_connection()
group_id = group.get('group_id')
profile = connection.securityprofile()
if group_name:
profile.display_name(group_name)
ingress_rules = group.get('port_ingress_rules', [])
egress_rules = group.get('port_egress_rules', [])
if (len(ingress_rules) + len(egress_rules) >
self.limits['max_rules_per_group']):
raise exceptions.DriverLimitReached(limit="rules per group")
if egress_rules:
profile.port_egress_rules(egress_rules)
if ingress_rules:
profile.port_ingress_rules(ingress_rules)
tags = [dict(tag=group_id, scope="neutron_group_id"),
dict(tag=tenant_id, scope="os_tid")]
LOG.debug("Creating security profile %s" % group_name)
profile.tags(tags)
return profile.create()
def delete_security_group(self, context, group_id):
guuid = self._get_security_group_id(context, group_id)
connection = self.get_connection()
LOG.debug("Deleting security profile %s" % group_id)
connection.securityprofile(guuid).delete()
def update_security_group(self, context, group_id, **group):
query = self._get_security_group(context, group_id)
connection = self.get_connection()
profile = connection.securityprofile(query.get('uuid'))
ingress_rules = group.get('port_ingress_rules',
query.get('logical_port_ingress_rules'))
egress_rules = group.get('port_egress_rules',
query.get('logical_port_egress_rules'))
if (len(ingress_rules) + len(egress_rules) >
self.limits['max_rules_per_group']):
raise exceptions.DriverLimitReached(limit="rules per group")
if group.get('name', None):
profile.display_name(group['name'])
if group.get('port_ingress_rules', None) is not None:
profile.port_ingress_rules(ingress_rules)
if group.get('port_egress_rules', None) is not None:
profile.port_egress_rules(egress_rules)
return profile.update()
def _update_security_group_rules(self, context, group_id, rule, operation,
checks):
groupd = self._get_security_group(context, group_id)
direction, secrule = self._get_security_group_rule_object(context,
rule)
rulelist = groupd['logical_port_%s_rules' % direction]
for check in checks:
if not check(secrule, rulelist):
raise checks[check]
getattr(rulelist, operation)(secrule)
LOG.debug("%s rule on security group %s" % (operation, groupd['uuid']))
group = {'port_%s_rules' % direction: rulelist}
return self.update_security_group(context, group_id, **group)
def create_security_group_rule(self, context, group_id, rule):
return self._update_security_group_rules(
context, group_id, rule, 'append',
{(lambda x, y: x not in y):
sg_ext.SecurityGroupRuleExists(id=group_id),
(lambda x, y:
self._check_rule_count_per_port(context, group_id) <
self.limits['max_rules_per_port']):
exceptions.DriverLimitReached(limit="rules per port")})
def delete_security_group_rule(self, context, group_id, rule):
return self._update_security_group_rules(
context, group_id, rule, 'remove',
{(lambda x, y: x in y):
sg_ext.SecurityGroupRuleNotFound(id="with group_id %s" %
group_id)})
def _create_or_choose_lswitch(self, context, network_id):
switches = self._lswitch_status_query(context, network_id)
switch = self._lswitch_select_open(context, network_id=network_id,
switches=switches)
if switch:
LOG.debug("Found open switch %s" % switch)
return switch
switch_details = self._get_network_details(context, network_id,
switches)
if not switch_details:
raise exceptions.BadNVPState(net_id=network_id)
return self._lswitch_create(context, network_id=network_id,
**switch_details)
def _lswitch_status_query(self, context, network_id):
query = self._lswitches_for_network(context, network_id)
query.relations("LogicalSwitchStatus")
results = query.results()
LOG.debug("Query results: %s" % results)
return results
def _lswitch_select_open(self, context, switches=None, **kwargs):
"""Selects an open lswitch for a network. Note that it does not select
the most full switch, but merely one with ports available.
"""
if switches is not None:
for res in switches["results"]:
count = res["_relations"]["LogicalSwitchStatus"]["lport_count"]
if self.limits['max_ports_per_switch'] == 0 or \
count < self.limits['max_ports_per_switch']:
return res["uuid"]
return None
def _lswitch_delete(self, context, lswitch_uuid):
connection = self.get_connection()
LOG.debug("Deleting lswitch %s" % lswitch_uuid)
connection.lswitch(lswitch_uuid).delete()
def _config_provider_attrs(self, connection, switch, phys_net,
net_type, segment_id):
if not (phys_net or net_type):
return
if not phys_net and net_type:
raise exceptions.ProvidernetParamError(
msg="provider:physical_network parameter required")
if phys_net and not net_type:
raise exceptions.ProvidernetParamError(
msg="provider:network_type parameter required")
if not net_type in ("bridge", "vlan") and segment_id:
raise exceptions.SegmentIdUnsupported(net_type=net_type)
if net_type == "vlan" and not segment_id:
raise exceptions.SegmentIdRequired(net_type=net_type)
phys_type = physical_net_type_map.get(net_type.lower())
if not phys_type:
raise exceptions.InvalidPhysicalNetworkType(net_type=net_type)
tz_query = connection.transportzone(phys_net).query()
transport_zone = tz_query.results()
if transport_zone["result_count"] == 0:
raise exceptions.PhysicalNetworkNotFound(phys_net=phys_net)
switch.transport_zone(zone_uuid=phys_net,
transport_type=phys_type,
vlan_id=segment_id)
def _lswitch_create(self, context, network_name=None, tags=None,
network_id=None, phys_net=None,
phys_type=None, segment_id=None,
**kwargs):
# NOTE(mdietz): physical net uuid maps to the transport zone uuid
# physical net type maps to the transport/connector type
# if type maps to 'bridge', then segment_id, which maps
# to vlan_id, is conditionally provided
LOG.debug("Creating new lswitch for %s network %s" %
(context.tenant_id, network_name))
tenant_id = context.tenant_id
connection = self.get_connection()
switch = connection.lswitch()
if network_name is None:
network_name = network_id
switch.display_name(network_name)
tags = tags or []
tags.append({"tag": tenant_id, "scope": "os_tid"})
if network_id:
tags.append({"tag": network_id, "scope": "neutron_net_id"})
switch.tags(tags)
LOG.debug("Creating lswitch for network %s" % network_id)
# When connecting to public or snet, we need switches that are
# connected to their respective public/private transport zones
# using a "bridge" connector. Public uses no VLAN, whereas private
# uses VLAN 122 in netdev. Probably need this to be configurable
self._config_provider_attrs(connection, switch, phys_net, phys_type,
segment_id)
res = switch.create()
return res["uuid"]
def _lswitches_for_network(self, context, network_id):
connection = self.get_connection()
query = connection.lswitch().query()
query.tagscopes(['os_tid', 'neutron_net_id'])
query.tags([context.tenant_id, network_id])
return query
def _lswitch_from_port(self, context, port_id):
connection = self.get_connection()
query = connection.lswitch_port("*").query()
query.relations("LogicalSwitchConfig")
query.uuid(port_id)
port = query.results()
if port['result_count'] > 1:
raise Exception("Could not identify lswitch for port %s" % port_id)
if port['result_count'] < 1:
raise Exception("No lswitch found for port %s" % port_id)
return port['results'][0]["_relations"]["LogicalSwitchConfig"]["uuid"]
def _get_security_group(self, context, group_id):
connection = self.get_connection()
query = connection.securityprofile().query()
query.tagscopes(['os_tid', 'neutron_group_id'])
query.tags([context.tenant_id, group_id])
query = query.results()
if query['result_count'] != 1:
raise sg_ext.SecurityGroupNotFound(id=group_id)
return query['results'][0]
def _get_security_group_id(self, context, group_id):
return self._get_security_group(context, group_id)['uuid']
def _get_security_group_rule_object(self, context, rule):
ethertype = rule.get('ethertype', None)
rule_clone = {}
ip_prefix = rule.get('remote_ip_prefix', None)
if ip_prefix:
rule_clone['ip_prefix'] = ip_prefix
profile_uuid = rule.get('remote_group_id', None)
if profile_uuid:
rule_clone['profile_uuid'] = profile_uuid
for key in ['protocol', 'port_range_min', 'port_range_max']:
if rule.get(key):
rule_clone[key] = rule[key]
connection = self.get_connection()
secrule = connection.securityrule(ethertype, **rule_clone)
direction = rule.get('direction', '')
if direction not in ['ingress', 'egress']:
raise AttributeError(
"Direction not specified as 'ingress' or 'egress'.")
return (direction, secrule)
def _check_rule_count_per_port(self, context, group_id):
connection = self.get_connection()
ports = connection.lswitch_port("*").query().security_profile_uuid(
'=', self._get_security_group_id(
context, group_id)).results().get('results', [])
groups = (port.get('security_profiles', []) for port in ports)
return max([self._check_rule_count_for_groups(
context, (connection.securityprofile(gp).read() for gp in group))
for group in groups] or [0])
def _check_rule_count_for_groups(self, context, groups):
return sum(len(group['logical_port_ingress_rules']) +
len(group['logical_port_egress_rules'])
for group in groups)
def _get_security_groups_for_port(self, context, groups):
if (self._check_rule_count_for_groups(
context,
(self._get_security_group(context, g) for g in groups))
> self.limits['max_rules_per_port']):
raise exceptions.DriverLimitReached(limit="rules per port")
return [self._get_security_group(context, group)['uuid']
for group in groups]
```
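A hedged sketch of how this driver expects to be configured and called. The `[NVP]` values below are placeholders that mirror the colon-separated `controller_connection` string parsed in `load_config()`, and `context` stands for a neutron request context obtained elsewhere.

```python
# Example oslo.config section (placeholders), parsed by load_config():
#
#   [NVP]
#   controller_connection = 192.0.2.10:443:admin:secret:30:10:2:2
#   default_tz = 11111111-2222-3333-4444-555555555555
#   max_ports_per_switch = 64
from quark.drivers.nvp_driver import NVPDriver

driver = NVPDriver()
driver.load_config()

driver.create_network(context, 'net-a', network_id='neutron-net-uuid')
port = driver.create_port(context, 'neutron-net-uuid', 'neutron-port-uuid')
driver.delete_port(context, port['uuid'], lswitch_uuid=port['lswitch'])
```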
#### File: quark/drivers/optimized_nvp_driver.py
```python
from neutron.openstack.common import log as logging
from quark.db import models
from quark.drivers.nvp_driver import NVPDriver
import sqlalchemy as sa
from sqlalchemy import orm
LOG = logging.getLogger(__name__)
class OptimizedNVPDriver(NVPDriver):
def delete_network(self, context, network_id):
lswitches = self._lswitches_for_network(context, network_id)
for switch in lswitches:
self._lswitch_delete(context, switch.nvp_id)
def create_port(self, context, network_id, port_id,
status=True, security_groups=[], allowed_pairs=[]):
nvp_port = super(OptimizedNVPDriver, self).\
create_port(context, network_id,
port_id, status=status,
security_groups=security_groups,
allowed_pairs=allowed_pairs)
switch_nvp_id = nvp_port["lswitch"]
# slightly inefficient for the sake of brevity. Lets the
# parent class do its thing then finds the switch that
# the port was created on for creating the association. Switch should
# be in the query cache so the subsequent lookup should be minimal,
# but this could be an easy optimization later if we're looking.
switch = self._lswitch_select_by_nvp_id(context, switch_nvp_id)
new_port = LSwitchPort(port_id=nvp_port["uuid"],
switch_id=switch.id)
context.session.add(new_port)
switch.port_count = switch.port_count + 1
return nvp_port
def update_port(self, context, port_id,
status=True, security_groups=[], allowed_pairs=[]):
nvp_port = super(OptimizedNVPDriver, self).\
update_port(context, port_id, status=status,
security_groups=security_groups,
allowed_pairs=allowed_pairs)
port = self._lport_select_by_id(context, port_id)
port.update(nvp_port)
def delete_port(self, context, port_id, lswitch_uuid=None):
port = self._lport_select_by_id(context, port_id)
switch = port.switch
super(OptimizedNVPDriver, self).\
delete_port(context, port_id, lswitch_uuid=switch.nvp_id)
context.session.delete(port)
switch.port_count = switch.port_count - 1
if switch.port_count == 0:
self._lswitch_delete(context, switch.nvp_id)
def create_security_group(self, context, group_name, **group):
nvp_group = super(OptimizedNVPDriver, self).create_security_group(
context, group_name, **group)
group_id = group.get('group_id')
profile = SecurityProfile(id=group_id, nvp_id=nvp_group['uuid'])
context.session.add(profile)
def delete_security_group(self, context, group_id):
super(OptimizedNVPDriver, self).\
delete_security_group(context, group_id)
group = self._query_security_group(context, group_id)
context.session.delete(group)
def _lport_select_by_id(self, context, port_id):
query = context.session.query(LSwitchPort)
query = query.filter(LSwitchPort.port_id == port_id)
return query.first()
def _lswitch_delete(self, context, lswitch_uuid):
switch = self._lswitch_select_by_nvp_id(context, lswitch_uuid)
super(OptimizedNVPDriver, self).\
_lswitch_delete(context, lswitch_uuid)
context.session.delete(switch)
def _lswitch_select_by_nvp_id(self, context, nvp_id):
switch = context.session.query(LSwitch).\
filter(LSwitch.nvp_id == nvp_id).\
first()
return switch
def _lswitch_select_first(self, context, network_id):
query = context.session.query(LSwitch)
        query = query.filter(LSwitch.network_id == network_id)
return query.first()
def _lswitch_select_free(self, context, network_id):
query = context.session.query(LSwitch)
query = query.filter(LSwitch.port_count <
self.limits['max_ports_per_switch'])
query = query.filter(LSwitch.network_id == network_id)
switch = query.order_by(LSwitch.port_count).first()
return switch
def _lswitch_status_query(self, context, network_id):
"""Child implementation of lswitch_status_query.
Deliberately empty as we rely on _get_network_details to be more
efficient than we can be here.
"""
pass
def _lswitch_select_open(self, context, network_id=None, **kwargs):
if self.limits['max_ports_per_switch'] == 0:
switch = self._lswitch_select_first(context, network_id)
else:
switch = self._lswitch_select_free(context, network_id)
if switch:
return switch.nvp_id
LOG.debug("Could not find optimized switch")
def _get_network_details(self, context, network_id, switches):
name, phys_net, phys_type, segment_id = None, None, None, None
switch = self._lswitch_select_first(context, network_id)
if switch:
name = switch.display_name
phys_net = switch.transport_zone
phys_type = switch.transport_connector
segment_id = switch.segment_id
return dict(network_name=name, phys_net=phys_net,
phys_type=phys_type, segment_id=segment_id)
def _lswitch_create(self, context, network_name=None, tags=None,
network_id=None, **kwargs):
nvp_id = super(OptimizedNVPDriver, self).\
_lswitch_create(context, network_name, tags,
network_id, **kwargs)
return self._lswitch_create_optimized(context, network_name, nvp_id,
network_id, **kwargs).nvp_id
def _lswitch_create_optimized(self, context, network_name, nvp_id,
network_id, phys_net=None, phys_type=None,
segment_id=None):
new_switch = LSwitch(nvp_id=nvp_id, network_id=network_id,
port_count=0, transport_zone=phys_net,
transport_connector=phys_type,
display_name=network_name,
segment_id=segment_id)
context.session.add(new_switch)
return new_switch
def _lswitches_for_network(self, context, network_id):
switches = context.session.query(LSwitch).\
filter(LSwitch.network_id == network_id).\
all()
return switches
def _lswitch_from_port(self, context, port_id):
port = self._lport_select_by_id(context, port_id)
return port.switch.nvp_id
def _query_security_group(self, context, group_id):
return context.session.query(SecurityProfile).\
filter(SecurityProfile.id == group_id).first()
def _make_security_rule_dict(self, rule):
res = {"port_range_min": rule.get("port_range_min"),
"port_range_max": rule.get("port_range_max"),
"protocol": rule.get("protocol"),
"ip_prefix": rule.get("remote_ip_prefix"),
"group_id": rule.get("remote_group_id"),
"ethertype": rule.get("ethertype")}
for key, value in res.items():
if value is None:
res.pop(key)
return res
def _get_security_group(self, context, group_id):
group = context.session.query(models.SecurityGroup).\
filter(models.SecurityGroup.id == group_id).first()
rulelist = {'ingress': [], 'egress': []}
for rule in group.rules:
rulelist[rule.direction].append(
self._make_security_rule_dict(rule))
return {'uuid': self._query_security_group(context, group_id).nvp_id,
'logical_port_ingress_rules': rulelist['ingress'],
'logical_port_egress_rules': rulelist['egress']}
def _check_rule_count_per_port(self, context, group_id):
ports = context.session.query(models.SecurityGroup).filter(
models.SecurityGroup.id == group_id).first().get('ports', [])
groups = (set(group.id for group in port.get('security_groups', []))
for port in ports)
return max(self._check_rule_count_for_groups(
context, (self._get_security_group(context, id) for id in g))
for g in groups)
class LSwitchPort(models.BASEV2, models.HasId):
__tablename__ = "quark_nvp_driver_lswitchport"
port_id = sa.Column(sa.String(36), nullable=False)
switch_id = sa.Column(sa.String(36),
sa.ForeignKey("quark_nvp_driver_lswitch.id"),
nullable=False)
class LSwitch(models.BASEV2, models.HasId):
__tablename__ = "quark_nvp_driver_lswitch"
nvp_id = sa.Column(sa.String(36), nullable=False)
network_id = sa.Column(sa.String(36), nullable=False)
display_name = sa.Column(sa.String(255))
port_count = sa.Column(sa.Integer())
ports = orm.relationship(LSwitchPort, backref='switch')
transport_zone = sa.Column(sa.String(36))
transport_connector = sa.Column(sa.String(20))
segment_id = sa.Column(sa.Integer())
class QOS(models.BASEV2, models.HasId):
__tablename__ = "quark_nvp_driver_qos"
display_name = sa.Column(sa.String(255), nullable=False)
max_bandwidth_rate = sa.Column(sa.Integer(), nullable=False)
min_bandwidth_rate = sa.Column(sa.Integer(), nullable=False)
class SecurityProfile(models.BASEV2, models.HasId):
__tablename__ = "quark_nvp_driver_security_profile"
nvp_id = sa.Column(sa.String(36), nullable=False)
```
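A brief sketch of what the optimization buys (placeholders again, with `context` a session-bearing neutron context, and the module assumed importable as `quark.drivers.optimized_nvp_driver`): port-to-switch lookups that the base driver answers with a wildcard NVP query become local reads against the mapping tables defined above.

```python
from quark.drivers.optimized_nvp_driver import OptimizedNVPDriver

driver = OptimizedNVPDriver()
driver.load_config()

# create_port() records an LSwitchPort row, so this resolves from the local
# database instead of the NVP API used by NVPDriver._lswitch_from_port().
lswitch_uuid = driver._lswitch_from_port(context, 'neutron-port-uuid')
```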
#### File: quark/quark/ipam.py
```python
import netaddr
from neutron.common import exceptions
from neutron.openstack.common import log as logging
from neutron.openstack.common.notifier import api as notifier_api
from neutron.openstack.common import timeutils
from oslo.config import cfg
from quark.db import api as db_api
from quark.db import models
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class QuarkIpam(object):
def allocate_mac_address(self, context, net_id, port_id, reuse_after,
mac_address=None):
if mac_address:
mac_address = netaddr.EUI(mac_address).value
with context.session.begin(subtransactions=True):
deallocated_mac = db_api.mac_address_find(
context, lock_mode=True, reuse_after=reuse_after,
scope=db_api.ONE, address=mac_address)
if deallocated_mac:
return db_api.mac_address_update(
context, deallocated_mac, deallocated=False,
deallocated_at=None)
with context.session.begin(subtransactions=True):
ranges = db_api.mac_address_range_find_allocation_counts(
context, address=mac_address)
for result in ranges:
rng, addr_count = result
last = rng["last_address"]
first = rng["first_address"]
if last - first <= addr_count:
continue
next_address = None
if mac_address:
next_address = mac_address
else:
address = True
while address:
next_address = rng["next_auto_assign_mac"]
rng["next_auto_assign_mac"] = next_address + 1
address = db_api.mac_address_find(
context, tenant_id=context.tenant_id,
scope=db_api.ONE, address=next_address)
address = db_api.mac_address_create(
context, address=next_address,
mac_address_range_id=rng["id"])
return address
raise exceptions.MacAddressGenerationFailure(net_id=net_id)
def attempt_to_reallocate_ip(self, context, net_id, port_id, reuse_after,
version=None, ip_address=None):
version = version or [4, 6]
elevated = context.elevated()
# We never want to take the chance of an infinite loop here. Instead,
# we'll clean up multiple bad IPs if we find them (assuming something
# is really wrong)
for times in xrange(3):
with context.session.begin(subtransactions=True):
address = db_api.ip_address_find(
elevated, network_id=net_id, reuse_after=reuse_after,
deallocated=True, scope=db_api.ONE, ip_address=ip_address,
lock_mode=True, version=version, order_by="address")
if address:
#NOTE(mdietz): We should always be in the CIDR but we've
# also said that before :-/
if address.get("subnet"):
cidr = netaddr.IPNetwork(address["subnet"]["cidr"])
addr = netaddr.IPAddress(address["address"],
version=cidr.version)
if addr in cidr:
updated_address = db_api.ip_address_update(
elevated, address, deallocated=False,
deallocated_at=None,
allocated_at=timeutils.utcnow())
return [updated_address]
else:
# Make sure we never find it again
context.session.delete(address)
continue
break
return []
def is_strategy_satisfied(self, ip_addresses):
return ip_addresses
def _iterate_until_available_ip(self, context, subnet, network_id,
ip_policy_rules):
address = True
while address:
next_ip_int = int(subnet["next_auto_assign_ip"])
next_ip = netaddr.IPAddress(next_ip_int)
if subnet["ip_version"] == 4:
next_ip = next_ip.ipv4()
subnet["next_auto_assign_ip"] = next_ip_int + 1
if ip_policy_rules and next_ip in ip_policy_rules:
continue
address = db_api.ip_address_find(
context, network_id=network_id, ip_address=next_ip,
tenant_id=context.tenant_id, scope=db_api.ONE)
ipnet = netaddr.IPNetwork(subnet["cidr"])
next_addr = netaddr.IPAddress(
subnet["next_auto_assign_ip"])
if ipnet.is_ipv4_mapped() or ipnet.version == 4:
next_addr = next_addr.ipv4()
return next_ip
def allocate_ip_address(self, context, net_id, port_id, reuse_after,
version=None, ip_address=None):
elevated = context.elevated()
if ip_address:
ip_address = netaddr.IPAddress(ip_address)
new_addresses = []
realloc_ips = self.attempt_to_reallocate_ip(context, net_id,
port_id, reuse_after,
version=None,
ip_address=None)
if self.is_strategy_satisfied(realloc_ips):
return realloc_ips
new_addresses.extend(realloc_ips)
with context.session.begin(subtransactions=True):
subnets = self._choose_available_subnet(
elevated, net_id, version, ip_address=ip_address,
reallocated_ips=realloc_ips)
for subnet in subnets:
ip_policy_rules = models.IPPolicy.get_ip_policy_rule_set(
subnet)
# Creating this IP for the first time
next_ip = None
if ip_address:
next_ip = ip_address
address = db_api.ip_address_find(
elevated, network_id=net_id, ip_address=next_ip,
tenant_id=elevated.tenant_id, scope=db_api.ONE)
if address:
raise exceptions.IpAddressGenerationFailure(
net_id=net_id)
else:
next_ip = self._iterate_until_available_ip(
elevated, subnet, net_id, ip_policy_rules)
context.session.add(subnet)
address = db_api.ip_address_create(
elevated, address=next_ip, subnet_id=subnet["id"],
version=subnet["ip_version"], network_id=net_id)
address["deallocated"] = 0
new_addresses.append(address)
for addr in new_addresses:
payload = dict(tenant_id=addr["tenant_id"],
ip_block_id=addr["subnet_id"],
ip_address=addr["address_readable"],
device_ids=[p["device_id"] for p in addr["ports"]],
created_at=addr["created_at"])
notifier_api.notify(context,
notifier_api.publisher_id("network"),
"ip_block.address.create",
notifier_api.CONF.default_notification_level,
payload)
return new_addresses
def _deallocate_ip_address(self, context, address):
address["deallocated"] = 1
payload = dict(tenant_id=address["tenant_id"],
ip_block_id=address["subnet_id"],
ip_address=address["address_readable"],
device_ids=[p["device_id"] for p in address["ports"]],
created_at=address["created_at"],
deleted_at=timeutils.utcnow())
notifier_api.notify(context,
notifier_api.publisher_id("network"),
"ip_block.address.delete",
notifier_api.CONF.default_notification_level,
payload)
def deallocate_ip_address(self, context, port, **kwargs):
with context.session.begin(subtransactions=True):
for addr in port["ip_addresses"]:
# Note: only deallocate ip if this is the only port mapped
if len(addr["ports"]) == 1:
self._deallocate_ip_address(context, addr)
port["ip_addresses"] = []
def deallocate_mac_address(self, context, address):
with context.session.begin(subtransactions=True):
mac = db_api.mac_address_find(context, address=address,
scope=db_api.ONE)
if not mac:
raise exceptions.NotFound(
message="No MAC address %s found" % netaddr.EUI(address))
db_api.mac_address_update(context, mac, deallocated=True,
deallocated_at=timeutils.utcnow())
def select_subnet(self, context, net_id, ip_address, **filters):
subnets = db_api.subnet_find_allocation_counts(context, net_id,
scope=db_api.ALL,
**filters)
for subnet, ips_in_subnet in subnets:
ipnet = netaddr.IPNetwork(subnet["cidr"])
if ip_address and ip_address not in ipnet:
continue
ip_policy_rules = None
if not ip_address:
ip_policy_rules = models.IPPolicy.get_ip_policy_rule_set(
subnet)
policy_size = ip_policy_rules.size if ip_policy_rules else 0
if ipnet.size > (ips_in_subnet + policy_size):
return subnet
class QuarkIpamANY(QuarkIpam):
    @classmethod
    def get_name(cls):
        return "ANY"
def _choose_available_subnet(self, context, net_id, version=None,
ip_address=None, reallocated_ips=None):
filters = {}
if version:
filters["ip_version"] = version
subnet = self.select_subnet(context, net_id, ip_address, **filters)
if subnet:
return [subnet]
raise exceptions.IpAddressGenerationFailure(net_id=net_id)
class QuarkIpamBOTH(QuarkIpam):
    @classmethod
    def get_name(cls):
        return "BOTH"
def is_strategy_satisfied(self, reallocated_ips):
req = [4, 6]
for ip in reallocated_ips:
if ip is not None:
req.remove(ip["version"])
if len(req) == 0:
return True
return False
def attempt_to_reallocate_ip(self, context, net_id, port_id,
reuse_after, version=None,
ip_address=None):
both_versions = []
with context.session.begin(subtransactions=True):
for ver in (4, 6):
address = super(QuarkIpamBOTH, self).attempt_to_reallocate_ip(
context, net_id, port_id, reuse_after, ver, ip_address)
both_versions.extend(address)
return both_versions
def _choose_available_subnet(self, context, net_id, version=None,
ip_address=None, reallocated_ips=None):
both_subnet_versions = []
need_versions = [4, 6]
for i in reallocated_ips:
if i["version"] in need_versions:
need_versions.remove(i["version"])
filters = {}
for ver in need_versions:
filters["ip_version"] = ver
sub = self.select_subnet(context, net_id, ip_address, **filters)
if sub:
both_subnet_versions.append(sub)
if not reallocated_ips and not both_subnet_versions:
raise exceptions.IpAddressGenerationFailure(net_id=net_id)
return both_subnet_versions
class QuarkIpamBOTHREQ(QuarkIpamBOTH):
    @classmethod
    def get_name(cls):
        return "BOTH_REQUIRED"
def _choose_available_subnet(self, context, net_id, version=None,
ip_address=None, reallocated_ips=None):
subnets = super(QuarkIpamBOTHREQ, self)._choose_available_subnet(
context, net_id, version, ip_address, reallocated_ips)
if len(reallocated_ips) + len(subnets) < 2:
raise exceptions.IpAddressGenerationFailure(net_id=net_id)
return subnets
class IpamRegistry(object):
def __init__(self):
self.strategies = {
QuarkIpamANY.get_name(): QuarkIpamANY(),
QuarkIpamBOTH.get_name(): QuarkIpamBOTH(),
QuarkIpamBOTHREQ.get_name(): QuarkIpamBOTHREQ()}
def is_valid_strategy(self, strategy_name):
if strategy_name in self.strategies:
return True
return False
def get_strategy(self, strategy_name):
if self.is_valid_strategy(strategy_name):
return self.strategies[strategy_name]
fallback = CONF.QUARK.default_ipam_strategy
LOG.warn("IPAM strategy %s not found, "
"using default %s" % (strategy_name, fallback))
return self.strategies[fallback]
IPAM_REGISTRY = IpamRegistry()
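# Illustrative note (assumed usage, not part of the original module): a network's
# "ipam_strategy" value selects one of the strategies registered above, e.g.
#   ipam_driver = IPAM_REGISTRY.get_strategy(net["ipam_strategy"])
# which is how the ports module below obtains its IPAM driver.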
```
#### File: quark/plugin_modules/ports.py
```python
import netaddr
from neutron.common import exceptions
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron import quota
from oslo.config import cfg
from quark.db import api as db_api
from quark.drivers import registry
from quark import ipam
from quark import plugin_views as v
from quark import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def create_port(context, port):
"""Create a port
Create a port which is a connection point of a device (e.g., a VM
    NIC) to attach to an L2 Neutron network.
: param context: neutron api request context
: param port: dictionary describing the port, with keys
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py. All keys will be populated.
"""
LOG.info("create_port for tenant %s" % context.tenant_id)
port_attrs = port["port"]
mac_address = utils.pop_param(port_attrs, "mac_address", None)
segment_id = utils.pop_param(port_attrs, "segment_id")
fixed_ips = utils.pop_param(port_attrs, "fixed_ips")
net_id = port_attrs["network_id"]
addresses = []
with context.session.begin():
port_id = uuidutils.generate_uuid()
net = db_api.network_find(context, id=net_id,
segment_id=segment_id, scope=db_api.ONE)
if not net:
# Maybe it's a tenant network
net = db_api.network_find(context, id=net_id, scope=db_api.ONE)
if not net:
raise exceptions.NetworkNotFound(net_id=net_id)
quota.QUOTAS.limit_check(
context, context.tenant_id,
ports_per_network=len(net.get('ports', [])) + 1)
ipam_driver = ipam.IPAM_REGISTRY.get_strategy(net["ipam_strategy"])
if fixed_ips:
for fixed_ip in fixed_ips:
subnet_id = fixed_ip.get("subnet_id")
ip_address = fixed_ip.get("ip_address")
if not (subnet_id and ip_address):
raise exceptions.BadRequest(
resource="fixed_ips",
msg="subnet_id and ip_address required")
addresses.extend(ipam_driver.allocate_ip_address(
context, net["id"], port_id, CONF.QUARK.ipam_reuse_after,
ip_address=ip_address))
else:
addresses.extend(ipam_driver.allocate_ip_address(
context, net["id"], port_id, CONF.QUARK.ipam_reuse_after))
group_ids, security_groups = v.make_security_group_list(
context, port["port"].pop("security_groups", None))
mac = ipam_driver.allocate_mac_address(context, net["id"], port_id,
CONF.QUARK.ipam_reuse_after,
mac_address=mac_address)
mac_address_string = str(netaddr.EUI(mac['address'],
dialect=netaddr.mac_unix))
address_pairs = [{'mac_address': mac_address_string,
'ip_address': address.get('address_readable', '')}
for address in addresses]
net_driver = registry.DRIVER_REGISTRY.get_driver(net["network_plugin"])
backend_port = net_driver.create_port(context, net["id"],
port_id=port_id,
security_groups=group_ids,
allowed_pairs=address_pairs)
port_attrs["network_id"] = net["id"]
port_attrs["id"] = port_id
port_attrs["security_groups"] = security_groups
LOG.info("Including extra plugin attrs: %s" % backend_port)
port_attrs.update(backend_port)
new_port = db_api.port_create(
context, addresses=addresses, mac_address=mac["address"],
backend_key=backend_port["uuid"], **port_attrs)
# Include any driver specific bits
return v._make_port_dict(new_port)
def update_port(context, id, port):
"""Update values of a port.
: param context: neutron api request context
: param id: UUID representing the port to update.
: param port: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for 'allow_put'
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py.
"""
LOG.info("update_port %s for tenant %s" % (id, context.tenant_id))
with context.session.begin():
port_db = db_api.port_find(context, id=id, scope=db_api.ONE)
if not port_db:
raise exceptions.PortNotFound(port_id=id)
address_pairs = []
fixed_ips = port["port"].pop("fixed_ips", None)
if fixed_ips:
ipam_driver = ipam.IPAM_REGISTRY.get_strategy(
port_db["network"]["ipam_strategy"])
ipam_driver.deallocate_ip_address(
context, port_db, ipam_reuse_after=CONF.QUARK.ipam_reuse_after)
addresses = []
for fixed_ip in fixed_ips:
subnet_id = fixed_ip.get("subnet_id")
ip_address = fixed_ip.get("ip_address")
if not (subnet_id and ip_address):
raise exceptions.BadRequest(
resource="fixed_ips",
msg="subnet_id and ip_address required")
# Note: we don't allow overlapping subnets, thus subnet_id is
# ignored.
addresses.append(ipam_driver.allocate_ip_address(
context, port_db["network_id"], id,
CONF.QUARK.ipam_reuse_after, ip_address=ip_address))
port["port"]["addresses"] = addresses
mac_address_string = str(netaddr.EUI(port_db.mac_address,
dialect=netaddr.mac_unix))
address_pairs = [{'mac_address': mac_address_string,
'ip_address':
address.get('address_readable', '')}
for address in addresses]
group_ids, security_groups = v.make_security_group_list(
context, port["port"].pop("security_groups", None))
net_driver = registry.DRIVER_REGISTRY.get_driver(
port_db.network["network_plugin"])
net_driver.update_port(context, port_id=port_db.backend_key,
security_groups=group_ids,
allowed_pairs=address_pairs)
port["port"]["security_groups"] = security_groups
port = db_api.port_update(context, port_db, **port["port"])
return v._make_port_dict(port)
def post_update_port(context, id, port):
LOG.info("post_update_port %s for tenant %s" % (id, context.tenant_id))
if not port.get("port"):
raise exceptions.BadRequest(resource="ports",
msg="Port body required")
with context.session.begin():
port_db = db_api.port_find(context, id=id, scope=db_api.ONE)
if not port_db:
raise exceptions.PortNotFound(port_id=id, net_id="")
port = port["port"]
if "fixed_ips" in port and port["fixed_ips"]:
for ip in port["fixed_ips"]:
address = None
ipam_driver = ipam.IPAM_REGISTRY.get_strategy(
port_db["network"]["ipam_strategy"])
if ip:
if "ip_id" in ip:
ip_id = ip["ip_id"]
address = db_api.ip_address_find(
context, id=ip_id, tenant_id=context.tenant_id,
scope=db_api.ONE)
elif "ip_address" in ip:
ip_address = ip["ip_address"]
net_address = netaddr.IPAddress(ip_address)
address = db_api.ip_address_find(
context, ip_address=net_address,
network_id=port_db["network_id"],
tenant_id=context.tenant_id, scope=db_api.ONE)
if not address:
address = ipam_driver.allocate_ip_address(
context, port_db["network_id"], id,
CONF.QUARK.ipam_reuse_after,
ip_address=ip_address)
else:
address = ipam_driver.allocate_ip_address(
context, port_db["network_id"], id,
CONF.QUARK.ipam_reuse_after)
address["deallocated"] = 0
already_contained = False
for port_address in port_db["ip_addresses"]:
if address["id"] == port_address["id"]:
already_contained = True
break
if not already_contained:
port_db["ip_addresses"].append(address)
return v._make_port_dict(port_db)
def get_port(context, id, fields=None):
"""Retrieve a port.
: param context: neutron api request context
: param id: UUID representing the port to fetch.
: param fields: a list of strings that are valid keys in a
port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
"""
LOG.info("get_port %s for tenant %s fields %s" %
(id, context.tenant_id, fields))
results = db_api.port_find(context, id=id, fields=fields,
scope=db_api.ONE)
if not results:
raise exceptions.PortNotFound(port_id=id, net_id='')
return v._make_port_dict(results)
def get_ports(context, filters=None, fields=None):
"""Retrieve a list of ports.
The contents of the list depends on the identity of the user
making the request (as indicated by the context) as well as any
filters.
: param context: neutron api request context
: param filters: a dictionary with keys that are valid keys for
a port as listed in the RESOURCE_ATTRIBUTE_MAP object
    in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
: param fields: a list of strings that are valid keys in a
port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
"""
LOG.info("get_ports for tenant %s filters %s fields %s" %
(context.tenant_id, filters, fields))
if filters is None:
filters = {}
query = db_api.port_find(context, fields=fields, **filters)
return v._make_ports_list(query, fields)
def get_ports_count(context, filters=None):
"""Return the number of ports.
The result depends on the identity of the user making the request
(as indicated by the context) as well as any filters.
: param context: neutron api request context
: param filters: a dictionary with keys that are valid keys for
a network as listed in the RESOURCE_ATTRIBUTE_MAP object
    in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
NOTE: this method is optional, as it was not part of the originally
defined plugin API.
"""
LOG.info("get_ports_count for tenant %s filters %s" %
(context.tenant_id, filters))
return db_api.port_count_all(context, **filters)
def delete_port(context, id):
"""Delete a port.
: param context: neutron api request context
: param id: UUID representing the port to delete.
"""
LOG.info("delete_port %s for tenant %s" %
(id, context.tenant_id))
port = db_api.port_find(context, id=id, scope=db_api.ONE)
if not port:
raise exceptions.PortNotFound(net_id=id)
with context.session.begin():
backend_key = port["backend_key"]
mac_address = netaddr.EUI(port["mac_address"]).value
ipam_driver = ipam.IPAM_REGISTRY.get_strategy(
port["network"]["ipam_strategy"])
ipam_driver.deallocate_mac_address(context, mac_address)
ipam_driver.deallocate_ip_address(
context, port, ipam_reuse_after=CONF.QUARK.ipam_reuse_after)
db_api.port_delete(context, port)
net_driver = registry.DRIVER_REGISTRY.get_driver(
port.network["network_plugin"])
net_driver.delete_port(context, backend_key)
def disassociate_port(context, id, ip_address_id):
"""Disassociates a port from an IP address.
: param context: neutron api request context
: param id: UUID representing the port to disassociate.
: param ip_address_id: UUID representing the IP address to
disassociate.
"""
LOG.info("disassociate_port %s for tenant %s ip_address_id %s" %
(id, context.tenant_id, ip_address_id))
with context.session.begin():
port = db_api.port_find(context, id=id, ip_address_id=[ip_address_id],
scope=db_api.ONE)
if not port:
raise exceptions.PortNotFound(port_id=id, net_id='')
the_address = [address for address in port["ip_addresses"]
if address["id"] == ip_address_id][0]
port["ip_addresses"] = [address for address in port["ip_addresses"]
if address.id != ip_address_id]
if len(the_address["ports"]) == 0:
the_address["deallocated"] = 1
return v._make_port_dict(port)
def _diag_port(context, port, fields):
p = v._make_port_dict(port)
net_driver = registry.DRIVER_REGISTRY.get_driver(
port.network["network_plugin"])
if 'config' in fields:
p.update(net_driver.diag_port(
context, port["backend_key"], get_status='status' in fields))
return p
def diagnose_port(context, id, fields):
if id == "*":
return {'ports': [_diag_port(context, port, fields) for
port in db_api.port_find(context).all()]}
db_port = db_api.port_find(context, id=id, scope=db_api.ONE)
if not db_port:
raise exceptions.PortNotFound(port_id=id, net_id='')
port = _diag_port(context, db_port, fields)
return {'ports': port}
```
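A minimal sketch (not part of the module above) of the port body that `create_port` consumes; the keys mirror the attributes the function pops (`mac_address`, `segment_id`, `fixed_ips`, `security_groups`), and the Neutron request context is assumed to be obtained elsewhere.
```python
# Hypothetical illustration only -- the context comes from Neutron's API layer
# and the ids are placeholders; the dict keys follow what create_port() pops.
from quark.plugin_modules import ports


def create_example_port(context, net_id, subnet_id):
    port_body = {
        "port": {
            "network_id": net_id,
            "mac_address": None,        # let IPAM allocate a MAC
            "segment_id": None,
            "security_groups": None,
            "fixed_ips": [
                {"subnet_id": subnet_id, "ip_address": "10.0.0.5"},
            ],
        }
    }
    return ports.create_port(context, port_body)
```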
#### File: tests/plugin_modules/test_ip_policies.py
```python
import contextlib
import mock
import netaddr
from neutron.common import exceptions
from quark import exceptions as quark_exceptions
from quark.tests import test_quark_plugin
class TestQuarkGetIpPolicies(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, ip_policy):
db_mod = "quark.db.api"
with mock.patch("%s.ip_policy_find" % db_mod) as ip_policy_find:
ip_policy_find.return_value = ip_policy
yield
def test_get_ip_policy_not_found(self):
with self._stubs(None):
with self.assertRaises(quark_exceptions.IPPolicyNotFound):
self.plugin.get_ip_policy(self.context, 1)
def test_get_ip_policy(self):
ip_policy = dict(
id=1,
tenant_id=1,
name="foo",
subnets=[dict(id=1)],
networks=[dict(id=2)],
exclude=[dict(offset=1, length=256)])
with self._stubs(ip_policy):
resp = self.plugin.get_ip_policy(self.context, 1)
self.assertEqual(len(resp.keys()), 6)
self.assertEqual(resp["id"], 1)
self.assertEqual(resp["name"], "foo")
self.assertEqual(resp["subnet_ids"], [1])
self.assertEqual(resp["network_ids"], [2])
self.assertEqual(resp["exclude"], ip_policy["exclude"])
self.assertEqual(resp["tenant_id"], 1)
def test_get_ip_policies(self):
ip_policy = dict(
id=1,
tenant_id=1,
name="foo",
subnets=[dict(id=1)],
networks=[dict(id=2)],
exclude=[dict(offset=1, length=256)])
with self._stubs([ip_policy]):
resp = self.plugin.get_ip_policies(self.context)
self.assertEqual(len(resp), 1)
resp = resp[0]
self.assertEqual(len(resp.keys()), 6)
self.assertEqual(resp["id"], 1)
self.assertEqual(resp["subnet_ids"], [1])
self.assertEqual(resp["network_ids"], [2])
self.assertEqual(resp["exclude"], ip_policy["exclude"])
self.assertEqual(resp["name"], "foo")
self.assertEqual(resp["tenant_id"], 1)
class TestQuarkCreateIpPolicies(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, ip_policy, subnet=None, net=None):
db_mod = "quark.db.api"
with contextlib.nested(
mock.patch("%s.subnet_find" % db_mod),
mock.patch("%s.network_find" % db_mod),
mock.patch("%s.ip_policy_create" % db_mod),
) as (subnet_find, net_find, ip_policy_create):
subnet_find.return_value = [subnet] if subnet else None
net_find.return_value = [net] if net else None
ip_policy_create.return_value = ip_policy
yield ip_policy_create
def test_create_ip_policy_invalid_body_missing_exclude(self):
with self._stubs(None):
with self.assertRaises(exceptions.BadRequest):
self.plugin.create_ip_policy(self.context, dict(
ip_policy=dict()))
def test_create_ip_policy_invalid_body_missing_netsubnet(self):
with self._stubs(None):
with self.assertRaises(exceptions.BadRequest):
self.plugin.create_ip_policy(self.context, dict(
ip_policy=dict(exclude=["1.1.1.1/24"])))
def test_create_ip_policy_invalid_subnet(self):
with self._stubs(None):
with self.assertRaises(exceptions.SubnetNotFound):
self.plugin.create_ip_policy(self.context, dict(
ip_policy=dict(subnet_ids=[1],
exclude=["1.1.1.1/24"])))
def test_create_ip_policy_invalid_network(self):
with self._stubs(None):
with self.assertRaises(exceptions.NetworkNotFound):
self.plugin.create_ip_policy(self.context, dict(
ip_policy=dict(network_ids=[1],
exclude=["1.1.1.1/24"])))
def test_create_ip_policy_network_ip_policy_already_exists(self):
with self._stubs(None, net=dict(id=1, ip_policy=dict(id=2))):
with self.assertRaises(quark_exceptions.IPPolicyAlreadyExists):
self.plugin.create_ip_policy(self.context, dict(
ip_policy=dict(network_ids=[1],
exclude=["1.1.1.1/24"])))
def test_create_ip_policy_subnet_ip_policy_already_exists(self):
with self._stubs(None, subnet=dict(id=1, ip_policy=dict(id=2))):
with self.assertRaises(quark_exceptions.IPPolicyAlreadyExists):
self.plugin.create_ip_policy(self.context, dict(
ip_policy=dict(subnet_ids=[1],
exclude=["1.1.1.1/24"])))
def test_create_ip_policy_network(self):
ipp = dict(subnet_id=None, network_id=1,
exclude=[dict(address=int(netaddr.IPAddress("1.1.1.1")),
prefix=24)])
with self._stubs(ipp, net=dict(id=1, ip_policy=dict(id=2))):
with self.assertRaises(quark_exceptions.IPPolicyAlreadyExists):
resp = self.plugin.create_ip_policy(self.context, dict(
ip_policy=dict(network_ids=[1],
exclude=["1.1.1.1/24"])))
self.assertEqual(len(resp.keys()), 3)
self.assertIsNone(resp["subnet_ids"])
self.assertEqual(resp["network_ids"], 1)
self.assertEqual(resp["exclude"], [dict()])
def test_create_ip_policy_subnet(self):
ipp = dict(subnet_id=1, network_id=None,
exclude=[dict(address=int(netaddr.IPAddress("1.1.1.1")),
prefix=24)])
with self._stubs(ipp, subnet=dict(id=1, ip_policy=dict(id=2))):
with self.assertRaises(quark_exceptions.IPPolicyAlreadyExists):
resp = self.plugin.create_ip_policy(self.context, dict(
ip_policy=dict(subnet_ids=[1],
exclude=["1.1.1.1/24"])))
self.assertEqual(len(resp.keys()), 3)
self.assertEqual(resp["subnet_id"], 1)
self.assertIsNone(resp["network_id"])
self.assertEqual(resp["exclude"], ["1.1.1.1/24"])
def test_create_ip_policy(self):
ipp = dict(
subnets=[dict(id=1)],
networks=[],
id=1,
tenant_id=1,
exclude=[dict(offset=0, length=256)],
name="foo")
with self._stubs(ipp, subnet=dict(id=1, ip_policy=None)):
resp = self.plugin.create_ip_policy(self.context, dict(
ip_policy=dict(subnet_ids=[1],
exclude=[dict(offset=0, length=256)])))
self.assertEqual(len(resp.keys()), 6)
self.assertEqual(resp["subnet_ids"], [1])
self.assertEqual(resp["network_ids"], [])
self.assertEqual(resp["exclude"],
[dict(offset=0, length=256)])
self.assertEqual(resp["name"], "foo")
self.assertEqual(resp["tenant_id"], 1)
class TestQuarkUpdateIpPolicies(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, ip_policy, subnets=None, networks=None):
if not subnets:
subnets = []
if not networks:
networks = []
db_mod = "quark.db.api"
with contextlib.nested(
mock.patch("%s.ip_policy_find" % db_mod),
mock.patch("%s.subnet_find" % db_mod),
mock.patch("%s.network_find" % db_mod),
mock.patch("%s.ip_policy_update" % db_mod),
) as (ip_policy_find, subnet_find, network_find, ip_policy_update):
ip_policy_find.return_value = ip_policy
subnet_find.return_value = subnets
network_find.return_value = networks
yield ip_policy_update
def test_update_ip_policy_not_found(self):
with self._stubs(None) as (ip_policy_update):
with self.assertRaises(quark_exceptions.IPPolicyNotFound):
self.plugin.update_ip_policy(self.context, 1,
dict(ip_policy=None))
self.assertEqual(ip_policy_update.called, 0)
def test_update_ip_policy_subnets_not_found(self):
ipp = dict(id=1, subnets=[])
with self._stubs(ipp) as (ip_policy_update):
with self.assertRaises(exceptions.SubnetNotFound):
self.plugin.update_ip_policy(
self.context,
1,
dict(ip_policy=dict(subnet_ids=[100])))
self.assertEqual(ip_policy_update.called, 0)
def test_update_ip_policy_subnets_already_exists(self):
ipp = dict(id=1, subnets=[dict()])
with self._stubs(
ipp, subnets=[dict(id=1, ip_policy=dict(id=1))]
) as (ip_policy_update):
with self.assertRaises(quark_exceptions.IPPolicyAlreadyExists):
self.plugin.update_ip_policy(
self.context,
1,
dict(ip_policy=dict(subnet_ids=[100])))
self.assertEqual(ip_policy_update.called, 0)
def test_update_ip_policy_subnets(self):
ipp = dict(id=1, subnets=[dict()],
exclude=[dict(offset=0, length=256)],
name="foo", tenant_id=1)
with self._stubs(
ipp, subnets=[dict(id=1, ip_policy=None)]
) as (ip_policy_update):
self.plugin.update_ip_policy(
self.context,
1,
dict(ip_policy=dict(subnet_ids=[100])))
self.assertEqual(ip_policy_update.called, 1)
def test_update_ip_policy_networks_not_found(self):
ipp = dict(id=1, networks=[])
with self._stubs(ipp) as (ip_policy_update):
with self.assertRaises(exceptions.NetworkNotFound):
self.plugin.update_ip_policy(
self.context,
1,
dict(ip_policy=dict(network_ids=[100])))
self.assertEqual(ip_policy_update.called, 0)
def test_update_ip_policy_networks(self):
ipp = dict(id=1, networks=[dict()],
exclude=[dict(offset=0, length=256)],
name="foo", tenant_id=1)
with self._stubs(
ipp, networks=[dict(id=1, ip_policy=None)]
) as (ip_policy_update):
self.plugin.update_ip_policy(
self.context,
1,
dict(ip_policy=dict(network_ids=[100])))
self.assertEqual(ip_policy_update.called, 1)
class TestQuarkDeleteIpPolicies(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, ip_policy):
db_mod = "quark.db.api"
with contextlib.nested(
mock.patch("%s.ip_policy_find" % db_mod),
mock.patch("%s.ip_policy_delete" % db_mod),
) as (ip_policy_find, ip_policy_delete):
ip_policy_find.return_value = ip_policy
yield ip_policy_find, ip_policy_delete
def test_delete_ip_policy_not_found(self):
with self._stubs(None):
with self.assertRaises(quark_exceptions.IPPolicyNotFound):
self.plugin.delete_ip_policy(self.context, 1)
def test_delete_ip_policy_in_use(self):
with self._stubs(dict(networks=True)):
with self.assertRaises(quark_exceptions.IPPolicyInUse):
self.plugin.delete_ip_policy(self.context, 1)
def test_delete_ip_policy(self):
ip_policy = dict(
id=1,
networks=[],
subnets=[])
with self._stubs(ip_policy) as (ip_policy_find, ip_policy_delete):
self.plugin.delete_ip_policy(self.context, 1)
self.assertEqual(ip_policy_find.call_count, 1)
self.assertEqual(ip_policy_delete.call_count, 1)
```
#### File: tests/plugin_modules/test_routes.py
```python
import contextlib
import mock
from neutron.common import exceptions
from quark import exceptions as quark_exceptions
from quark.tests import test_quark_plugin
class TestQuarkGetRoutes(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, routes):
with mock.patch("quark.db.api.route_find") as route_find:
route_find.return_value = routes
yield
def test_get_routes(self):
route = dict(id=1, cidr="192.168.0.0/24", gateway="192.168.0.1",
subnet_id=2)
with self._stubs(routes=[route]):
res = self.plugin.get_routes(self.context)
for key in route.keys():
self.assertEqual(res[0][key], route[key])
def test_get_route(self):
route = dict(id=1, cidr="192.168.0.0/24", gateway="192.168.0.1",
subnet_id=2)
with self._stubs(routes=route):
res = self.plugin.get_route(self.context, 1)
for key in route.keys():
self.assertEqual(res[key], route[key])
def test_get_route_not_found_fails(self):
with self._stubs(routes=None):
with self.assertRaises(quark_exceptions.RouteNotFound):
self.plugin.get_route(self.context, 1)
class TestQuarkCreateRoutes(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, create_route, find_routes, subnet):
db_mod = "quark.db.api"
with contextlib.nested(
mock.patch("%s.route_create" % db_mod),
mock.patch("%s.route_find" % db_mod),
mock.patch("%s.subnet_find" % db_mod)
) as (route_create, route_find, subnet_find):
route_create.return_value = create_route
route_find.return_value = find_routes
subnet_find.return_value = subnet
yield
def test_create_route(self):
subnet = dict(id=2)
create_route = dict(id=1, cidr="172.16.0.0/24", gateway="172.16.0.1",
subnet_id=subnet["id"])
route = dict(id=1, cidr="0.0.0.0/0", gateway="192.168.0.1",
subnet_id=subnet["id"])
with self._stubs(create_route=create_route, find_routes=[route],
subnet=subnet):
res = self.plugin.create_route(self.context,
dict(route=create_route))
for key in create_route.keys():
self.assertEqual(res[key], create_route[key])
def test_create_route_no_subnet_fails(self):
subnet = dict(id=2)
route = dict(id=1, cidr="192.168.0.0/24", gateway="192.168.0.1",
subnet_id=subnet["id"])
with self._stubs(create_route=route, find_routes=[], subnet=None):
with self.assertRaises(exceptions.SubnetNotFound):
self.plugin.create_route(self.context, dict(route=route))
def test_create_no_other_routes(self):
subnet = dict(id=2)
create_route = dict(id=1, cidr="192.168.0.0/24", gateway="192.168.0.1",
subnet_id=subnet["id"])
with self._stubs(create_route=create_route, find_routes=[],
subnet=subnet):
res = self.plugin.create_route(self.context,
dict(route=create_route))
self.assertEqual(res["cidr"], create_route["cidr"])
def test_create_conflicting_route_raises(self):
subnet = dict(id=2)
create_route = dict(id=1, cidr="192.168.0.0/24", gateway="192.168.0.1",
subnet_id=subnet["id"])
route = dict(id=1, cidr="192.168.0.0/24", gateway="192.168.0.1",
subnet_id=subnet["id"])
with self._stubs(create_route=create_route, find_routes=[route],
subnet=subnet):
with self.assertRaises(quark_exceptions.RouteConflict):
self.plugin.create_route(self.context,
dict(route=create_route))
class TestQuarkDeleteRoutes(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, route):
db_mod = "quark.db.api"
with contextlib.nested(
mock.patch("%s.route_delete" % db_mod),
mock.patch("%s.route_find" % db_mod),
) as (route_delete, route_find):
route_find.return_value = route
yield route_delete
def test_delete_route(self):
route = dict(id=1, cidr="192.168.0.0/24", gateway="192.168.0.1",
subnet_id=2)
with self._stubs(route=route) as route_delete:
self.plugin.delete_route(self.context, 1)
self.assertTrue(route_delete.called)
def test_delete_route_not_found_fails(self):
with self._stubs(route=None):
with self.assertRaises(quark_exceptions.RouteNotFound):
self.plugin.delete_route(self.context, 1)
``` |
{
"source": "jkoelker/slurry",
"score": 2
} |
#### File: slurry/tests/test_producers.py
```python
import pytest
import trio
from slurry import Pipeline
from slurry.sections import Repeat
from .fixtures import produce_alphabet
async def test_repeat_valid_args():
with pytest.raises(RuntimeError):
async with Pipeline.create(
Repeat(1)
) as pipeline, pipeline.tap() as aiter:
async for item in aiter:
assert False, 'No items should be emitted due to invalid arguments provided.'
async def test_repeat_args(autojump_clock):
results = []
async with Pipeline.create(
Repeat(1, 'a')
) as pipeline, pipeline.tap() as aiter:
start_time = trio.current_time()
async for item in aiter:
results.append((item, trio.current_time() - start_time))
if len(results) == 5:
break
assert results == [('a', 0), ('a', 1), ('a', 2), ('a', 3), ('a', 4)]
async def test_repeat_kwargs(autojump_clock):
results = []
async with Pipeline.create(
Repeat(1, default='a')
) as pipeline, pipeline.tap() as aiter:
start_time = trio.current_time()
async for item in aiter:
results.append((item, trio.current_time() - start_time))
if len(results) == 5:
break
assert results == [('a', 0), ('a', 1), ('a', 2), ('a', 3), ('a', 4)]
async def test_repeat_input(autojump_clock):
results = []
async with Pipeline.create(
produce_alphabet(1.5, max=3, delay=1),
Repeat(1)
) as pipeline, pipeline.tap() as aiter:
start_time = trio.current_time()
async for item in aiter:
results.append((item, trio.current_time() - start_time))
if len(results) == 5:
break
assert results == [('a', 1), ('a', 2), ('b', 2.5), ('b', 3.5), ('c', 4)]
``` |
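For reference, a standalone sketch (assumed, not taken from the repository) of the same `Pipeline`/`Repeat` usage the tests above exercise, run directly under trio instead of pytest:
```python
import trio

from slurry import Pipeline
from slurry.sections import Repeat


async def main():
    # Emit 'tick' once per second and print the first three items.
    async with Pipeline.create(Repeat(1, 'tick')) as pipeline, pipeline.tap() as aiter:
        count = 0
        async for item in aiter:
            print(item)
            count += 1
            if count == 3:
                break


trio.run(main)
```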
{
"source": "jkoelker/stonkers",
"score": 3
} |
#### File: src/stonkers/client.py
```python
from . import convert
class Client(object):
def __init__(self, tda_client):
self.tda = tda_client
@staticmethod
def _accounts(accounts, dataframe=True):
accounts = {a["securitiesAccount"]["accountId"]: a for a in accounts}
if dataframe:
return convert.accounts(accounts)
return accounts
def account(self, account_id, fields=None, dataframe=True):
accounts = self.tda.get_account(account_id, fields=fields).json()
return self._accounts([accounts], dataframe=dataframe)
def accounts(self, fields=None, dataframe=True):
accounts = self.tda.get_accounts(fields=fields).json()
return self._accounts(accounts, dataframe=dataframe)
def options(self, symbol, dataframe=True, **kwargs):
options = self.tda.get_option_chain(symbol, **kwargs).json()
if dataframe:
return convert.options(options)
return options
def quote(self, symbols, dataframe=True):
quotes = self.tda.get_quotes(symbols).json()
if dataframe:
return convert.quote(quotes)
return quotes
def positions(self, account_id, dataframe=True):
account = self.account(
account_id, fields=self.tda.Account.Fields.POSITIONS
)
positions = account["positions"][0]
if dataframe:
return convert.positions(positions)
return positions
```
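A brief usage sketch (illustrative, not from the repository); it assumes an already-authenticated `tda-api` style client, which provides the `get_quotes`/`get_option_chain`/`get_account` methods the wrapper above delegates to.
```python
# Hypothetical example -- `tda_client` must be created and authenticated elsewhere;
# only the wrapper calls below come from client.py.
from stonkers.client import Client


def print_quotes(tda_client, symbols=("AAPL", "MSFT")):
    stonks = Client(tda_client)
    quotes = stonks.quote(list(symbols))   # pandas DataFrame indexed by symbol
    print(quotes.head())
    chain = stonks.options("AAPL")         # flattened option chain as a DataFrame
    print(chain.head())
```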
#### File: src/stonkers/convert.py
```python
import pandas as pd
def accounts(data):
"""accounts as dataframe"""
return pd.concat(
pd.json_normalize(v["securitiesAccount"]) for v in data.values()
).set_index("accountId")
def transactions(data):
"""transaction information as Dataframe"""
return pd.json_normalize(data)
def search(data):
"""search for symbol as a dataframe"""
ret = []
for symbol in data:
ret.append(data[symbol])
return pd.DataFrame(ret)
def instrument(data):
"""instrument info from cusip as dataframe"""
return pd.DataFrame(data)
def quote(data):
"""quote as dataframe"""
return pd.DataFrame(data).T.set_index("symbol")
def history(data):
"""get history as dataframe"""
df = pd.DataFrame(data["candles"])
df["datetime"] = pd.to_datetime(df["datetime"], unit="ms")
return df
def options(data):
"""options chain as dataframe"""
ret = []
for date in data["callExpDateMap"]:
for strike in data["callExpDateMap"][date]:
ret.extend(data["callExpDateMap"][date][strike])
for date in data["putExpDateMap"]:
for strike in data["putExpDateMap"][date]:
ret.extend(data["putExpDateMap"][date][strike])
df = pd.DataFrame(ret)
for col in (
"tradeTimeInLong",
"quoteTimeInLong",
"expirationDate",
"lastTradingDay",
):
if col in df.columns:
df[col] = pd.to_datetime(df[col], unit="ms")
for col in ("delta", "gamma", "theta", "vega", "rho", "volatility"):
if col in df.columns:
df[col] = pd.to_numeric(df[col], errors="coerce")
return df
def positions(data):
"""positions list as a dataframe"""
ret = []
for position in data:
instrument = position.pop("instrument", {})
for col in ("assetType", "cusip", "symbol"):
if col in instrument:
position[col] = instrument[col]
ret.append(position)
return pd.DataFrame(ret).set_index("symbol")
``` |
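A small synthetic example (data invented for illustration) showing how `positions()` above flattens the nested `instrument` dict into plain columns:
```python
from stonkers import convert

raw_positions = [
    {
        "longQuantity": 10,
        "instrument": {"assetType": "EQUITY", "cusip": "037833100", "symbol": "AAPL"},
    },
    {
        "longQuantity": 5,
        "instrument": {"assetType": "EQUITY", "cusip": "594918104", "symbol": "MSFT"},
    },
]

df = convert.positions(raw_positions)
print(df[["assetType", "longQuantity"]])  # indexed by symbol
```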
{
"source": "jkoelker/syncthang",
"score": 2
} |
#### File: syncthang/syncthang/server.py
```python
import functools
import logging
from eventlet.green.OpenSSL import crypto
import eventlet
from .bep import messages
from .bep import protocol
LOG = logging.getLogger(__name__)
def cert_to_device_id(cert):
cert_bytes = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
return messages.device_id_from_cert(cert_bytes)
def start_remote(local_device_id, model, sock, addr):
cert = sock.get_peer_certificate()
device_id = cert_to_device_id(cert)
    if local_device_id == device_id:
        LOG.info('Connected to myself (%s) - should not happen', device_id)
        sock.shutdown()
        sock.close()
        return  # do not register a closed socket
    if device_id in model.devices:
        LOG.info('Connected to already connected device (%s)', device_id)
        sock.shutdown()
        sock.close()
        return  # keep the existing connection in place
remote_device = protocol.RemoteDevice(device_id, sock, model)
model.devices[device_id] = remote_device
remote_device.start()
def serve(sock, device_id, model):
eventlet.serve(sock, functools.partial(start_remote, device_id, model))
``` |
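A short sketch (assumed, not part of the repository) of computing a device id from a certificate stored on disk, using the same pyOpenSSL bindings imported above:
```python
# Illustrative only: "cert.pem" is a placeholder path; cert_to_device_id wraps
# syncthang's own device_id_from_cert helper.
from eventlet.green.OpenSSL import crypto

from syncthang.server import cert_to_device_id


def device_id_from_pem(path="cert.pem"):
    with open(path, "rb") as f:
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())
    return cert_to_device_id(cert)
```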
{
"source": "jkoelmel/networking_in_python",
"score": 3
} |
#### File: networking_in_python/server/client_handler.py
```python
import datetime
import pickle
import threading
from random import Random
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.Random import get_random_bytes
from menu import Menu
class ClientHandler:
"""
The client handler class receives and process client requests
and sends responses back to the client linked to this handler.
"""
def __init__(self, server_instance, clientsocket, addr):
"""
:param server_instance: passed as 'self' when the object of this class is created in the server object
        :param clientsocket: the accepted client socket on the server side. This handler, by itself, can send and
        receive data from/to the client it is linked to.
        :param addr: addr[0] = server ip address, addr[1] = client id assigned by the server
"""
self.server_ip = addr[0]
self.client_id = addr[1]
self.server = server_instance
self.handler = clientsocket
self.print_lock = threading.Lock() # creates the print lock
self.menu = Menu()
self.username = ""
self.messages = {}
self.user_public_key = None
self.chat_private_key = None
self.chat_public_key = None
self.chat_channel = {'channel': None, 'users': '', 'admin': ''}
self.mapping = []
self.mPrime = []
self.names = ["", "Nick", "Bobby", "Mike"]
def process_requests(self):
"""
:return: VOID
"""
while True:
data = self.handler.recv(1024)
if not data:
break
deserialized_data = pickle.loads(data)
self.process_request(deserialized_data)
def process_request(self, request):
"""
:request: the request received from the client. Note that this must be already deserialized
:return: VOID
"""
option = request['headers']['option']
response = {'payload': None, 'headers': {}, 'ack': -1}
if option == 0:
menu = self.menu.get()
response = {'payload': menu, 'headers': {'clientid': self.client_id}, 'ack': 0}
log = "Connected: \tUser: " + request['headers']['username'] + f"\tClient ID: {self.client_id}"
self.username = request['headers']['username']
self.log(log)
elif option == 1:
userList = self.get_users_connected()
response = {'payload': userList, 'ack': 1}
elif option == 2:
message = request['payload']
recipient = request['headers']['recipient']
response['ack'] = self.save_messages(message, recipient)
elif option == 3:
response = {'payload': self.messages, 'ack': 3}
self.messages = {} # once read, clear messages list.
elif option == 4:
response = {'payload': None, 'ack': 4}
elif option == 5:
message = request['payload']
response['ack'] = self.save_messages(message)
elif option == 6:
self.user_public_key = request['headers']['public_key']
self.chat_channel['channel'] = request['headers']['channel']
self.chat_channel['users'] = self.username
self.chat_channel['admin'] = self.username
key = RSA.generate(2048)
self.chat_private_key = key.export_key()
self.chat_public_key = key.public_key().export_key()
recvKey = RSA.importKey(self.user_public_key)
session_key = get_random_bytes(16)
cipher_rsa = PKCS1_OAEP.new(recvKey)
enc_session_key = cipher_rsa.encrypt(session_key)
cipher_aes = AES.new(session_key, AES.MODE_EAX)
ciphertext, tag = cipher_aes.encrypt_and_digest(self.chat_private_key)
keyTransfer = []
for item in (enc_session_key, cipher_aes.nonce, tag, ciphertext, self.chat_public_key):
keyTransfer.append(item)
response = {'payload': keyTransfer, 'ack': 6}
elif option == 7:
self.chat_channel['channel'] = request['headers']['channel']
self.chat_channel['users'] = self.username
self.user_public_key = request['headers']['public_key']
key = RSA.generate(2048)
self.chat_private_key = key.export_key()
self.chat_public_key = key.public_key().export_key()
recvKey = RSA.importKey(self.user_public_key)
session_key = get_random_bytes(16)
cipher_rsa = PKCS1_OAEP.new(recvKey)
enc_session_key = cipher_rsa.encrypt(session_key)
cipher_aes = AES.new(session_key, AES.MODE_EAX)
ciphertext, tag = cipher_aes.encrypt_and_digest(self.chat_private_key)
keyTransfer = []
for item in (enc_session_key, cipher_aes.nonce, tag, ciphertext, self.chat_public_key):
keyTransfer.append(item)
userList = []
for key, value in self.server.handlers.items():
if value.chat_channel['channel'] == self.chat_channel['channel']:
userList.append(value.username)
if value.chat_channel['admin'] != '':
print("found admin!", value.chat_channel['admin'])
self.chat_channel['admin'] = value.chat_channel['admin']
response = {'payload': keyTransfer, 'ack': 7, 'users': userList, 'admin': self.chat_channel['admin']}
elif option == 8:
response = {'payload': None, 'ack': 8, 'name': request['headers']['bot'], 'options': request['headers']['options']}
elif option == 9:
response = {'payload': self.mapping, 'ack': 9, 'names': self.names}
elif option == 10:
destination = []
routes = []
cost = []
for i in range(1, len(self.names)):
destination.append(self.names[i])
routes.append([])
cost.append(0)
response = {'payload': self.mapping, 'ack': 10, 'names': self.names, 'destination': destination, 'routes': routes, 'cost': cost}
elif option == 11:
self.mPrime = self.mapping
row = 0
for j in range(1, len(self.names)):
for k in range(j, len(self.names) - 1):
if j != k:
if (self.mapping[j][k] + self.mapping[0][k]) < self.mapping[0][k - 1]:
self.mPrime[row][k - 1] = self.mapping[j][k] + self.mapping[0][k]
self.mPrime[k - 1][row] = self.mPrime[row][k - 1]
row += 1
response = {'payload': self.mPrime, 'ack': 11, 'names': self.names, 'map': self.mapping}
elif option == 12:
response = {'payload': None, 'ack': 12}
elif option == 13:
self.server.handlers.pop((self.server_ip, self.client_id))
self.log(f'{self.username} has disconnected.')
response = {'payload': None, 'ack': 13}
elif option == 100:
# use user distances generated and populate rest of 2D matrix for 3 other users
distances = request['payload']
self.names[0] = self.username
rand = Random()
user2 = [distances[1], 0, 0, 0]
user3 = [distances[2], 0, 0, 0]
user4 = [distances[3], 0, 0, 0]
self.mapping.append(distances)
self.mapping.append(user2)
self.mapping.append(user3)
self.mapping.append(user4)
# populate the rest of the distance map
for i in range(4):
for j in range(i):
self.mapping[i][j] = rand.randint(1, 20)
self.mapping[j][i] = self.mapping[i][j]
response = {'payload': None, 'ack': 100}
self.send(response)
# after response is sent, if option 11 was chosen, update the mapping
if option == 11:
self.mapping = self.mPrime
def get_users_connected(self):
users = {}
for key, value in self.server.handlers.items():
users[value.username] = key[1]
return users
def save_messages(self, message, recipient=None):
try:
if not recipient:
for key, value in self.server.handlers.items():
recipient_handler = self.server.handlers[key]
messages_list = recipient_handler.messages
if self.client_id not in messages_list.keys():
messages_list[self.client_id] = []
message_info = (datetime.datetime.now().replace(microsecond=0).isoformat(),
message, f'broadcast message from {self.username}', self.client_id)
messages_list[self.client_id].append(message_info)
return 5
else:
recipient_handler = None
for key, value in self.server.handlers.items():
print(key, recipient)
if key[1] == recipient[1]:
recipient_handler = self.server.handlers[key]
if recipient not in self.server.handlers.keys():
return -2
messages_list = recipient_handler.messages
if self.client_id not in messages_list.keys():
messages_list[self.client_id] = []
message_info = (datetime.datetime.now().replace(microsecond=0).isoformat(),
message, f'private message from {self.username}')
messages_list[self.client_id].append(message_info)
except Exception as err:
self.log(err)
return -1
return 2
def send(self, data):
"""
        serializes and sends data to the client linked to this handler
"""
serialized_data = pickle.dumps(data)
self.handler.send(serialized_data)
def receive(self, max_mem_alloc=4096):
"""
        :param max_mem_alloc: an integer representing the maximum allocation (in bytes) in memory allowed
        for the data that is about to be received. By default it is set to 4096 bytes
:return: the deserialized data
"""
        deserialized_data = pickle.loads(self.handler.recv(max_mem_alloc))
return deserialized_data
def sendID(self, clientid):
"""
sends clientID to client upon acceptance by server
"""
message = {'clientid': clientid}
self.send(message)
def log(self, message):
"""
        thread-safe print of a log message to the server console output
"""
self.print_lock.acquire()
print(message)
self.print_lock.release()
def run(self):
self.process_requests()
```
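For reference, a small sketch (constructed from the handler code above, not taken from the project's client) of the pickled request shape that `process_request` expects for a couple of options:
```python
# Hypothetical client-side payloads matching the option handling above.
import pickle

# Option 0: first contact -- the handler replies with the menu and the client id.
hello = {'headers': {'option': 0, 'username': 'alice'}, 'payload': None}

# Option 2: private message -- 'recipient' is the (ip, client_id) tuple of the target.
private_msg = {'headers': {'option': 2, 'recipient': ('127.0.0.1', 52001)},
               'payload': 'hi there'}

wire_bytes = pickle.dumps(hello)  # what a client would send over its socket
```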
#### File: networking_in_python/server/menu.py
```python
import json
class Menu:
@staticmethod
def get():
menu = {
'titles': ['****** TCP/UDP Network ******', '------------------------------------', 'Options Available:'],
'options': {'1': 'Get Users List', '2': 'Send A Message', '3': 'Get My Messages',
'4': 'Send A Direct Message via UDP',
'5': 'Broadcast A Message With CDMA', '6': 'Create A Secure Channel To Chat Using PGP',
'7': 'Join An Existing Channel', '8': 'Create A Bot To Manage A Future Channel',
'9': 'Map The Network', '10': 'Get the Routing Table of This Client with LSP',
'11': 'Get the Routing Table of This Network with DVP', '12': 'Turn Web Proxy Server On (WIP)',
'13': 'Disconnect From Server'
}
}
menu_json = json.dumps(menu)
return menu_json
@staticmethod
def option():
"""
:return: an integer representing the option chosen by the user from the menu
"""
option = int(input("\n\nOption <Enter a number>: "))
while option not in range(1, 14):
option = int(input("\nInvalid entry, choose another option:"))
return option
``` |
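A minimal sketch (not part of the project; a client normally receives this JSON over the socket, the module is imported here only for brevity) of rendering the menu produced by `Menu.get()`:
```python
import json

from menu import Menu

menu = json.loads(Menu.get())
for title in menu["titles"]:
    print(title)
for number, label in menu["options"].items():
    print(f"{number}. {label}")
```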
{
"source": "jkoelndorfer/aerisweather-python-sdk",
"score": 3
} |
#### File: aerisweather-python-sdk/aerisweather/aerisweather.py
```python
from typing import Dict, List
from aerisweather.requests.Endpoint import Endpoint, EndpointType
from aerisweather.requests.ParameterType import ParameterType
from aerisweather.requests.RequestAction import RequestAction
from aerisweather.requests.RequestFilter import RequestFilter
from aerisweather.requests.RequestLocation import RequestLocation
from aerisweather.requests.RequestQuery import RequestQuery
from aerisweather.requests.RequestSort import RequestSort
from aerisweather.responses.AlertsResponse import AlertsResponse
from aerisweather.responses.ConditionsResponse import ConditionsResponse
from aerisweather.responses.CustomResponse import CustomResponse
from aerisweather.responses.ForecastsResponse import ForecastsResponse
from aerisweather.responses.ObservationsResponse import ObservationsResponse
from aerisweather.responses.ObservationsSummaryResponse import ObservationsSummaryResponse
from aerisweather.responses.PlacesResponse import PlacesResponse
from aerisweather.utils.AerisError import AerisError
from aerisweather.utils.AerisNetwork import AerisNetwork
class AerisWeather:
""" Defines the main object for the aerisweather python library. """
def __init__(self,
client_id: str,
client_secret: str,
app_id: str=""):
""" Constructor
Params:
- client_id: AerisWeather API account client id
- client_secret: AerisWeather API account client secret
- app_id: Optional - Namespace or application id of the application using this library
"""
self.app_id = app_id
self.client_id = client_id
self.client_secret = client_secret
self.url_host = "https://api.aerisapi.com/"
self.location = None
self.action = None
self.filter_ = None
self.sort = None
self.params = None
self.query = None
def request(self, endpoint):
""" Makes the request to the Aeris API and returns the appropriate response array.
Builds the API request URL, handles the response json, and raises an AerisError if the API returns an error.
Params:
        - endpoint: An Endpoint object containing the EndpointType and any other optional parameters needed
            for the API request.
Returns:
        - a list of specific response objects, whose type is based on the request data
- an empty list if there is no data
- an AerisError object if there was an error condition reported by the api response
- a URLError if there was an issue with sending the request
- a generic Exception for all other issues
"""
url = self.url(endpoint_type=endpoint.endpoint_type,
location=endpoint.location,
action=endpoint.action,
filter_=endpoint.filter_,
sort=endpoint.sort,
params=endpoint.params,
query=endpoint.query)
network = AerisNetwork()
json_obj = network.get_json(url, self.app_id)
responses = []
# Determine if we have a valid data response, or if there is an API error
response_error = AerisError.api_error(json_obj)
if response_error is None:
# determine if we have one response or an array
if type(json_obj["response"]) is list:
for resp in json_obj["response"]:
responses.append(self.response(endpoint.endpoint_type, resp))
else:
responses.append(self.response(endpoint.endpoint_type, json_obj["response"]))
else:
# we have some kind of error or major warning from the API
raise response_error
return responses
def url(self,
endpoint_type: EndpointType,
location: RequestLocation = None,
action: RequestAction = None,
filter_: [RequestFilter] = None,
sort: RequestSort = None,
params: Dict[ParameterType, str] = None,
query: Dict[RequestQuery, str]=None) -> str:
""" Generates the appropriate request url for a standard single API request.
Generally called internally from the request method. Builds and returns a full API request URL based on the
attributes passed in.
Params:
- endpoint_type: EndpointType - determines which Aeris API endpoint will be called
- location: Optional - RequestLocation - the location for which the request is processed
- action: Optional - RequestAction - the API request action option
- filter_: Optional - [RequestFilter] - a list of API request filters
- sort: Optional - RequestSort - the API request sort option
- params: Optional - Dict[ParameterType, str] - a list of API request parameters
        - query: Optional - Dict[RequestQuery, str] - a list of API request queries
Returns:
- url string
"""
url = self.url_host
if endpoint_type == EndpointType.CUSTOM:
url += endpoint_type.custom + "/"
else:
url += endpoint_type.value + "/"
if action is not None:
url += action.value + "/"
else:
url += location.location_str()
url += "?client_id=" + self.client_id
url += "&client_secret=" + self.client_secret
if params is not None:
for param, value in params.items():
url += "&" + param.value + "=" + value
if sort is not None:
url += "&sort=" + sort.value
if filter_ is not None:
if len(filter_) > 0:
url += "&filter=" + ",".join(filt.value for filt in filter_)
out_query = self.query_str(query)
if out_query is not None:
url += "&query=" + out_query
return url
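    # Illustrative example (assumed values, not from the original source): with
    # endpoint_type=EndpointType.OBSERVATIONS and a location that renders as
    # "minneapolis,mn", the URL built above looks roughly like
    #   https://api.aerisapi.com/observations/minneapolis,mn?client_id=ID&client_secret=SECRET
    # with any "&param=value", "&sort=...", "&filter=..." and "&query=..." pieces appended.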
@staticmethod
def response(endpoint_type: EndpointType,
response_json):
""" Determines the appropriate response object based on EndpointType and returns the completed response object.
        Given the endpoint type and the response json from the Aeris API, the method will return a fulfilled
response object.
Params:
- endpoint_type: EndpointType
- response_json - a single response portion of the json returned from the Aeris API
such as:
json_obj["response"]
Returns:
        - a completed/fulfilled response object
"""
if endpoint_type == EndpointType.ALERTS:
return AlertsResponse(response_json)
elif endpoint_type == EndpointType.CONDITIONS:
return ConditionsResponse(response_json)
elif endpoint_type == EndpointType.FORECASTS:
return ForecastsResponse(response_json)
elif endpoint_type == EndpointType.OBSERVATIONS:
return ObservationsResponse(response_json)
elif endpoint_type == EndpointType.OBSERVATIONS_SUMMARY:
return ObservationsSummaryResponse(response_json)
elif endpoint_type == EndpointType.PLACES:
return PlacesResponse(response_json)
else:
return CustomResponse(response_json)
@staticmethod
def query_str(query_dict):
""" Takes a RequestQuery object and returns a proper Aeris API url query
Params:
- query_dict: A dictionary containing a single RequestQuery and its value
Returns:
- str: a correctly formatted query attribute ready to be inserted into an API request url
"""
out_query = ""
if query_dict is not None:
for q, value in query_dict.items():
out_query += "&" + q.value + "=" + value
else:
out_query = None
return out_query
def batch_request(self,
endpoints: List[Endpoint],
global_location: RequestLocation = None,
global_filter_: [RequestFilter] = None,
global_sort: RequestSort = None,
global_params: Dict[ParameterType, str] = None,
global_query: Dict[RequestQuery, str] = None):
"""
Makes the batch request to the Aeris API and returns the appropriate response array.
If successful, the batch_request method will return a list of response objects. The list will contain the
responses in the order they are requested. If a request results in multiple responses, those responses
will be listed before continuing to the next request's response.
Params:
- endpoints: List[Endpoint] - a list of Endpoint objects, one for each request in the batch request
- global_location: RequestLocation - a RequestLocation object that will be applied to each request, unless
the request has a local RequestLocation
- global_filter_: [RequestFilter] - a list of RequestFilters that will be applied to each request, unless
the request has a local RequestFilter
- global_sort: RequestSort - a RequestSort object that will be applied to each request, unless
the request has a local RequestSort
- global_params: Dict[ParameterType, str] - a dictionary of parameters that will be applied to each
request, unless the request has a local parameter dict
- global_query: Dict[RequestQuery, str] - a dictionary of queries that will be applied to each
request, unless the request has a local query dict
Returns:
        - a list of specific response objects, whose type is based on the request data, in the order of the requests
- an empty list if there is no data
- an AerisError object if there was an error condition reported by the api response
- a URLError if there was an issue with sending the request
- a generic Exception for all other issues
"""
url = self.batch_url(endpoints=endpoints,
global_location=global_location,
global_filter_=global_filter_,
global_sort=global_sort,
global_params=global_params,
global_query=global_query)
network = AerisNetwork()
json_obj = network.get_json(url, self.app_id)
responses = []
# Determine if we have a valid data response, or if there is an API error
response_error = AerisError.api_error(json_obj)
if response_error is None:
endpoint_counter = 0
for resp in json_obj["response"]["responses"]:
# Batch response json - check for an error response here, for things like the alerts endpoint's
# "warn_no_data" response.
batch_error = AerisError.api_error(json_obj)
if batch_error is None:
# get the appropriate response
for r in resp["response"]:
# check each response within the batch response for an error code
response_error = AerisError.api_error(resp)
if response_error is None:
endpoint_type = endpoints[endpoint_counter].endpoint_type
responses.append(self.response(endpoint_type, r))
else:
raise response_error
else:
raise batch_error
endpoint_counter += 1
else:
# we have some kind of error or major warning from the API
raise response_error
return responses
def batch_url(self,
endpoints: List[Endpoint],
global_location: RequestLocation = None,
global_filter_: [RequestFilter] = None,
global_sort: RequestSort = None,
global_params: Dict[ParameterType, str] = None,
global_query: Dict[RequestQuery, str] = None) -> str:
""" Generate the appropriate batch request url.
The batch request also supports all of the standard endpoint parameters, such as p, limit, and query,
except that when used, these batch parameters are considered global and applied to each individual
request provided with the request's parameters. Note, however, that any parameters included within
an individual request (within the requests parameter) will override those same global options found
in the main batch request.
Parameters can be passed to each individual endpoint as well but must be URL-encoded, use "%3F" for "?"
and "%26" for "&".
Example:
https://api.aerisapi.com/batch?
p=truckee,nv&client_id=###########&client_secret=########################
&requests=
/places/54660,
/advisories%3Flimit=1%26radius=10mi,
/observations%3Fp=54601
Params:
- endpoints: List[Endpoint] - a list of Endpoint objects, one for each request in the batch request
- global_location: RequestLocation - a RequestLocation object that will be applied to each request, unless
the request has a local RequestLocation
- global_filter_: [RequestFilter] - a list of RequestFilters that will be applied to each request, unless
the request has a local RequestFilter
- global_sort: RequestSort - a RequestSort object that will be applied to each request, unless
the request has a local RequestSort
- global_params: Dict[ParameterType, str] - a dictionary of parameters that will be applied to each
request, unless the request has a local parameter dict
- global_query: Dict[RequestQuery, str] - a dictionary of queries that will be applied to each
request, unless the request has a local query dict
Returns:
- url string for the batch_request
"""
url = self.url_host + "batch?client_id=" + self.client_id + "&client_secret=" + self.client_secret
# add the global request parameters - these apply to all endpoints in the batch request
if global_location is not None:
url += "&p=" + global_location.location_str()
if global_filter_ is not None:
if len(global_filter_) > 0:
url += "&filter="
for filt in global_filter_:
url += filt.value + ","
if global_params is not None:
for param, value in global_params.items():
url += "&" + param.value + "=" + value
if global_sort is not None:
url += "&sort=" + global_sort.value
out_query = self.query_str(global_query)
if out_query is not None:
url += "&query=" + out_query
# add the requests section
url += "&requests="
# add the specifc endpoint requests and their parameters
for endpoint in endpoints:
has_param = False
url += "/" + endpoint.endpoint_type.value
if endpoint.action is not None:
url += "/" + endpoint.action.value
if endpoint.location is not None:
url += "%3Fp=" + endpoint.location.location_str()
has_param = True
if endpoint.filter_ is not None and len(endpoint.filter_) > 0:
if has_param:
url += "%26filter="
else:
url += "%3Ffilter="
for filt in endpoint.filter_:
url += filt.value + ","
has_param = True
if endpoint.params is not None:
for param, value in endpoint.params.items():
if has_param:
url += "%26"
else:
url += "%3F"
url += param.value + "=" + value + ","
has_param = True
if endpoint.sort is not None:
if has_param:
url += "%26sort="
else:
url += "%3Fsort="
url += endpoint.sort.value
has_param = True
out_query = self.query_str(endpoint.query)
if out_query is not None:
if has_param:
url += "%26query="
else:
url += "%3Fquery="
url += out_query
# has_param = True
# add a trailing comma in case there are more endpoints
if not url.endswith(","):
url += ","
# strip unused trailing commas
while url.endswith(","):
url = url[:-1]
return url
def alerts(self,
location: RequestLocation = None,
action: RequestAction = None,
filter_: [RequestFilter] = None,
sort: RequestSort = None,
params: Dict[ParameterType, str] = None,
query: Dict[RequestQuery, str] = None):
""" Performs an API request to get alerts data for a specified location.
Params:
- location: Optional - RequestLocation - the location for which the request is processed
- action: Optional - RequestAction - the API request action option
- filter_: Optional - [RequestFilter] - a list of API request filters
- sort: Optional - RequestSort - the API request sort option
- params: Optional - Dict[ParameterType, str] - a list of API request parameters
        - query: Optional - Dict[RequestQuery, str] - a list of API request queries
Returns:
- a list of AlertsResponse objects if successful
- an empty list if there is no data
"""
endpoint = Endpoint(endpoint_type=EndpointType.ALERTS,
location=location,
action=action,
filter_=filter_,
sort=sort,
params=params,
query=query)
return self.request(endpoint=endpoint)
def conditions(self,
location: RequestLocation = None,
action: RequestAction = None,
filter_: [RequestFilter] = None,
sort: RequestSort = None,
params: Dict[ParameterType, str] = None,
query: Dict[RequestQuery, str] = None):
""" Performs an API request to get conditions data for a specified location.
Params:
- location: Optional - RequestLocation - the location for which the request is processed
- action: Optional - RequestAction - the API request action option
- filter_: Optional - [RequestFilter] - a list of API request filters
- sort: Optional - RequestSort - the API request sort option
- params: Optional - Dict[ParameterType, str] - a list of API request parameters
                - query: Optional - Dict[RequestQuery, str] - a list of API request queries
Returns:
- a list of ConditionsResponse objects if successful
- an empty list if there is no data
"""
endpoint = Endpoint(endpoint_type=EndpointType.CONDITIONS,
location=location,
action=action,
filter_=filter_,
sort=sort,
params=params,
query=query)
return self.request(endpoint=endpoint)
def forecasts(self,
location: RequestLocation = None,
action: RequestAction = None,
filter_: [RequestFilter] = None,
sort: RequestSort = None,
params: Dict[ParameterType, str] = None,
query: Dict[RequestQuery, str] = None):
""" Performs an API request to get forecast data for a specified location.
Params:
- location: Optional - RequestLocation - the location for which the request is processed
- action: Optional - RequestAction - the API request action option
- filter_: Optional - [RequestFilter] - a list of API request filters
- sort: Optional - RequestSort - the API request sort option
- params: Optional - Dict[ParameterType, str] - a list of API request parameters
                - query: Optional - Dict[RequestQuery, str] - a list of API request queries
Returns:
- a list of ForecastsResponse objects if successful
- an empty list if there is no data
"""
endpoint = Endpoint(endpoint_type=EndpointType.FORECASTS,
location=location,
action=action,
filter_=filter_,
sort=sort,
params=params,
query=query)
return self.request(endpoint=endpoint)
def observations(self,
location: RequestLocation = None,
action: RequestAction = None,
filter_: [RequestFilter] = None,
sort: RequestSort = None,
params: Dict[ParameterType, str] = None,
query: Dict[RequestQuery, str] = None):
""" Performs an API request to get observation data for a specified location.
Params:
- location: Optional - RequestLocation - the location for which the request is processed
- action: Optional - RequestAction - the API request action option
- filter_: Optional - [RequestFilter] - a list of API request filters
- sort: Optional - RequestSort - the API request sort option
- params: Optional - Dict[ParameterType, str] - a list of API request parameters
                - query: Optional - Dict[RequestQuery, str] - a list of API request queries
Returns:
- a list of ObservationsResponse objects if successful
- an empty list if there is no data
"""
endpoint = Endpoint(endpoint_type=EndpointType.OBSERVATIONS,
location=location,
action=action,
filter_=filter_,
sort=sort,
params=params,
query=query)
return self.request(endpoint=endpoint)
def observations_summary(self,
location: RequestLocation = None,
action: RequestAction = None,
filter_: [RequestFilter] = None,
sort: RequestSort = None,
params: Dict[ParameterType, str] = None,
query: Dict[RequestQuery, str] = None):
""" Performs an API request to get observations summary data for a specified location.
Params:
- location: Optional - RequestLocation - the location for which the request is processed
- action: Optional - RequestAction - the API request action option
- filter_: Optional - [RequestFilter] - a list of API request filters
- sort: Optional - RequestSort - the API request sort option
- params: Optional - Dict[ParameterType, str] - a list of API request parameters
                - query: Optional - Dict[RequestQuery, str] - a list of API request queries
Returns:
- a list of ObservationsSummaryResponse objects if successful
- an empty list if there is no data
"""
endpoint = Endpoint(endpoint_type=EndpointType.OBSERVATIONS_SUMMARY,
location=location,
action=action,
filter_=filter_,
sort=sort,
params=params,
query=query)
return self.request(endpoint=endpoint)
def places(self,
location: RequestLocation = None,
action: RequestAction = None,
filter_: [RequestFilter] = None,
sort: RequestSort = None,
params: Dict[ParameterType, str] = None,
query: Dict[RequestQuery, str] = None):
""" Performs an API request to get places data for a specified location.
Params:
- location: Optional - RequestLocation - the location for which the request is processed
- action: Optional - RequestAction - the API request action option
- filter_: Optional - [RequestFilter] - a list of API request filters
- sort: Optional - RequestSort - the API request sort option
- params: Optional - Dict[ParameterType, str] - a list of API request parameters
                - query: Optional - Dict[RequestQuery, str] - a list of API request queries
Returns:
- a list of PlacesResponse objects if successful
- an empty list if there is no data
"""
endpoint = Endpoint(endpoint_type=EndpointType.PLACES,
location=location,
action=action,
filter_=filter_,
sort=sort,
params=params,
query=query)
return self.request(endpoint=endpoint)
def custom_endpoint(self,
location: RequestLocation = None,
action: RequestAction = None,
filter_: [RequestFilter] = None,
sort: RequestSort = None,
params: Dict[ParameterType, str] = None,
query: Dict[RequestQuery, str] = None):
""" Performs an API request to get custom endpoint data for a specified location.
When calling custom_endpoint, in addition to setting the EndpointType of the Endpoint object to CUSTOM,
the EndpointType.custom value must be set to the string value of the endpoint you are requesting. See
            the examples section to see how this is done.
Params:
- location: Optional - RequestLocation - the location for which the request is processed
- action: Optional - RequestAction - the API request action option
- filter_: Optional - [RequestFilter] - a list of API request filters
- sort: Optional - RequestSort - the API request sort option
- params: Optional - Dict[ParameterType, str] - a list of API request parameters
                - query: Optional - Dict[RequestQuery, str] - a list of API request queries
Returns:
- a list of CustomResponse objects if successful
- an empty list if there is no data
Examples:
# You can also use the custom endpoint type to request data from a known valid endpoint, for cases
# where new API data fields have not yet been added to an endpoint's response class.
EndpointType.custom = "forecasts"
f_list = awx.request(endpoint=Endpoint(endpoint_type=EndpointType.CUSTOM,
location=RequestLocation(postal_code="54660")))
forecast = f_list[0]
period = forecast.periods[0] # type: ForecastPeriod
# Valid endpoint, not in our Endpoint Enum - run this to test a beta or pre-release endpoint
EndpointType.custom = "stormreports"
endpt = Endpoint(EndpointType.CUSTOM, location=RequestLocation(postal_code="54660"))
resp_list = awx.request(endpt)
response = resp_list[0]
"""
endpoint = Endpoint(endpoint_type=EndpointType.CUSTOM,
location=location,
action=action,
filter_=filter_,
sort=sort,
params=params,
query=query)
return self.request(endpoint=endpoint)
```
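A minimal usage sketch for the convenience methods above, assuming the `AerisWeather` constructor arguments shown in the test modules further down (`app_id`, `client_id`, `client_secret`); the credential strings here are placeholders.
```python
from aerisweather.aerisweather import AerisWeather
from aerisweather.requests.RequestFilter import RequestFilter
from aerisweather.requests.RequestLocation import RequestLocation

# Placeholder credentials - substitute real Aeris API keys.
awx = AerisWeather(app_id="APP_ID",
                   client_id="CLIENT_ID",
                   client_secret="CLIENT_SECRET")

# Each helper wraps request() with the matching EndpointType.
forecast_list = awx.forecasts(location=RequestLocation(postal_code="54660"))
alert_list = awx.alerts(location=RequestLocation(postal_code="55124"),
                        filter_=[RequestFilter.ALERTS.ALL])
for alert in alert_list:
    print(alert.timestamps.issuedISO)
```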
#### File: aerisweather/requests/Endpoint.py
```python
from typing import Dict
from aenum import Enum
from aerisweather.requests.ParameterType import ParameterType
from aerisweather.requests.RequestAction import RequestAction
from aerisweather.requests.RequestFilter import RequestFilter
from aerisweather.requests.RequestLocation import RequestLocation
from aerisweather.requests.RequestQuery import RequestQuery
from aerisweather.requests.RequestSort import RequestSort
class EndpointType(Enum):
""" Defines the available endpoints for Aeris API requests.
When requesting data from an unimplemented endpoint, use the CUSTOM type and set the name of the endpoint
using the "custom" property.
Examples:
# ObservationSummary
endpoint = Endpoint(endpoint_type=EndpointType.OBSERVATIONS_SUMMARY)
# Custom Endpoint
EndpointType.custom = "stormreports"
endpt = Endpoint(EndpointType.CUSTOM, location=RequestLocation(postal_code="54660"))
"""
ALERTS = "advisories"
CONDITIONS = "conditions"
CONVECTIVE_OUTLOOK = "convective/outlook"
FORECASTS = "forecasts"
OBSERVATIONS = "observations"
OBSERVATIONS_SUMMARY = "observations/summary"
PLACES = "places"
CUSTOM = "custom"
__custom_endpoint_type_name = ""
@property
def custom(self):
""" Returns the string name of the custom/generic endpoint used when CUSTOM is the endpoint type """
return self.__custom_endpoint_type_name
@custom.setter
def custom(self, endpoint_type: str):
""" Sets the string name of the custom/generic endpoint used when CUSTOM is the endpoint type """
self.__custom_endpoint_type_name = endpoint_type
class Endpoint:
""" Defines an object used to hold and transfer information regarding a specific Aeris API endpoint """
def __init__(self,
endpoint_type: EndpointType = None,
location: RequestLocation = None,
action: RequestAction = None,
filter_: [RequestFilter] = None,
sort: RequestSort = None,
params: Dict[ParameterType, str] = None,
query: Dict[RequestQuery, str] = None):
""" Constructor
The Endpoint class can be instantiated with no parameters if configuration is handled later. EndpointTypes
that have been implemented are defined in the EndpointType enum. Undefined EndpointTypes can be
requested using the Custom EndpointType.
Params:
- endpoint_type: Optional - EndpointType - determines which Aeris API endpoint will be called
- location: Optional - RequestLocation - the location for which the request is processed
- action: Optional - RequestAction - the API request action option
- filter_: Optional - [RequestFilter] - a list of API request filters
- sort: Optional - RequestSort - the API request sort option
- params: Optional - Dict[ParameterType, str] - a list of API request parameters
                - query: Optional - Dict[RequestQuery, str] - a list of API request queries
"""
self.endpoint_type = endpoint_type
self.location = location
self.action = action
self.filter_ = filter_
self.sort = sort
self.params = params
self.query = query
```
#### File: aerisweather/responses/AlertTimestamps.py
```python
class AlertTimestamps:
"""
Defines an object that stores an individual alert's timestamp data returned within an Alerts/Advisories
endpoint request.
...
},
"timestamps": {
"issued": 1522696200,
"issuedISO": "2018-04-02T14:10:00-05:00",
"begins": 1522696200,
"beginsISO": "2018-04-02T14:10:00-05:00",
"expires": 1522821600,
"expiresISO": "2018-04-04T01:00:00-05:00",
"added": 1522696239,
"addedISO": "2018-04-02T14:10:39-05:00"
},
...
"""
def __init__(self, alert):
self.alert_data = alert
@property
def issued(self) -> int:
""" UNIX timestamp when the advisory was issued by the NWS. """
return self.alert_data["issued"]
@property
def issuedISO(self) -> str:
""" ISO 8601 date of the time when the advisory was issued. """
return self.alert_data["issuedISO"]
@property
def begins(self) -> int:
""" UNIX timestamp when the advisory goes into effect. """
return self.alert_data["begins"]
@property
def beginsISO(self) -> str:
""" ISO 8601 date of the time when the advisory goes into effect. """
return self.alert_data["beginsISO"]
@property
def expires(self) -> int:
""" UNIX timestamp when the advisory expires. """
return self.alert_data["expires"]
@property
def expiresISO(self) -> str:
""" ISO 8601 date of the time when the advisory expires. """
return self.alert_data["expiresISO"]
@property
def added(self) -> int:
""" UNIX timestamp when the advisory was stored. """
return self.alert_data["added"]
@property
def addedISO(self) -> str:
""" ISO 8601 date of the time when the advisory was stored. """
return self.alert_data["addedISO"]
```
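A small construction sketch for `AlertTimestamps`, feeding it the sample `timestamps` payload quoted in the class docstring; the UNIX fields convert to `datetime` in the usual way.
```python
from datetime import datetime, timezone
from aerisweather.responses.AlertTimestamps import AlertTimestamps

timestamps = AlertTimestamps({
    "issued": 1522696200, "issuedISO": "2018-04-02T14:10:00-05:00",
    "begins": 1522696200, "beginsISO": "2018-04-02T14:10:00-05:00",
    "expires": 1522821600, "expiresISO": "2018-04-04T01:00:00-05:00",
    "added": 1522696239, "addedISO": "2018-04-02T14:10:39-05:00",
})
issued_utc = datetime.fromtimestamp(timestamps.issued, tz=timezone.utc)
print(issued_utc.isoformat())   # 2018-04-02T19:10:00+00:00
```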
#### File: aerisweather/responses/ConditionsResponse.py
```python
from typing import List
from aerisweather.responses.AerisProfile import AerisProfileConditions
from aerisweather.responses.ConditionsPeriod import ConditionsPeriod
from aerisweather.responses.Response import Response
class ConditionsResponse(Response):
"""
Defines the object that stores conditions for a location.
"""
# URL / response body for conditions follows:
#
# https://api.aerisapi.com/conditions?p=33.953,-84.55&client_id=$AERIS_CLIENT_ID&client_secret=$AERIS_CLIENT_SECRET
#
#
# {
# "success": true,
# "error": null,
# "response": [
# {
# "loc": {
# "lat": 33.953,
# "long": -84.55
# },
# "place": {
# "name": "marietta",
# "state": "ga",
# "country": "us"
# },
# "periods": [
# {
# "timestamp": 1612215840,
# "dateTimeISO": "2021-02-01T16:44:00-05:00",
# "tempC": 2.58,
# "tempF": 36.64,
# "feelslikeC": -1.26,
# "feelslikeF": 29.74,
# "dewpointC": -3.26,
# "dewpointF": 26.13,
# "humidity": 62,
# "pressureMB": 1014.5,
# "pressureIN": 29.96,
# "windDir": "NW",
# "windDirDEG": 320,
# "windSpeedKTS": 17.95,
# "windSpeedKPH": 33.24,
# "windSpeedMPH": 20.66,
# "windGustKTS": 26.9,
# "windGustKPH": 49.82,
# "windGustMPH": 30.96,
# "precipMM": 0,
# "precipIN": 0,
# "snowCM": 0,
# "snowIN": 0,
# "visibilityKM": 19.911,
# "visibilityMI": 12.372,
# "sky": 95,
# "cloudsCoded": "OV",
# "weather": "Cloudy",
# "weatherCoded": "::OV",
# "weatherPrimary": "Cloudy",
# "weatherPrimaryCoded": "::OV",
# "icon": "cloudy.png",
# "solradWM2": 83,
# "uvi": 0,
# "isDay": true
# }
# ],
# "profile": {
# "tz": "America/New_York",
# "tzname": "EST",
# "tzoffset": -18000,
# "isDST": false,
# "elevFT": null,
# "elevM": null
# }
# }
# ]
# }
def __init__(self, json_data):
super().__init__(json_data=json_data)
profile_data = self.data.get("profile", None)
if profile_data is not None:
self._profile = AerisProfileConditions(profile_data)
period_data = self.data.get("periods", None)
if period_data is not None:
self._periods = [ConditionsPeriod(d) for d in period_data]
@property
def profile(self) -> AerisProfileConditions:
return self._profile
@property
def periods(self) -> List[ConditionsPeriod]:
return self._periods.copy()
```
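A construction sketch for `ConditionsResponse`, using one element of the sample `response` array above (trimmed to a few fields). It assumes the `Response` base class stores the passed dict as `self.data`, as the `.get()` calls above imply, and that the profile wrapper exposes `tz`.
```python
from aerisweather.responses.ConditionsResponse import ConditionsResponse

sample = {
    "loc": {"lat": 33.953, "long": -84.55},
    "place": {"name": "marietta", "state": "ga", "country": "us"},
    "periods": [{"timestamp": 1612215840,
                 "dateTimeISO": "2021-02-01T16:44:00-05:00",
                 "tempC": 2.58,
                 "tempF": 36.64}],
    "profile": {"tz": "America/New_York", "tzname": "EST", "tzoffset": -18000,
                "isDST": False, "elevFT": None, "elevM": None},
}
conditions = ConditionsResponse(sample)
print(len(conditions.periods))   # 1
print(conditions.profile.tz)     # America/New_York (assumed attribute on the profile wrapper)
```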
#### File: aerisweather/responses/ObservationsData.py
```python
class ObservationsData:
"""
    Defines the object that stores the obs data returned within an Observations endpoint request
{
"success": true,
"error": null,
"responses": {
...
"ob": {
"timestamp": 1520535180,
"dateTimeISO": 2018-03-08T12:53:00-06:00",
"tempC": -1.1
...
"""
def __init__(self, ob):
self.ob_data = ob
@property
def timestamp(self):
return self.ob_data["timestamp"]
@property
def dateTimeISO(self):
return self.ob_data["dateTimeISO"]
@property
def tempC(self):
return self.ob_data["tempC"]
@property
def tempF(self):
return self.ob_data["tempF"]
@property
def dewpointC(self):
return self.ob_data["dewpointC"]
@property
def dewpointF(self):
return self.ob_data["dewpointF"]
@property
def humidity(self):
return self.ob_data["humidity"]
@property
def pressureMB(self):
return self.ob_data["pressureMB"]
@property
def pressureIN(self):
return self.ob_data["pressureIN"]
@property
def spressureMB(self):
return self.ob_data["spressureMB"]
@property
def spressureIN(self):
return self.ob_data["spressureIN"]
@property
def altimeterMB(self):
return self.ob_data["altimeterMB"]
@property
def altimeterIN(self):
return self.ob_data["altimeterIN"]
@property
def windKTS(self):
return self.ob_data["windKTS"]
@property
def windKPH(self):
return self.ob_data["windKPH"]
@property
def windMPH(self):
return self.ob_data["windMPH"]
@property
def windSpeedKTS(self):
return self.ob_data["windSpeedKTS"]
@property
def windSpeedKPH(self):
return self.ob_data["windSpeedKPH"]
@property
def windSpeedMPH(self):
return self.ob_data["windSpeedMPH"]
@property
def windDirDEG(self):
return self.ob_data["windDirDEG"]
@property
def windDir(self):
return self.ob_data["windDir"]
@property
def windGustKTS(self):
return self.ob_data["windGustKTS"]
@property
def windGustKPH(self):
return self.ob_data["windGustKPH"]
@property
def windGustMPH(self):
return self.ob_data["windGustMPH"]
@property
def flightRule(self):
return self.ob_data["flightRule"]
@property
def visibilityKM(self):
return self.ob_data["visibilityKM"]
@property
def visibilityMI(self):
return self.ob_data["visibilityMI"]
@property
def weather(self):
return self.ob_data["weather"]
@property
def weatherShort(self):
return self.ob_data["weatherShort"]
@property
def weatherCoded(self):
return self.ob_data["weatherCoded"]
@property
def weatherPrimary(self):
return self.ob_data["weatherPrimary"]
@property
def weatherPrimaryCoded(self):
return self.ob_data["weatherPrimaryCoded"]
@property
def cloudsCoded(self):
return self.ob_data["cloudsCoded"]
@property
def icon(self):
return self.ob_data["icon"]
@property
def heatindexC(self):
return self.ob_data["heatindexC"]
@property
def heatindexF(self):
return self.ob_data["heatindexF"]
@property
def windchillC(self):
return self.ob_data["windchillC"]
@property
def windchillF(self):
return self.ob_data["windchillF"]
@property
def feelslikeC(self):
return self.ob_data["feelslikeC"]
@property
def feelslikeF(self):
return self.ob_data["feelslikeF"]
@property
def isDay(self):
return self.ob_data["isDay"]
@property
def sunrise(self):
return self.ob_data["sunrise"]
@property
def sunriseISO(self):
return self.ob_data["sunriseISO"]
@property
def sunset(self):
return self.ob_data["sunset"]
@property
def sunsetISO(self):
return self.ob_data["sunsetISO"]
@property
def snowDepthCM(self):
return self.ob_data["snowDepthCM"]
@property
def snowDepthIN(self):
return self.ob_data["snowDepthIN"]
@property
def precipMM(self):
return self.ob_data["precipMM"]
@property
def precipIN(self):
return self.ob_data["precipIN"]
@property
def solradWM2(self):
return self.ob_data["solradWM2"]
@property
def solradMethod(self):
return self.ob_data["solradMethod"]
@property
def ceilingFT(self):
return self.ob_data["ceilingFT"]
@property
def ceilingM(self):
return self.ob_data["ceilingM"]
@property
def light(self):
return self.ob_data["light"]
@property
def QC(self):
return self.ob_data["QC"]
@property
def QCcode(self):
return self.ob_data["QCcode"]
@property
def sky(self):
return self.ob_data["sky"]
```
#### File: aerisweather/responses/ObservationsSummaryDewPt.py
```python
class ObservationsSummaryDewPt:
""" Defines an object for the observations summary period temp data. """
dewpt = {}
def __init__(self, dewpt_json):
"""
Constructor - this takes an individual observations summary period's dewpoint json.
{
maxC": 5,
"maxF": 41,
"minC": -3,
"minF": 26,
"avgC": -0.6,
"avgF": 30.9,
"count": 23
},
"""
self.dewpt = dewpt_json
@property
def maxC(self) -> float:
""" The maximum dew point in Celsius. Null if unavailable. """
return self.dewpt["maxC"]
@property
def maxF(self) -> float:
""" The maximum dew point in Fahrenheit. Null if unavailable. """
return self.dewpt["maxF"]
@property
def minC(self) -> float:
""" The minimum dew point in Celsius. Null if unavailable. """
return self.dewpt["minC"]
@property
def minF(self) -> float:
""" The minimum dew point in Fahrenheit. Null if unavailable. """
return self.dewpt["minF"]
@property
def avgC(self) -> float:
""" The average dew point in Celsius. Null if unavailable. """
return self.dewpt["avgC"]
@property
def avgF(self) -> float:
""" The average dew point in Fahrenheit. Null if unavailable. """
return self.dewpt["avgF"]
@property
def count(self) -> int:
""" The total number of observations that included dew point information. """
return self.dewpt["count"]
```
#### File: aerisweather/responses/ObservationsSummaryResponse.py
```python
from typing import List
from aerisweather.responses.AerisLocation import AerisLocation
from aerisweather.responses.AerisPlace import AerisPlace
from aerisweather.responses.AerisProfile import AerisProfileObservationsSummary
from aerisweather.responses.ObservationsSummaryPeriod import ObservationsSummaryPeriod
from aerisweather.responses.Response import Response
class ObservationsSummaryResponse(Response):
""" Defines an object for the data returned in an Aeris API ObservationsSummary responses."""
data = {}
def __init__(self, json_data=None):
"""Constructor
Takes a single response json object from an Aeris API data response.
Examples would be:
either of the responses in the following response array:
"response": [
{
...
},
{
...
}
or
the contents of a single response:
"response": {
"loc": {
"lat": 43.80136,
"""
super().__init__(json_data=json_data)
self.data = json_data
@property
def id(self) -> str:
"""Returns the id value of the observation station."""
return self.data["id"]
@property
def loc(self) -> AerisLocation:
"""Returns an AerisLocation object."""
return AerisLocation(self.data["loc"])
@property
def place(self):
"""Returns an AerisPlace object."""
place = AerisPlace(self.data["place"])
return place
@property
def periods(self) -> List[ObservationsSummaryPeriod]:
""" Returns an array of ObservationsSummaryPeriod objects """
periods = self.data["periods"]
p_list = [] # type: [ObservationsSummaryPeriod]
for per in periods:
osp = ObservationsSummaryPeriod(per)
p_list.append(osp)
return p_list
@property
def profile(self):
"""Returns an AerisProfile object."""
profile = AerisProfileObservationsSummary(self.data["profile"])
return profile
```
#### File: aerisweather/responses/RiversCrestsHistoric.py
```python
from aerisweather.utils import AerisDateTime
class RiversCrestsHistoric:
""" Defines an object for the Aeris API rivers historic crests data returned in an Aeris API responses """
data = {}
def __init__(self, json_data):
"""Constructor"""
self.data = json_data
@property
def timestamp(self):
"""Returns the unix timestamp of the date of the crest"""
return self.data["timestamp"]
@property
def dateTimeISO(self):
"""Returns the a Python DateTime date of the crest"""
return AerisDateTime.AerisDateTime().get_datetime_from_aeris_iso(self.data["dateTimeISO"])
@property
def heightFT(self):
"""Returns the height in feet of the crest"""
return self.data["heightFT"]
@property
def heightM(self):
"""Returns the height in meters of the crest"""
return self.data["heightM"]
```
#### File: aerisweather/responses/RiversImpacts.py
```python
class RiversImpacts:
"""Defines an object for the Aeris API rivers impacts data returned in an Aeris API responses"""
data = {}
def __init__(self, json_data):
""" Constructor """
self.data = json_data
@property
def heightFT(self):
"""Returns the height in feet that the impact begins to occur."""
return self.data["heightFT"]
@property
def getHeightM(self):
"""Returns the height in meters that the impact begins to occur."""
return self.data["heightM"]
@property
def getText(self):
"""Returns the impact description"""
return self.data["text"]
```
#### File: aerisweather-python-sdk/tests/test_alerts.py
```python
import json
import os
import unittest
from urllib.error import URLError
from aerisweather.aerisweather import AerisWeather
from aerisweather.requests.Endpoint import Endpoint, EndpointType
from aerisweather.requests.RequestFilter import RequestFilter
from aerisweather.requests.RequestLocation import RequestLocation
from aerisweather.responses.AerisPlace import AerisPlace
from aerisweather.responses.AerisProfile import AerisProfileAlerts
from aerisweather.responses.AlertsResponse import AlertsResponse
from aerisweather.responses.AlertDetails import AlertDetails
from aerisweather.responses.AlertIncludes import AlertIncludes
from aerisweather.responses.AlertTimestamps import AlertTimestamps
from aerisweather.utils.AerisError import AerisError
from tests.keys import client_id, client_secret, app_id
script_dir = os.path.dirname(__file__)
class TestAlerts(unittest.TestCase):
""" Defines tests modules for the Aeris API Alerts/Advisories class """
def test_static_data(self):
""" Test the code against a known source of data """
file = open(os.path.join(script_dir, "responses/alerts.txt"), "r")
try:
json_obj = json.loads(file.read())
alerts = AlertsResponse(json_obj["response"][0])
assert alerts is not None
assert alerts.id == "2ac0f0296cf81497a20c826d36f50305"
details = alerts.details
assert type(details) == AlertDetails
assert details.type == "BH.S"
assert details.name == "STATEMENT"
assert details.loc == "TXZ238"
assert details.emergency is False
assert details.color == "40E0D0"
assert details.cat == "beach"
assert details.body == "...HIGH RIP CURRENT RISK TODAY...\n\n.ELEVATED SURF AND A HIGH RISK OF RIP " + \
"CURRENTS WILL CONTINUE"
assert details.bodyFull == "WHUS44 KHGX 290914\nCFWHGX\n\nCOASTAL HAZARD MESSAGE\nNATIONAL " + \
"WEATHER SERVICE HOUSTON/GALVESTON"
timestamps = alerts.timestamps
assert type(timestamps) == AlertTimestamps
assert timestamps.issued == 1522314840
assert timestamps.issuedISO == "2018-03-29T04:14:00-05:00"
assert timestamps.begins == 1522314840
assert timestamps.beginsISO == "2018-03-29T04:14:00-05:00"
assert timestamps.expires == 1522357200
assert timestamps.expiresISO == "2018-03-29T16:00:00-05:00"
assert timestamps.added == 1522318202
assert timestamps.addedISO == "2018-03-29T05:10:02-05:00"
assert alerts.poly == ""
assert alerts.geoPoly is None
includes = alerts.includes
assert type(includes) == AlertIncludes
assert includes.counties == []
assert includes.fips == ["48039", "48071"]
assert includes.wxzones == ["TXZ214", "TXZ236"]
assert includes.zipcodes == [77404, 77414]
place = alerts.place
assert type(place) == AerisPlace
assert place.name == "galveston"
assert place.state == "tx"
assert place.country == "us"
profile = alerts.profile
assert type(profile) == AerisProfileAlerts
assert profile.tz == "America/Chicago"
assert alerts.active is True
except URLError as url_err:
print("URL Error: " + url_err.reason)
raise url_err
except AerisError as aeris_err:
print("AerisError: " + aeris_err.__str__())
raise aeris_err
except Exception as ex:
print(ex.args)
raise ex
finally:
file.close()
def test_api_response(self):
""" Test the Alerts code against a live response from the API """
try:
awx = AerisWeather(app_id=app_id,
client_id=client_id,
client_secret=client_secret)
endpoint = Endpoint(endpoint_type=EndpointType.ALERTS,
location=RequestLocation(postal_code="55124"),
action=None,
filter_=[RequestFilter.ALERTS.ALL],
sort=None,
params=None,
query=None)
alerts_list = awx.request(endpoint=endpoint)
for alert in alerts_list: # type: AlertsResponse
assert alert.place is not None
timestamps = alert.timestamps
assert type(timestamps) == AlertTimestamps
assert timestamps.issued is not None
includes = alert.includes
assert type(includes) is AlertIncludes
assert includes.wxzones is not None
assert alert.active is True
except URLError as url_err:
print("URL Error: " + url_err.reason)
raise url_err
except AerisError as aeris_err:
print("AerisError: " + str(aeris_err))
raise aeris_err
except Exception as ex:
print(ex.args)
raise ex
def test_alerts_method(self):
""" Test the AerisWeather.alerts method """
try:
awx = AerisWeather(app_id=app_id,
client_id=client_id,
client_secret=client_secret)
alerts_list = awx.alerts(location=RequestLocation(postal_code="55124"),
action=None,
filter_=[RequestFilter.ALERTS.ALL],
sort=None,
params=None,
query=None)
for alert in alerts_list: # type: AlertsResponse
assert alert.place is not None
timestamps = alert.timestamps
assert type(timestamps) == AlertTimestamps
assert timestamps.issued is not None
includes = alert.includes
assert type(includes) is AlertIncludes
assert includes.wxzones is not None
assert alert.active is True
except URLError as url_err:
print("URL Error: " + url_err.reason)
raise url_err
except AerisError as aeris_err:
print("AerisError: " + str(aeris_err))
raise aeris_err
except Exception as ex:
print(ex.args)
raise ex
suite = unittest.TestLoader().loadTestsFromTestCase(TestAlerts)
unittest.TextTestRunner(verbosity=2).run(suite)
``` |
{
"source": "jkoeter/aws-mqtt-mirror",
"score": 2
} |
#### File: jkoeter/aws-mqtt-mirror/aws-mqtt-mirror.py
```python
import json
from time import sleep
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
CLIENT_ID = "MQTT-mirror"
AWS_ENDPOINT = "XXXXX.iot.eu-west-1.amazonaws.com"
AWS_ENDPOINT_PORT = 443
AWS_ACCESS_KEY = "XXXXX"
AWS_ACCESS_SECRET = "XXXXX"
ROOT_CA_PATH = "certificates\\AmazonRootCA1.pem"
def mqtt_callback(_unused_client, _unused_userdata, message):
mqtt_message = json.loads(message.payload)
print('{} : {}'.format(int(message.timestamp), mqtt_message))
if __name__ == '__main__':
print('AWS MQTT mirror')
aws_iot_mqtt = AWSIoTMQTTClient(CLIENT_ID, useWebsocket=True)
aws_iot_mqtt.configureEndpoint(AWS_ENDPOINT, AWS_ENDPOINT_PORT)
aws_iot_mqtt.configureCredentials(ROOT_CA_PATH)
aws_iot_mqtt.configureIAMCredentials(AWS_ACCESS_KEY, AWS_ACCESS_SECRET)
result = aws_iot_mqtt.connect()
result = aws_iot_mqtt.subscribe("#", 1, mqtt_callback)
print("Press CTRL+C to abort")
try:
while True:
sleep(60)
except KeyboardInterrupt:
print("Aborted by user")
finally:
aws_iot_mqtt.disconnect()
``` |
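A hypothetical smoke test for the mirror above: a second client, configured with the same placeholder endpoint and IAM credentials, publishes a JSON message so that `mqtt_callback` prints it.
```python
import json
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient

# Reuse the same (placeholder) endpoint, CA bundle and IAM credentials as the mirror.
publisher = AWSIoTMQTTClient("MQTT-mirror-test", useWebsocket=True)
publisher.configureEndpoint("XXXXX.iot.eu-west-1.amazonaws.com", 443)
publisher.configureCredentials("certificates\\AmazonRootCA1.pem")
publisher.configureIAMCredentials("XXXXX", "XXXXX")
publisher.connect()
publisher.publish("test/mirror", json.dumps({"hello": "world"}), 1)   # topic, payload, QoS
publisher.disconnect()
```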
{
"source": "jkoeter/nexperia-guest-wifi",
"score": 2
} |
#### File: jkoeter/nexperia-guest-wifi/nexperia-guest-wifi.py
```python
from datetime import datetime, timezone
from hashlib import sha256
import re
import xml.etree.ElementTree as ET
import requests
import argparse
class Router(object):
"""This class executes commands on a Nexperia V10 router."""
def __init__(self, host, username, password):
self.host = host
self.username = username
self.password = password
self.session = None
self.session_token = None
self.session_token_ext = None
def login(self):
"""Login the Nexperia router"""
self.session = requests.Session()
token_url = 'http://{}/function_module/login_module/login_page/logintoken_lua.lua'.format(self.host)
login_url = 'http://{}'.format(self.host)
# 1st stage login to get the required cookies; result can be ignored
self.session.get(login_url)
# 2nd stage login to get the password token
result = self.session.get(token_url)
self.session_token = re.findall(r'\d+', result.text)[0]
data = {
"Username": self.username,
"Password": sha256((self.password + self.session_token).encode('utf-8')).hexdigest(),
"action": "login"
}
# 3rd stage login; send the username plus hashed password
result = self.session.post(login_url, data)
def logout(self):
logout_url = 'http://{}'.format(self.host)
self.session.post(logout_url, data={
"IF_LogOff": 1,
"IF_LanguageSwitch": "",
"IF_ModeSwitch": ""
})
self.session = None
def get_guest_wifi_enabled(self):
if not self.session:
self.login()
# Get the guest-wifi data
# Generate a timestamp for the request
ts = round(datetime.now(timezone.utc).timestamp() * 1000)
# you need to get this page before getting the data otherwise it fails with page not found...
        # and you need the secret sessionTmpToken when writing
guest_wifi_page = self.session.get('http://{}/getpage.lua?pid=123&nextpage=Localnet_Wlan_GuestWiFi_t.lp&Menu3Location=0&_={}'.format(self.host, ts))
# Get the extended session-key
session_token_ext_string = re.findall('_sessionTmpToken = \"(.+)\"', guest_wifi_page.text)[0]
# format the string-encoded byte-string in a real character-string
self.session_token_ext = ''
char_count = len(session_token_ext_string)//4
for count in range(char_count):
self.session_token_ext = self.session_token_ext + chr(int(session_token_ext_string[(count*4)+2:(count*4)+4], 16))
        # Update the timestamp and add a little delay as the returned time is in seconds and we use milliseconds
ts = round(datetime.now(timezone.utc).timestamp() * 1000) + 48
# Get the page with the guest-wifi data
data_page = self.session.get('http://{}/common_page/Localnet_Wlan_GuestWiFiOnOff_lua.lua?_={}'.format(self.host, ts+35))
# Parse the XML to get the current status
result_root = ET.fromstring(data_page.text)
guest_wifi_switch = result_root.find('OBJ_GUESTWIFISWITCH_ID')
guest_wifi_settings = {}
if guest_wifi_switch:
param_list = guest_wifi_switch.findall('Instance/ParaName')
value_list = guest_wifi_switch.findall('Instance/ParaValue')
for count, parameter in enumerate(param_list):
guest_wifi_settings[parameter.text] = int(value_list[count].text)
return guest_wifi_settings['Enable'] == 1
def set_guest_wifi_enable(self, enable=True):
if not self.session:
self.login()
current_state = self.get_guest_wifi_enabled()
if current_state != enable:
set_data_url = 'http://{}/common_page/Localnet_Wlan_GuestWiFiOnOff_lua.lua'.format(self.host)
result = self.session.post(set_data_url, data={
"IF_ACTION": "Apply",
"_InstID": "",
"Enable": 1 if enable else 0,
"Btn_cancel_GuestWiFiOnOff": "",
"Btn_apply_GuestWiFiOnOff": "",
"_sessionTOKEN": self.session_token_ext
})
if __name__ == '__main__':
parser = argparse.ArgumentParser('nexperia-guest-wifi')
parser.add_argument('-i', '--host', help='the host-name/IP-address of the Nexperia router')
parser.add_argument('-u', '--user', help='the user-name for login (Admin)')
parser.add_argument('-p', '--pwd', help='the <PASSWORD>')
parser.add_argument('state', nargs='?', help='set the state of the guest-wifi (on or off); \
if not specified the current state is obtained')
args = parser.parse_args()
if not args.host:
print('error: no host specified')
exit()
if not args.user:
print('error: no user-name specified')
exit()
if not args.pwd:
print('error: no password specified')
exit()
my_router = Router(args.host, args.user, args.pwd)
if not args.state:
guest_wifi_on = my_router.get_guest_wifi_enabled()
print('Guest-Wifi is: {}'.format('on' if guest_wifi_on else 'off'))
else:
if args.state.upper() == 'ON':
my_router.set_guest_wifi_enable(True)
elif args.state.upper() == 'OFF':
my_router.set_guest_wifi_enable(False)
else:
print('Invalid state: \'{}\'. Use \'On\' or \'Off\''.format(args.state))
``` |
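A worked example of the `_sessionTmpToken` decoding loop in `get_guest_wifi_enabled` above: the router embeds the token as literal `\xNN` escape text, and each 4-character group is turned back into one character.
```python
# Hypothetical token text as it appears in the page source.
encoded = r"\x31\x32\x33\x61\x62"

decoded = ''.join(chr(int(encoded[i*4 + 2:i*4 + 4], 16))
                  for i in range(len(encoded) // 4))
print(decoded)   # 123ab
```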
{
"source": "jkogut/simple-python-rest-api-v1",
"score": 3
} |
#### File: simple-python-rest-api-v1/app/rest_server.py
```python
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__status__ = "Beta"
from flask import Flask, request, jsonify, abort
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
#####################
app = Flask(__name__)
engine = create_engine('sqlite:///chinook.db', echo=True)
Base = declarative_base(engine)
class Employees(Base):
""" Class for declarative_base ORM db access """
__tablename__ = 'employees'
__table_args__ = {'autoload':True}
def loadSession():
"""
---> Create session
<--- Return session object
"""
metadata = Base.metadata
Session = sessionmaker(bind=engine)
session = Session()
return session
## API STATUS
@app.route("/api/status", methods = ['GET'])
def getStatus():
"""
---> Check API Status
<--- Return JSON with API_status
"""
result = {'API_status':'OK'}
return jsonify(result)
## GET employees
@app.route("/api/v1/employees", methods = ['GET'])
def getEmployees():
"""
---> Select all employees
<--- Return JSON with employeeID
"""
result = { "employees": dict(session.query(Employees.EmployeeId,Employees.LastName).all()) }
return jsonify(result)
## GET employeeId
@app.route("/api/v1/employees/<int:employeeId>", methods = ['GET'])
def getEmployeeIdData(employeeId):
"""
---> Select employee depending on employeeId
<--- Return JSON with employeeIds data
"""
filterQuery = session.query(Employees).filter(Employees.EmployeeId==employeeId).all()
result = { x: getattr(filterQuery[0], x) for x in Employees.__table__.columns.keys() }
return jsonify(result)
## POST create employee
@app.route("/api/v1/employees/new", methods = ['POST'])
def insertNewEmployee():
"""
---> Add (create) new employee from JSON payload
<--- Return added JSON payload with response code
"""
payload = request.json
if not payload or not 'LastName' in payload:
abort(400)
newEmp = Employees(**dateNormalizer(payload))
session.add(newEmp)
session.flush()
session.commit()
return jsonify(payload),201
## DELETE
@app.route("/api/v1/employees/delete/<int:employeeId>", methods = ['DELETE'])
def deleteEmployee(employeeId):
"""
---> Delete existing employee
<--- Return deleted response code
"""
if len(session.query(Employees).filter(Employees.EmployeeId==employeeId).all()) == 0:
result = {employeeId:"NOT EXISTS"}
return jsonify(result),400
delQuery = session.query(Employees).filter(Employees.EmployeeId==employeeId)
delQuery.delete()
delQuery.session.commit()
result = {employeeId:"DELETED"}
return jsonify(result),200
if __name__ == "__main__":
session = loadSession()
def dateNormalizer(payload):
"""
---> SQLite DateTime type only accepts Python datetime
<--- Return normalized JSON payload with Python datetime
"""
import timestring
normalizedBirthDate = timestring.Date(payload['BirthDate']).date
normalizedHireDate = timestring.Date(payload['HireDate']).date
payload['BirthDate'] = normalizedBirthDate
payload['HireDate'] = normalizedHireDate
return payload
app.run(host="0.0.0.0", port=int("5002"), debug=True)
``` |
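A client-side sketch for exercising the routes above with the `requests` library, assuming the server is running locally on port 5002 as configured in `app.run()` and that the chinook `employees` schema accepts the fields shown.
```python
import requests

base = "http://localhost:5002"

print(requests.get(base + "/api/status").json())        # {'API_status': 'OK'}
print(requests.get(base + "/api/v1/employees").json())  # {'employees': {...}}

new_employee = {
    "LastName": "Doe",
    "FirstName": "Jane",
    "BirthDate": "1980-01-01",
    "HireDate": "2020-06-15",
}
resp = requests.post(base + "/api/v1/employees/new", json=new_employee)
print(resp.status_code)                                  # 201 on success
```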
{
"source": "jkohhokj/AutoCrack",
"score": 3
} |
#### File: jkohhokj/AutoCrack/BruteForceLetter.py
```python
import string
from datetime import datetime
def bruteForceLetter(length, beg=""):
oldBeg = beg.upper()
if length != 1:
for x in string.ascii_uppercase:
yield from bruteForceLetter(length-1, beg=oldBeg+x)
else:
for x in string.ascii_uppercase:
yield oldBeg+x
```
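A quick check of the generator above: it yields every uppercase string of the requested length, in lexicographic order.
```python
from BruteForceLetter import bruteForceLetter

print(list(bruteForceLetter(1))[:3])         # ['A', 'B', 'C']
print(list(bruteForceLetter(2))[:3])         # ['AA', 'AB', 'AC']
print(sum(1 for _ in bruteForceLetter(2)))   # 676 == 26**2
```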
#### File: jkohhokj/AutoCrack/Vigenere.py
```python
import string
from datetime import datetime
from BruteForceLetter import bruteForceLetter
def vigenereEncode(message,key):
#print("tried",key)
message = message.upper()
key = key.upper()
numToLet = dict(zip([x for x in range(26)],list(string.ascii_uppercase)))
letToNum = dict(zip(list(string.ascii_uppercase),[x for x in range(26)]))
# set up the table
encoded = []
for charIndex in range(len(message)):
if message[charIndex] in string.ascii_uppercase:
#human readable one is commented
#firstLetNum = letToNum[str(message[charIndex])]
#secondLetNum = letToNum[str(key[charIndex%len(key)])]
#newNum = (firstLetNum + secondLetNum)%26
#newLet = numToLet[newNum]
#encoded.append(newLet)
encoded.append(numToLet[(letToNum[message[charIndex]]+letToNum[key[charIndex%len(key)]])%26])
else:
encoded.append(message[charIndex])
return ''.join(encoded)
def vigenereDecode(message,key): # literally change the plus to minus
message = message.upper()
key = key.upper()
numToLet = dict(zip([x for x in range(26)],list(string.ascii_uppercase)))
letToNum = dict(zip(list(string.ascii_uppercase),[x for x in range(26)]))
# set up the table
decoded = []
for charIndex in range(len(message)):
if message[charIndex] in string.ascii_uppercase:
# human readable one is commented
#firstLetNum = letToNum[str(message[charIndex])]
#secondLetNum = letToNum[str(key[charIndex%len(key)])]
#newNum = (firstLetNum - secondLetNum)%26
#newLet = numToLet[newNum]
#encoded.append(newLet)
decoded.append(numToLet[(letToNum[message[charIndex]]-letToNum[key[charIndex%len(key)]])%26])
else:
decoded.append(message[charIndex])
return ''.join(decoded)
def autoVigenere(message,keyword,min=2,max=5,showTime=False): # approx brute-force runtime by key length: 3~0.8s, 4~21s, 5~10:40, 6~4:40:00
old = datetime.now()
message = message.upper()
max += 1
for length in range(min,max):
for key in bruteForceLetter(length):
if vigenereEncode(keyword,key) in message:
print("decoded: ",vigenereDecode(message,key)," ","key: ",key)
if showTime:
print(datetime.now()-old)
return vigenereDecode(message,key)
``` |
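A round-trip sanity check for `vigenereEncode`/`vigenereDecode` above, using the classic HELLO/KEY example.
```python
from Vigenere import vigenereEncode, vigenereDecode

cipher = vigenereEncode("HELLO", "KEY")
print(cipher)                          # RIJVS
print(vigenereDecode(cipher, "KEY"))   # HELLO
```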
{
"source": "jkohhokj/Games",
"score": 3
} |
#### File: jkohhokj/Games/Tetrisbutitworks.py
```python
import pygame
import numpy as np
import random as rand
import time
def reset_grid(dead_subpiece_place_list):
grid = np.zeros((25,13),dtype='int32')
grid[:,0] = True
grid[:,-2] = True
grid[-1,:] = True
for i in range(25):
if i < 4:
grid[i,-1] = i
else:
grid[i,-1] = i-4
for dead_subpiece_place in dead_subpiece_place_list:
grid[dead_subpiece_place[1]][dead_subpiece_place[0]] = 1
return grid
def test_grid_maker(dead_subpiece_place_list):
test_grid = np.zeros((25,12),dtype='int32')
test_grid[:,0] = True
test_grid[:,-1] = True
test_grid[-1,:] = True
for dead_subpiece_place in dead_subpiece_place_list:
        test_grid[dead_subpiece_place[1]][dead_subpiece_place[0]] = 1
return test_grid #already occupied = 5
class Subpiece(pygame.sprite.Sprite): # class with sprite attribute
def __init__(self,x=0,y=0,color=(255,255,255)):
super().__init__()
self.image = pygame.Surface([20,20])
self.image.fill(color)
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
def getxy(self):
return (self.rect.x, self.rect.y)
def getcenterxy(self):
return (self.rect.x-10,self.rect.y-10)
def setxy(self,x,y):
self.rect.x = x
self.rect.y = y
def getcorners(self):
return [[self.rect.x,self.rect.y],[self.rect.x+10,self.rect.y], [self.rect.x+10,self.rect.y+10],[self.rect.x,self.rect.y+10]]
def setcenterxy(self,x,y):
self.rect.x = x - 10
self.rect.y = y - 10
def getgridplace(self):
return (int(self.rect.x/20)+1,int(self.rect.y/20)+4)
class Piece(pygame.sprite.Sprite): # class with group attribute of 4 subpieces with a shape
def __init__(self,a,b,c,d,pointofrotation,color=(255,255,255)):
super().__init__()
self.Apiece = Subpiece(a[0],a[1],color)
self.Bpiece = Subpiece(b[0],b[1],color)
self.Cpiece = Subpiece(c[0],c[1],color)
self.Dpiece = Subpiece(d[0],d[1],color)
self.group = pygame.sprite.Group(self.Apiece,self.Bpiece,self.Cpiece,self.Dpiece)
self.por = pointofrotation
self.rotation = 0
self.shape = ''
def getgridplace(self):
self.gridplace = [subpiece.getgridplace() for subpiece in self.group]
return self.gridplace
def set_shape(self,a,b,c,d):
self.Apiece = Subpiece(a[0],a[1])
self.Bpiece = Subpiece(b[0],b[1])
self.Cpiece = Subpiece(c[0],c[1])
self.Dpiece = Subpiece(d[0],d[1])
def set_gravity(self):
for subpiece in self.group:
subpiece.rect.y += 20
self.por[1] += 20
def set_reverse_gravity(self):
for subpiece in self.group:
subpiece.rect.y -= 20
self.por[1] -= 20
def move_right(self):
for subpiece in self.group:
subpiece.rect.x += 20
self.por[0] += 20
def move_left(self):
for subpiece in self.group:
subpiece.rect.x -= 20
self.por[0] -= 20
def check_right(self):
for subpiece in self.group:
# print(grid[piece_place[i][1]+5][piece_place[i][0]+2])
if grid[subpiece.getgridplace()[1]][subpiece.getgridplace()[0]+1] == 1:
grid[subpiece.getgridplace()[1]][subpiece.getgridplace()[0]+1] = 3
# print(grid)
return False
return True
def check_left(self):
for subpiece in self.group:
if grid[subpiece.getgridplace()[1]][subpiece.getgridplace()[0]-1] == 1:
grid[subpiece.getgridplace()[1]][subpiece.getgridplace()[0]-1] = 3
return False
return True
def check_bottom(self):
for subpiece in self.group:
if grid[subpiece.getgridplace()[1]+1][subpiece.getgridplace()[0]] == 1:
grid[subpiece.getgridplace()[1]+1][subpiece.getgridplace()[0]] = 3
return False
return True
def check_ccw_rotation(self):
self.rotateccw()
for subpiece in self.group:
if grid[subpiece.getgridplace()[1]][subpiece.getgridplace()[0]] == 1:
grid[subpiece.getgridplace()[1]][subpiece.getgridplace()[0]] = 3
self.rotatecw()
return False
self.rotatecw()
return True
def check_cw_rotation(self):
self.rotatecw()
for subpiece in self.group:
if grid[subpiece.getgridplace()[1]][subpiece.getgridplace()[0]] == 1:
grid[subpiece.getgridplace()[1]][subpiece.getgridplace()[0]] = 3
self.rotateccw()
return False
self.rotateccw()
return True
def rotateccw(self):
if self.shape == 'I' or self.shape == 'Z' or self.shape == 'S': # Z,S, and I piece only have 2 positions
if self.rotation >= 0:
for subpiece in self.group:
#x,y -> y,-x
subpiece.setxy(subpiece.rect.y - self.por[1] + self.por[0],-1*(subpiece.rect.x - self.por[0])+self.por[1]-20)
self.rotation -= 1
else:
self.rotatecw()
else:
for subpiece in self.group:
#x,y -> y,-x
subpiece.setxy(subpiece.rect.y - self.por[1] + self.por[0],-1*(subpiece.rect.x - self.por[0])+self.por[1]-20)
def rotatecw(self):
if self.shape == 'I' or self.shape == 'Z' or self.shape == 'S': # Z,S, and I piece only have 2 positions
if self.rotation <= 0:
for subpiece in self.group:
#x,y -> -y,x
subpiece.setxy(-1*(subpiece.rect.y - self.por[1]) + self.por[0]-20,subpiece.rect.x - self.por[0]+self.por[1])
self.rotation += 1
else:
self.rotateccw()
else:
for subpiece in self.group:
#x,y -> -y,x
subpiece.setxy(-1*(subpiece.rect.y - self.por[1]) + self.por[0]-20,subpiece.rect.x - self.por[0]+self.por[1])
def random_shape_gen(por): #starting point
letters_list = ['L','J','I','O','Z','S','T']
next_letter = rand.choice(letters_list)
if next_letter == 'L':
next_piece = Piece([por[0]-10, por[1]-30],[por[0]-10, por[1]-10],[por[0]-10, por[1]+10],[por[0]+10, por[1]+10],[por[0], por[1]],(3, 65, 174))
elif next_letter == 'O':
por[0]+=10
por[1]+=10
next_piece = Piece([por[0]-20,por[1]-20],[por[0],por[1]-20],[por[0],por[1]],[por[0]-20,por[1]],[por[0],por[1]],(255, 151, 28))
elif next_letter == 'J':
next_piece = Piece([por[0]-10, por[1]-30],[por[0]-10, por[1]-10],[por[0]-10, por[1]+10],[por[0]-30, por[1]+10],[por[0], por[1]],(255, 213, 0))
elif next_letter == 'I':
next_piece = Piece([por[0]-10, por[1]-30],[por[0]-10, por[1]-10],[por[0]-10, por[1]+10],[por[0]-10, por[1]+30],[por[0], por[1]],(146, 211, 202))
elif next_letter == 'Z':
por[0]+=10
por[1]+=10
next_piece = Piece([por[0]-20, por[1]-20],[por[0], por[1]-20],[por[0], por[1]],[por[0]+20, por[1]],[por[0]+10, por[1]+10],(255, 50, 19))
elif next_letter == 'S':
por[0]+=10
por[1]+=10
next_piece = Piece([por[0], por[1]-20],[por[0]-20, por[1]-20],[por[0]-20, por[1]],[por[0]-40, por[1]],[por[0]-10, por[1]-10],(114, 203, 59))
elif next_letter == 'T':
next_piece = Piece([por[0]-10, por[1]-30],[por[0]-10, por[1]-10],[por[0]-10, por[1]+10],[por[0]+10, por[1]-10],[por[0], por[1]],(255,0,255))
next_piece.shape = next_letter
return next_piece
def gravity(speed, speed_counter): #,moved_down
if speed_counter >= speed:
if current_piece.check_bottom() == False:
return 999999999999999999999999999999
else:
current_piece.set_gravity()
speed_counter = 0
else:
speed_counter += 1
return speed_counter
def draw_it():
for i in range(1,11):
pygame.draw.line(screen,(255,255,255),(i*20,0),(i*20,400),1)
for i in range(1,20):
pygame.draw.line(screen,(255,255,255),(0,i*20),(200,i*20),1)
def kill_piece(current_dead_piece):
dead_subpiece_group.add(current_dead_piece.Apiece,current_dead_piece.Bpiece,current_dead_piece.Cpiece,current_dead_piece.Dpiece)
for subpiece_place in piece_place:
dead_subpiece_place_list.append(subpiece_place)
current_dead_piece.group = pygame.sprite.Group()
next_piece = random_shape_gen([startingx+140,startingy+100])
return next_piece
def check_line(grid):
lines = []
for line in grid[4:-1]:
this_line = True
for char in line[1:-2]:
if char != 1:
this_line = False
if this_line == True:
lines.append(line[-1])
while len(lines) != 4:
lines.append(False)
return lines
def move_down_line_list(dead_line,dead_subpiece_place_list):
new_dead_subpiece_place_list = []
new_new_dead_subpiece_place_list = []
for dead_subpiece in dead_subpiece_place_list:
        if dead_subpiece[1] != dead_line + 4: # accommodate for extra 4 on top
new_dead_subpiece_place_list.append(dead_subpiece)
for dead_subpiece in new_dead_subpiece_place_list:
if dead_subpiece[1] < dead_line + 4:
new_new_dead_subpiece_place_list.append((dead_subpiece[0],dead_subpiece[1]+1))
else:
new_new_dead_subpiece_place_list.append((dead_subpiece[0],dead_subpiece[1]))
return new_new_dead_subpiece_place_list
def move_down_line_group(dead_line,dead_subpiece_group):
new_dead_subpiece_group = pygame.sprite.Group()
new_new_dead_subpiece_group = pygame.sprite.Group()
for dead_subpiece in dead_subpiece_group:
        if dead_subpiece.getgridplace()[1] != dead_line + 4: # accommodate for extra 4 on top
new_dead_subpiece_group.add(dead_subpiece)
for dead_subpiece in new_dead_subpiece_group:
if dead_subpiece.getgridplace()[1] < dead_line + 4:
dead_subpiece.setxy(dead_subpiece.getxy()[0],(dead_subpiece.getgridplace()[1]-3)*20)
new_new_dead_subpiece_group.add(dead_subpiece)
else:
new_new_dead_subpiece_group.add(dead_subpiece)
return new_new_dead_subpiece_group
def check_line_grouping(lines):
lines.sort(reverse=True)
line_group = []
while lines != [False,False,False,False]:
if lines[0] - 1 == lines[1]:
if lines[1] - 1 == lines[2]:
if lines[2] -1 == lines[3]:
line_group = [[lines[0],lines[1],lines[2],lines[3]]]
lines = [False,False,False,False]
else:
line_group = [[lines[0],lines[1],lines[2]],[lines[3]]]
lines = [False,False,False,False]
else:
line_group.append([lines[0],lines[1]])
lines.pop(0)
lines.pop(0)
lines.append(False)
lines.append(False)
else:
line_group.append([lines[0]])
lines.pop(0)
lines.append(False)
return line_group
def scoring(current_score,level,line_grouping):
base_score = 0
for line_set in line_grouping:
if len(line_set) == 1:
base_score += 40
elif len(line_set) == 2:
base_score += 100
elif len(line_set) == 3:
base_score += 300
elif len(line_set) == 4:
base_score += 1200
return (level+1) * base_score + current_score
def check_level_up(total_lines,level):
if level * 10 <= total_lines:
return level + 1
else:
return level
def speed_check(level,initial_speed):
if level < 9:
speed = initial_speed - level * 5
else:
speed = initial_speed - 9 * 5 - 2
if level > 9:
speed -= 1
if level > 12:
speed -= 1
if level > 15:
speed -= 1
if level > 18:
speed -= 1
if level > 28:
speed -= 1
return speed
#YOU CAN CHANGE THESE
pygame.init()
screen = pygame.display.set_mode([300,400])
startingx = 90
startingy = -50
next_piece = random_shape_gen([startingx+140,startingy+100])
current_piece = random_shape_gen([startingx,startingy])
initial_speed = 60
drawings = True
cool_down = 4
level = 0
#DONT CHANGE THESE
score = 0
speed = initial_speed
total_lines = 0
cool_down_counter = 0
speed_counter = 0
dead_subpiece_place_list = []
dead_subpiece_group = pygame.sprite.Group()
clock = pygame.time.Clock()
running = True
while running:
grid = reset_grid(dead_subpiece_place_list) #grid[y+4][x+1]
#1 = border 2 = piece_place 3 = test
piece_place = current_piece.getgridplace()
for subpiece in current_piece.group:
grid[subpiece.getgridplace()[1]][subpiece.getgridplace()[0]] = 2
#kill tetris
if pygame.event.get([pygame.QUIT]):
running = False
if pygame.key.get_pressed():
if pygame.key.get_pressed()[pygame.K_ESCAPE]:
running = False
if cool_down_counter == cool_down:
if pygame.key.get_pressed()[pygame.K_SPACE]:
if current_piece.check_bottom():
current_piece.set_gravity()
else:
next_piece_save = next_piece
next_piece = kill_piece(current_piece)
current_piece = next_piece_save
for i in range(7):
current_piece.move_left()
for i in range(4):
current_piece.set_reverse_gravity()
if pygame.key.get_pressed()[pygame.K_RIGHT]:
if current_piece.check_right():
current_piece.move_right()
# np.where(grid == 1,grid+1,grid)
elif pygame.key.get_pressed()[pygame.K_LEFT]:
if current_piece.check_left():
current_piece.move_left()
elif pygame.key.get_pressed()[pygame.K_UP]:
if current_piece.check_cw_rotation():
current_piece.rotatecw()
elif pygame.key.get_pressed()[pygame.K_DOWN]:
if current_piece.check_ccw_rotation():
current_piece.rotateccw()
elif pygame.key.get_pressed()[pygame.K_m]:
print(grid)
elif pygame.key.get_pressed()[pygame.K_k]:
for x in dead_subpiece_group:
print(x)
elif pygame.key.get_pressed()[pygame.K_s]:
drawings = True
elif pygame.key.get_pressed()[pygame.K_r]:
current_piece.set_reverse_gravity()
elif pygame.key.get_pressed()[pygame.K_q]:
pass
elif pygame.key.get_pressed()[pygame.K_p]:
print(current_piece.getgridplace())
cool_down_counter = 0
else:
cool_down_counter += 1
grid = reset_grid(dead_subpiece_place_list) #grid[y+4][x+1]
#1 = border 2 = piece_place 3 = test
piece_place = current_piece.getgridplace()
for subpiece in current_piece.group:
grid[subpiece.getgridplace()[1]][subpiece.getgridplace()[0]] = 2
# setting gravity
speed_counter = gravity(speed,speed_counter) #,moved_down
if speed_counter == 999999999999999999999999999999: # and killed == False
next_piece_save = next_piece
next_piece = kill_piece(current_piece)
current_piece = next_piece_save
for i in range(7):
current_piece.move_left()
for i in range(5):
current_piece.set_reverse_gravity()
#update screen
check = check_line(grid)
if check != [False,False,False,False]:
for dead_line in check:
if dead_line != False:
total_lines += 1
dead_subpiece_place_list = move_down_line_list(dead_line,dead_subpiece_place_list)
dead_subpiece_group = move_down_line_group(dead_line,dead_subpiece_group)
grouping = check_line_grouping(check)
        score = scoring(score,level,grouping)
print('this is the score',score)
print('this is the total lines',total_lines)
level = check_level_up(total_lines,level)
speed = speed_check(level,initial_speed)
print('level',level)
if 1 in grid[0:4,1:-2]:
running = False
screen.fill((0,0,0))
if drawings == True:
draw_it()
current_piece.group.draw(screen)
next_piece.group.draw(screen)
dead_subpiece_group.draw(screen)
pygame.display.update()
clock.tick(60)
pygame.quit()
``` |
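A worked example of the scoring rules encoded above (NES-style base scores 40/100/300/1200 scaled by level + 1), assuming `scoring()` from the script is in scope: a triple plus a single cleared at level 2 add (2 + 1) * (300 + 40) = 1020 points.
```python
# line_grouping as produced by check_line_grouping(): one triple and one single.
print(scoring(current_score=500, level=2, line_grouping=[[10, 9, 8], [5]]))   # 1520
```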
{
"source": "jkohrman/dnstwister",
"score": 3
} |
#### File: dnstwister/auth/__init__.py
```python
from functools import wraps
from flask import request, redirect, url_for, session
from urllib2 import Request, urlopen, URLError
import os
import json
from time import time
from base64 import b64decode
GOOGLE_AUTH = os.getenv('GOOGLE_AUTH')
CLIENT_ID = os.getenv('GOOGLE_CLIENT_ID')
EMAIL_DOMAIN = os.getenv('GOOGLE_EMAIL_DOMAIN')
def login_required(view):
@wraps(view)
def is_authenticated(*args, **kwargs):
if GOOGLE_AUTH == 'false':
            return view(*args, **kwargs)
try:
access_token = session.get('access_token')[0]
id_token = session.get('id_token')[0]
except:
return redirect(url_for('login'))
if True in {access_token is None, id_token is None}:
return redirect(url_for('login'))
try:
token_body = id_token.split('.')[1]
parsed_token = json.loads(b64decode(token_body + '==='))
token_issuer = parsed_token['iss'] == 'accounts.google.com'
token_audience = parsed_token['aud'] == CLIENT_ID
token_domain = parsed_token['hd'] == EMAIL_DOMAIN
token_expires = int(parsed_token['exp']) >= int(time())
except:
session.pop('access_token', None)
session.pop('id_token', None)
return 'Unauthorized exception'
if False in {token_issuer, token_audience, token_domain}:
return 'Unauthorized!<br>issuer: ' + str(token_issuer) + '<br>audience: ' + str(token_audience) + '<br>domain: ' + str(token_domain)
if token_expires is False:
return redirect(url_for('login'))
if access_token is None:
return redirect(url_for('login'))
headers = { 'Authorization': 'OAuth ' + access_token }
req = Request('https://www.googleapis.com/oauth2/v1/userinfo', None, headers)
try:
res = urlopen(req)
except URLError, e:
if e.code == 401:
session.pop('id_token', None)
session.pop('access_token', None)
return redirect(url_for('login', next=request.url))
return view(*args, **kwargs)
return is_authenticated
```
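A sketch of the manual id_token claim check performed above: take the middle (payload) segment of the JWT, base64-decode it with the extra-padding trick, and compare the claims. The token below is fabricated locally just to exercise the parsing; real tokens come from Google.
```python
import json
from base64 import b64encode, b64decode
from time import time

claims = {"iss": "accounts.google.com", "aud": "CLIENT_ID",
          "hd": "example.com", "exp": int(time()) + 3600}
fake_id_token = "header." + b64encode(json.dumps(claims).encode()).decode() + ".signature"

token_body = fake_id_token.split('.')[1]
parsed_token = json.loads(b64decode(token_body + '==='))   # extra '=' tolerated, pads short segments
print(parsed_token['iss'] == 'accounts.google.com')        # True
print(parsed_token['aud'] == 'CLIENT_ID')                  # True
print(int(parsed_token['exp']) >= int(time()))             # True
```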
#### File: views/www/login.py
```python
import flask
import os
import json
from time import time
from base64 import urlsafe_b64encode, b64decode
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from flask import request, session, redirect, url_for
from dnstwister import app
import dnstwister.auth as auth
GOOGLE_AUTH = os.getenv('GOOGLE_AUTH')
CLIENT_ID = os.getenv('GOOGLE_CLIENT_ID')
CLIENT_SECRET = os.getenv('GOOGLE_CLIENT_SECRET')
REDIRECT_URI = os.getenv('GOOGLE_REDIRECT_URI')
EMAIL_DOMAIN = os.getenv('GOOGLE_EMAIL_DOMAIN')
SCOPE = os.getenv('GOOGLE_SCOPE')
@app.route(r'/login')
def login():
    if GOOGLE_AUTH == 'false':
return redirect(url_for('index'))
state = urlsafe_b64encode(os.urandom(24))
session['google_state'] = state
oauthurl = 'https://accounts.google.com/o/oauth2/auth?client_id=' + CLIENT_ID + '&redirect_uri=' + REDIRECT_URI + '&scope=' + SCOPE + '&response_type=code&state=' + state
if EMAIL_DOMAIN is not None:
oauthurl = oauthurl + '&hd=' + EMAIL_DOMAIN
next_dest = request.args.get('next')
if next_dest is not None:
session['next'] = next_dest
oauthurl = oauthurl + '&next=' + next_dest
return redirect(oauthurl)
@app.route(r'/login/callback')
def authorized():
    if GOOGLE_AUTH == 'false':
return redirect(url_for('index'))
if request.args.get('error') is not None:
session.pop('next', None)
session.pop('access_token', None)
return request.args.get('error')
return redirect(url_for('index'))
google_state = request.args.get('state')
session_state = session['google_state']
if google_state != session_state:
session.pop('google_state', None)
session.pop('next', None)
session.pop('access_token', None)
return 'CSRF Error: Token mismatch!<br>' + session_state + '<br>' + google_state
next_dest = request.args.get('next')
auth_code = request.args.get('code')
data = {
'code': auth_code,
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'redirect_uri': REDIRECT_URI,
'grant_type': 'authorization_code'
}
url = 'https://accounts.google.com/o/oauth2/token'
req = Request(url, urlencode(data).encode())
res = json.loads(urlopen(req).read().decode())
access_token = res['access_token']
id_token = res['id_token']
try:
token_body = id_token.split('.')[1]
parsed_token = json.loads(b64decode(token_body + '==='))
token_issuer = parsed_token['iss'] == 'accounts.google.com'
token_audience = parsed_token['aud'] == CLIENT_ID
token_domain = parsed_token['hd'] == EMAIL_DOMAIN
token_issued = int(time()) - 10 <= int(parsed_token['iat']) <= int(time()) + 10
token_expires = int(parsed_token['exp']) >= int(time())
except:
session.pop('google_state', None)
session.pop('next', None)
session.pop('access_token', None)
session.pop('id_token', None)
        return 'Unauthorized exception'
    if not (token_issuer and token_audience and token_domain and token_issued and token_expires):
return 'Unauthorized:<br>issuer: ' + str(token_issuer) + '<br>audience: ' + str(token_audience) + '<br>domain: ' + str(token_domain) + '<br>issued: ' + str(token_issued) + '<br>expires: ' + str(token_expires)
    session['id_token'] = id_token
    session['access_token'] = access_token
if 'next' in session:
next_dest = session['next']
session.pop('next', None)
try:
dest = url_for(next_dest)
except:
            dest = url_for('index')
else:
dest = url_for('index')
return redirect(dest)
@app.route(r'/logout')
def logout():
    session.pop('access_token', None)
    session.pop('id_token', None)
    session.modified = True
return redirect('/')
```
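Both views above decode the middle segment of the OAuth `id_token`, which is a base64-encoded JSON document; appending `'==='` before `b64decode` is a cheap way to satisfy the padding requirement for any segment length. A minimal, self-contained sketch of that step (the claims below are made up for illustration):

```python
import json
from base64 import b64encode, b64decode
claims = {'iss': 'accounts.google.com', 'aud': 'my-client-id', 'exp': 9999999999}
segment = b64encode(json.dumps(claims).encode()).decode().rstrip('=')  # mimic one token segment
parsed = json.loads(b64decode(segment + '==='))  # over-padding is tolerated by b64decode
assert parsed['iss'] == 'accounts.google.com'
```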
#### File: tests/api/test_api.py
```python
import pytest
import webtest.app
from dnstwister import tools
def test_api_root(webapp):
"""Test the API root."""
assert webapp.get('/api/').json == {
'domain_fuzzer_url': 'http://localhost:80/api/fuzz/{domain_as_hexadecimal}',
'domain_to_hexadecimal_url': 'http://localhost:80/api/to_hex/{domain}',
'ip_resolution_url': 'http://localhost:80/api/ip/{domain_as_hexadecimal}',
'parked_check_url': 'http://localhost:80/api/parked/{domain_as_hexadecimal}',
'google_safe_browsing_url': 'http://localhost:80/api/safebrowsing/{domain_as_hexadecimal}',
'whois_url': 'http://localhost:80/api/whois/{domain_as_hexadecimal}',
'url': 'http://localhost:80/api/',
}
def test_api_root_redirect(webapp):
"""Test the /api -> /api/ redirect."""
request = webapp.get('/api')
assert request.status_code == 301
assert request.headers['location'] == 'http://localhost/api/'
def test_api_domain_validation(webapp):
"""Test that domains are validated on all API endpoints."""
malformed_domain = 'example'
endpoints = ('fuzz', 'to_hex', 'ip', 'parked', 'safebrowsing', 'whois')
for endpoint in endpoints:
with pytest.raises(webtest.app.AppError) as err:
webapp.get('/api/{}/{}'.format(
endpoint,
tools.encode_domain(malformed_domain)
))
assert '400 BAD REQUEST' in err.value.message
def test_unicode_basics(webapp):
"""Test that Unicode domains work on all endpoints."""
unicode_domain = 'xn--sterreich-z7a.icom.museum'.decode('idna')
endpoints = ('fuzz', 'ip', 'parked', 'safebrowsing', 'whois')
for endpoint in endpoints:
webapp.get('/api/{}/{}'.format(
endpoint,
tools.encode_domain(unicode_domain),
))
webapp.get('/api/to_hex/{}'.format(unicode_domain.encode('idna')))
```
#### File: dnstwister/tests/test_search_outliers.py
```python
import binascii
def test_no_domains_key(webapp):
"""Test a POST without 'domains' being set fails."""
response = webapp.post('/search')
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost:80/error/2'
def test_empty_domains_key(webapp):
"""Test a POST with 'domains' being set to whitespace fails."""
response = webapp.post('/search', {'domains': ' '})
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost:80/error/2'
def test_suggestion(webapp):
"""Test that submitting no valid domains fails.
Where a domain could be reasonably suggested, it is.
"""
response = webapp.post('/search', {'domains': 'example'}).follow()
assert response.status_code == 302
domain = 'example.com'
enc_domain = binascii.hexlify(domain)
expected_redirect = 'http://localhost:80/error/0?suggestion=' + enc_domain
assert response.headers['location'] == expected_redirect
def test_no_valid_domains_only(webapp):
"""Test invalid domains not in suggestions."""
query = 'abc ?@<>.'
response = webapp.post('/search', {'domains': query}).follow()
assert response.status_code == 302
assert response.headers['location'].endswith('=6162632e636f6d')
assert binascii.unhexlify('6162632e636f6d') == 'abc.com'
def test_suggestion_rendered(webapp):
"""Test suggestion rendered on index."""
response = webapp.post('/search', {'domains': 'example'}).follow().follow()
assert 'example.com' in response.body
def test_get_errors(webapp):
"""Test funny URLs for a GET search."""
response = webapp.get('/search/__<<>')
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost:80/error/0'
def test_no_suggestion_many_words(webapp):
"""Test many search terms are dropped in suggestions."""
query = 'j s d f i j s'
response = webapp.post('/search', {'domains': query}).follow()
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost:80/error/0'
def test_suggestion_bad_data(webapp):
"""Test that submitting invalid data in suggestion doesn't crash the page.
"""
response = webapp.get('/error/0?suggestion')
assert response.status_code == 200
response = webapp.get('/error/0?suggestion=')
assert response.status_code == 200
response = webapp.get('/error/0?suggestion=sdlkfjsdlfkjsdf')
assert response.status_code == 200
response = webapp.get('/error/0?suggestion=<script>...<?')
assert response.status_code == 200
response = webapp.get('/error/0?suggestion=a.com&cat=2')
assert response.status_code == 200
def test_fix_comma_typo(webapp):
"""Test accidentally entering in a comma instead of period is corrected.
"""
malformed_domain = 'example,com'
expected_suggestion = 'example.com'
response = webapp.post('/search', {'domains': malformed_domain}).follow().follow()
assert expected_suggestion in response.body
def test_fix_slash_typo(webapp):
"""Test accidentally entering in a slash instead of period is corrected.
"""
malformed_domain = 'example/com'
expected_suggestion = 'example.com'
response = webapp.post('/search', {'domains': malformed_domain}).follow().follow()
assert expected_suggestion in response.body
def test_fix_space_typo(webapp):
"""Test accidentally entering in a space instead of period is corrected.
"""
malformed_domain = 'example com'
expected_suggestion = 'example.com'
response = webapp.post('/search', {'domains': malformed_domain}).follow().follow()
assert expected_suggestion in response.body
def test_post_unicode(webapp):
"""Test of end-to-end unicode."""
unicode_domain = 'höt.com'
assert unicode_domain == 'h\xc3\xb6t.com'
expected_punycode = 'xn--ht-fka.com'
expected_hex = binascii.hexlify(expected_punycode)
assert expected_hex == '786e2d2d68742d666b612e636f6d'
response = webapp.post('/search', {'domains': unicode_domain}).follow()
assert response.status_code == 200
assert response.request.url == 'http://localhost/search/{}'.format(expected_hex)
assert unicode_domain in response.body
assert 'höt.com (xn--ht-fka.com)' in response.body
``` |
{
"source": "jkokoruda/mt940",
"score": 2
} |
#### File: mt940/mt940_tests/test_tags.py
```python
import pytest
import mt940
from mt940 import tags
from mt940 import models
import pprint
@pytest.fixture
def long_statement_number():
with open('mt940_tests/self-provided/long_statement_number.sta') as fh:
return fh.read()
class MyStatementNumber(tags.Tag):
'''Statement number / sequence number
Pattern: 10n
'''
id = 28
pattern = r'''
(?P<statement_number>\d{1,10}) # 10n
$'''
def test_specify_different_tag_classes(long_statement_number):
tag_parser = MyStatementNumber()
transactions = mt940.models.Transactions(tags={
tag_parser.id: tag_parser
})
transactions.parse(long_statement_number)
assert transactions.data.get('statement_number') == '1810118101'
@pytest.fixture
def ASNB_mt940_data():
with open('mt940_tests/ASNB/0708271685_09022020_164516.940.txt') as fh:
return fh.read()
def test_ASNB_tags(ASNB_mt940_data):
tag_parser = tags.StatementASNB()
trs = mt940.models.Transactions(tags={
tag_parser.id: tag_parser
})
trs.parse(ASNB_mt940_data)
assert trs.data == {
'account_identification': 'NL81ASNB9999999999',
'transaction_reference': '0000000000',
'statement_number': '31',
'sequence_number': '1',
'final_opening_balance': models.Balance(
status='C',
amount=models.Amount('404.81', 'C', 'EUR'),
date=models.Date(2020, 1, 31),
),
'final_closing_balance': models.Balance(
status='C',
amount=models.Amount('501.23', 'C', 'EUR'),
date=models.Date(2020, 1, 31),
),
}
assert len(trs) == 8
# test first entry
td = trs.transactions[0].data.pop('transaction_details')
pprint.pprint(trs.data)
pprint.pprint(trs.data['final_opening_balance'])
pprint.pprint(type(trs.data['final_opening_balance']))
pprint.pprint(trs.data['final_opening_balance'].__dict__)
assert trs.transactions[0].data == {
'status': 'D',
'funds_code': None,
'amount': models.Amount('65.00', 'D', 'EUR'),
'id': 'NOVB',
'customer_reference': 'NL47INGB9999999999',
'bank_reference': None,
'extra_details': 'hr gjlm paulissen',
'currency': 'EUR',
'date': models.Date(2020, 1, 1),
'entry_date': models.Date(2020, 1, 1),
'guessed_entry_date': models.Date(2020, 1, 1),
}
assert td == 'NL47INGB9999999999 hr gjlm paulissen\nBetaling sieraden'
assert trs.transactions[1].data['amount'] == models.Amount(
'1000.00', 'C', 'EUR')
assert trs.transactions[2].data['amount'] == models.Amount(
'801.55', 'D', 'EUR')
assert trs.transactions[3].data['amount'] == models.Amount(
'1.65', 'D', 'EUR')
assert trs.transactions[4].data['amount'] == models.Amount(
'828.72', 'C', 'EUR')
assert trs.transactions[5].data['amount'] == models.Amount(
'1000.00', 'D', 'EUR')
assert trs.transactions[6].data['amount'] == models.Amount(
'1000.18', 'C', 'EUR')
td = trs.transactions[7].data.pop('transaction_details')
assert trs.transactions[7].data == {
'status': 'D',
'funds_code': None,
'amount': models.Amount('903.76', 'D', 'EUR'),
'id': 'NIDB',
'customer_reference': 'NL08ABNA9999999999',
'bank_reference': None,
'extra_details': 'international card services',
'currency': 'EUR',
'date': models.Date(2020, 1, 31),
'entry_date': models.Date(2020, 1, 31),
'guessed_entry_date': models.Date(2020, 1, 31),
}
assert td[0:46] == 'NL08ABNA9999999999 international card services'
assert td[47:112] == \
'000000000000000000000000000000000 0000000000000000 Betaling aan I'
assert td[113:176] == \
'CS 99999999999 ICS Referentie: 2020-01-31 21:27 000000000000000'
``` |
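For comparison, the library's top-level `mt940.parse` helper parses a statement file with the default tag set; note the test above swaps in `StatementASNB` for tag 61 precisely because the default pattern differs, so this sketch is not guaranteed to reproduce the asserted values:

```python
import mt940
# Parse the same ASNB fixture with the default tags (path taken from the fixture above).
transactions = mt940.parse('mt940_tests/ASNB/0708271685_09022020_164516.940.txt')
for transaction in transactions:
    print(transaction.data.get('date'), transaction.data.get('amount'))
```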
{
"source": "jkokosar/resolwe",
"score": 2
} |
#### File: resolwe/flow/finders.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import import_string
class BaseProcessesFinder(object):
"""
A base processes loader to be used for custom staticfiles finder
classes.
"""
def find_processes(self):
        raise NotImplementedError('subclasses of BaseProcessesFinder must provide a find_processes() method')
    def find_descriptors(self):
        raise NotImplementedError('subclasses of BaseProcessesFinder must provide a find_descriptors() method')
class FileSystemProcessesFinder(BaseProcessesFinder):
def find_processes(self):
return getattr(settings, 'FLOW_PROCESSES_DIRS', ())
def find_descriptors(self):
return getattr(settings, 'FLOW_DESCRIPTORS_DIRS', ())
class AppDirectoriesFinder(BaseProcessesFinder):
def _find_folders(self, folder_name):
found_folders = []
for app_config in apps.get_app_configs():
folder_path = os.path.join(app_config.path, folder_name)
if os.path.isdir(folder_path):
found_folders.append(folder_path)
return found_folders
def find_processes(self):
return self._find_folders('processes')
def find_descriptors(self):
return self._find_folders('descriptors')
def get_finders():
for finder_path in settings.FLOW_PROCESSES_FINDERS:
yield get_finder(finder_path)
def get_finder(import_path):
Finder = import_string(import_path)
if not issubclass(Finder, BaseProcessesFinder):
raise ImproperlyConfigured(
'Finder "{}" is not a subclass of "{}"'.format(Finder, BaseProcessesFinder))
return Finder()
```
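A hedged sketch of the Django settings these finders read; the dotted paths assume the module is importable as `resolwe.flow.finders` (per the file path above), and the directories are placeholders:

```python
# settings.py (sketch)
FLOW_PROCESSES_FINDERS = [
    'resolwe.flow.finders.FileSystemProcessesFinder',
    'resolwe.flow.finders.AppDirectoriesFinder',
]
# Only consulted by FileSystemProcessesFinder.
FLOW_PROCESSES_DIRS = ['/srv/resolwe/processes']
FLOW_DESCRIPTORS_DIRS = ['/srv/resolwe/descriptors']
```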
#### File: resolwe/flow/models.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import functools
import json
import jsonschema
import os
import re
import six
from django import template
from django.db import models
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.contrib.postgres.fields import ArrayField, JSONField
from django.contrib.staticfiles import finders
from versionfield import VersionField
from autoslug import AutoSlugField
VERSION_NUMBER_BITS = (8, 10, 14)
class BaseModel(models.Model):
"""Abstract model that ncludes common fields for other models."""
class Meta:
"""BaseModel Meta options."""
abstract = True
unique_together = ('slug', 'version')
default_permissions = ()
#: URL slug
slug = AutoSlugField(populate_from='name', unique_with='version', editable=True, max_length=100)
#: process version
version = VersionField(number_bits=VERSION_NUMBER_BITS, default=0)
#: object name
name = models.CharField(max_length=100)
#: creation date and time
created = models.DateTimeField(auto_now_add=True)
#: modified date and time
modified = models.DateTimeField(auto_now=True)
#: user that created the entry
contributor = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT)
def __str__(self):
return self.name
class Process(BaseModel):
"""Postgres model for storing processs."""
class Meta(BaseModel.Meta):
"""Process Meta options."""
permissions = (
("view_process", "Can view process"),
("share_process", "Can share process"),
("owner_process", "Is owner of the process"),
)
PERSISTENCE_RAW = 'RAW'
PERSISTENCE_CACHED = 'CAC'
PERSISTENCE_TEMP = 'TMP'
PERSISTENCE_CHOICES = (
(PERSISTENCE_RAW, 'Raw'),
(PERSISTENCE_CACHED, 'Cached'),
(PERSISTENCE_TEMP, 'Temp'),
)
PRIORITY_HIGH = 'HI'
PRIORITY_NORMAL = 'NO'
PRIORITY_CHOICES = (
(PRIORITY_NORMAL, 'Normal'),
(PRIORITY_HIGH, 'High'),
)
#: data type
type = models.CharField(max_length=100, validators=[
RegexValidator(
regex=r'^data:[a-z0-9:]+:$',
message='Type may be alphanumerics separated by colon',
code='invalid_type'
)
])
#: category
category = models.CharField(max_length=200, default='other', validators=[
RegexValidator(
regex=r'^([a-z0-9]+[:\-])*[a-z0-9]+:$',
message='Category may be alphanumerics separated by colon',
code='invalid_category'
)
])
persistence = models.CharField(max_length=3, choices=PERSISTENCE_CHOICES, default=PERSISTENCE_RAW)
"""
data PERSISTENCE, cached and temp must be idempotent
- :attr:`Processor.PERSISTENCE_RAW` / ``'raw'``
- :attr:`Processor.PERSISTENCE_CACHED` / ``'cached'``
- :attr:`Processor.PERSISTENCE_TEMP` / ``'temp'``
"""
priority = models.CharField(max_length=2, choices=PRIORITY_CHOICES, default=PRIORITY_NORMAL)
"""
data PRIORITY
- :attr:`Processor.PRIORITY_NORMAL` / ``'normal'``
- :attr:`Processor.PRIORITY_HIGH` / ``'high'``
"""
#: detailed description
description = models.TextField(default='')
#: template for name of Data object created with Process
data_name = models.CharField(max_length=200, null=True, blank=True)
input_schema = JSONField(blank=True, default=list)
"""
process input schema (describes input parameters, form layout **"Inputs"** for :attr:`Data.input`)
Handling:
- schema defined by: *dev*
- default by: *user*
- changable by: *none*
"""
output_schema = JSONField(blank=True, default=list)
"""
process output schema (describes output JSON, form layout **"Results"** for :attr:`Data.output`)
Handling:
- schema defined by: *dev*
- default by: *dev*
- changable by: *dev*
Implicitly defined fields (by :meth:`server.management.commands.register` or :meth:`server.tasks.manager`):
- ``progress`` of type ``basic:float`` (from 0.0 to 1.0)
- ``proc`` of type ``basic:group`` containing:
- ``stdout`` of type ``basic:text``
- ``rc`` of type ``basic:integer``
- ``task`` of type ``basic:string`` (celery task id)
- ``worker`` of type ``basic:string`` (celery worker hostname)
- ``runtime`` of type ``basic:string`` (runtime instance hostname)
- ``pid`` of type ``basic:integer`` (process ID)
"""
flow_collection = models.CharField(max_length=100, null=True, blank=True)
"""
Automatically add Data object created with this processor to a
special collection representing a data-flow. If all input Data
objects belong to the same collection, add newly created Data object
to it, otherwise create a new collection.
If `DescriptorSchema` object with `type` matching this field
exists, reference it in the collection's `descriptor_schema` field.
"""
run = JSONField(default=dict)
"""
process command and environment description for internal use
Handling:
- schema defined by: *dev*
- default by: *dev*
- changable by: *dev*
Required definitions:
- ``engine`` .. engine to run the processor with
- ``script`` .. script with code to run
"""
def render_template(template_string, context):
"""Render template based on Dango template language."""
template_headers = [
'{% load resource_filters %}',
'{% load process_fields %}',
'{% load mathfilters %}',
]
custom_template_tags = getattr(settings, 'RESOLWE_CUSTOM_TEMPLATE_TAGS', [])
if not isinstance(custom_template_tags, list):
raise KeyError("`RESOLWE_CUSTOM_TEMPLATE_TAGS` setting must be a list.")
template_headers.extend(
['{{% load {} %}}'.format(template_tag) for template_tag in custom_template_tags]
)
return template.Template(''.join(template_headers) + template_string).render(context)
def render_descriptor(data):
"""Render data descriptor.
The rendering is based on descriptor schema and input context.
:param data: data instance
:type data: :obj:`server.models.Data` or :obj:`dict`
"""
if not data.descriptor_schema or not data.process.input_schema:
return
inputs = data.input.copy()
hydrate_input_references(inputs, data.process.input_schema, hydrate_values=False)
template_context = template.Context(inputs)
# Set default values
for field_schema, _, path in iterate_schema(data.descriptor, data.descriptor_schema.schema, 'descriptor'):
if 'default' in field_schema:
tmpl = field_schema['default']
if field_schema['type'].startswith('list:'):
tmpl = [render_template(tmp, template_context)
if isinstance(tmp, six.string_types) else tmp
for tmp in tmpl]
elif isinstance(tmpl, six.string_types):
tmpl = render_template(tmpl, template_context)
dict_dot(data, path, tmpl)
class Data(BaseModel):
"""Postgres model for storing data."""
class Meta(BaseModel.Meta):
"""Data Meta options."""
permissions = (
("view_data", "Can view data"),
("edit_data", "Can edit data"),
("share_data", "Can share data"),
("download_data", "Can download files from data"),
("owner_data", "Is owner of the data"),
)
STATUS_UPLOADING = 'UP'
STATUS_RESOLVING = 'RE'
STATUS_WAITING = 'WT'
STATUS_PROCESSING = 'PR'
STATUS_DONE = 'OK'
STATUS_ERROR = 'ER'
STATUS_DIRTY = 'DR'
STATUS_CHOICES = (
(STATUS_UPLOADING, 'Uploading'),
(STATUS_RESOLVING, 'Resolving'),
(STATUS_WAITING, 'Waiting'),
(STATUS_PROCESSING, 'Processing'),
(STATUS_DONE, 'Done'),
(STATUS_ERROR, 'Error'),
(STATUS_DIRTY, 'Dirty')
)
#: processor started date and time (set by :meth:`server.tasks.manager`)
started = models.DateTimeField(blank=True, null=True)
#: processor finished date date and time (set by :meth:`server.tasks.manager`)
finished = models.DateTimeField(blank=True, null=True)
#: checksum field calculated on inputs
checksum = models.CharField(max_length=40, validators=[
RegexValidator(
regex=r'^[0-9a-f]{40}$',
message='Checksum is exactly 40 alphanumerics',
code='invalid_checksum'
)
], blank=True, null=True)
status = models.CharField(max_length=2, choices=STATUS_CHOICES, default=STATUS_RESOLVING)
"""
:class:`Data` status
- :attr:`Data.STATUS_UPLOADING` / ``'uploading'``
- :attr:`Data.STATUS_RESOLVING` / ``'resolving'``
- :attr:`Data.STATUS_WAITING` / ``'waiting'``
- :attr:`Data.STATUS_PROCESSING` / ``'processing'``
- :attr:`Data.STATUS_DONE` / ``'done'``
- :attr:`Data.STATUS_ERROR` / ``'error'``
"""
#: process used to compute the data object
process = models.ForeignKey('Process', on_delete=models.PROTECT)
#: process id
process_pid = models.PositiveIntegerField(blank=True, null=True)
#: progress
process_progress = models.PositiveSmallIntegerField(default=0)
#: return code
process_rc = models.PositiveSmallIntegerField(blank=True, null=True)
    #: info log message
    process_info = ArrayField(models.CharField(max_length=255), default=list)
    #: warning log message
    process_warning = ArrayField(models.CharField(max_length=255), default=list)
    #: error log message
    process_error = ArrayField(models.CharField(max_length=255), default=list)
#: actual inputs used by the processor
input = JSONField(default=dict)
#: actual outputs of the processor
output = JSONField(default=dict)
#: data descriptor schema
descriptor_schema = models.ForeignKey('DescriptorSchema', blank=True, null=True, on_delete=models.PROTECT)
#: actual descriptor
descriptor = JSONField(default=dict)
# track if user set the data name explicitly
named_by_user = models.BooleanField(default=False)
def __init__(self, *args, **kwargs):
super(Data, self).__init__(*args, **kwargs)
self._original_name = self.name
def save_storage(self, instance, schema):
"""Save basic:json values to a Storage collection."""
for field_schema, fields in iterate_fields(instance, schema):
name = field_schema['name']
value = fields[name]
if field_schema.get('type', '').startswith('basic:json:'):
if value and not self.pk:
raise ValidationError(
'Data object must be `created` before creating `basic:json:` fields')
if isinstance(value, int):
# already in Storage
continue
storage = Storage.objects.create(
name='Storage for data id {}'.format(self.pk),
contributor=self.contributor,
data_id=self.pk,
json=value,
)
# `value` is copied by value, so `fields[name]` must be changed
fields[name] = storage.pk
def save(self, render_name=False, *args, **kwargs):
# Generate the descriptor if one is not already set.
if self.name != self._original_name:
self.named_by_user = True
create = self.pk is None
if create:
# Default values for INPUT
input_schema = self.process.input_schema # pylint: disable=no-member
for field_schema, fields, path in iterate_schema(self.input, input_schema):
if 'default' in field_schema and field_schema['name'] not in fields:
dict_dot(self.input, path, field_schema['default'])
if not self.name:
self._render_name()
else:
self.named_by_user = True
elif render_name:
self._render_name()
if not self.descriptor:
render_descriptor(self)
self.save_storage(self.output, self.process.output_schema) # pylint: disable=no-member
hydrate_size(self)
if create:
validate_schema(self.input, self.process.input_schema) # pylint: disable=no-member
if self.descriptor_schema:
validate_schema(self.descriptor, self.descriptor_schema.schema) # pylint: disable=no-member
elif self.descriptor and self.descriptor != {}:
raise ValueError("`descriptor_schema` must be defined if `descriptor` is given")
path_prefix = os.path.join(settings.FLOW_EXECUTOR['DATA_DIR'], str(self.pk))
output_schema = self.process.output_schema # pylint: disable=no-member
if self.status == Data.STATUS_DONE:
validate_schema(self.output, output_schema, path_prefix=path_prefix)
else:
validate_schema(self.output, output_schema, path_prefix=path_prefix,
test_required=False)
super(Data, self).save(*args, **kwargs)
def _render_name(self):
"""Render data name.
The rendering is based on name template (`process.data_name`) and
input context.
"""
if not self.process.data_name or self.named_by_user: # pylint: disable=no-member
return
inputs = self.input.copy() # pylint: disable=no-member
hydrate_input_references(inputs, self.process.input_schema, hydrate_values=False) # pylint: disable=no-member
template_context = template.Context(inputs)
self.name = render_template(self.process.data_name, template_context) # pylint: disable=no-member
class DescriptorSchema(BaseModel):
"""Postgres model for storing descriptors."""
class Meta(BaseModel.Meta):
"""DescriptorSchema Meta options."""
permissions = (
("view_descriptorschema", "Can view descriptor schema"),
("edit_descriptorschema", "Can edit descriptor schema"),
("share_descriptorschema", "Can share descriptor schema"),
("owner_descriptorschema", "Is owner of the description schema"),
)
#: detailed description
description = models.TextField(blank=True)
#: user descriptor schema represented as a JSON object
schema = JSONField(default=dict)
class Trigger(BaseModel):
"""Postgres model for storing triggers."""
class Meta(BaseModel.Meta):
"""Data Meta options."""
permissions = (
("view_trigger", "Can view trigger"),
("edit_trigger", "Can edit trigger"),
("share_trigger", "Can share trigger"),
("owner_trigger", "Is owner of the trigger"),
)
#: data type of triggering data objects
type = models.CharField(max_length=100, validators=[
RegexValidator(
regex=r'^data:[a-z0-9:]+:$',
message='Type may be alphanumerics separated by colon',
code='invalid_type'
)
])
#: trigger condition
trigger = models.CharField(max_length=500)
#: path to where the id is inserted
trigger_input = models.CharField(max_length=100)
#: process used
process = models.ForeignKey('Process', blank=True, null=True, on_delete=models.SET_NULL)
#: input settings of the processor
input = JSONField(default=dict)
#: corresponding collection
collection = models.ForeignKey('Collection')
#: does the trigger run on its own
autorun = models.BooleanField(default=False)
class Storage(BaseModel):
"""Postgres model for storing storages."""
#: corresponding data object
data = models.ForeignKey('Data')
#: actual JSON stored
json = JSONField()
class LazyStorageJSON(object):
"""Lazy load `json` attribute of `Storage` object."""
def __init__(self, **kwargs):
self._kwargs = kwargs
self._json = None
def _get_storage(self):
"""Load `json` field from `Storage` object."""
if self._json is None:
self._json = Storage.objects.get(**self._kwargs).json
def __getitem__(self, key):
self._get_storage()
return self._json[key]
def __repr__(self):
self._get_storage()
return self._json.__repr__()
class BaseCollection(BaseModel):
"""Template for Postgres model for storing collection."""
class Meta(BaseModel.Meta):
"""Collection Meta options."""
abstract = True
permissions = (
("view_collection", "Can view collection"),
("edit_collection", "Can edit collection"),
("share_collection", "Can share collection"),
("download_collection", "Can download files from collection"),
("add_collection", "Can add data objects to collection"),
("owner_collection", "Is owner of the collection"),
)
#: detailed description
description = models.TextField(blank=True)
settings = JSONField(default=dict)
public_processes = models.ManyToManyField(Process)
data = models.ManyToManyField(Data)
#: collection descriptor schema
descriptor_schema = models.ForeignKey(DescriptorSchema, blank=True, null=True, on_delete=models.PROTECT)
#: collection descriptor
descriptor = JSONField(default=dict)
class Collection(BaseCollection):
"""Postgres model for storing collection."""
pass
def iterate_fields(fields, schema, path_prefix=None):
"""Iterate over all field values sub-fields.
This will iterate over all field values. Some fields defined in the schema
might not be visited.
:param fields: field values to iterate over
:type fields: dict
:param schema: schema to iterate over
:type schema: dict
:return: (field schema, field value)
:rtype: tuple
"""
if path_prefix is not None and path_prefix != '' and path_prefix[-1] != '.':
path_prefix += '.'
schema_dict = {val['name']: val for val in schema}
for field_id, properties in fields.items():
path = '{}{}'.format(path_prefix, field_id) if path_prefix is not None else None
if field_id not in schema_dict:
raise KeyError("Field definition ({}) missing in schema".format(field_id))
if 'group' in schema_dict[field_id]:
for rvals in iterate_fields(properties, schema_dict[field_id]['group'], path):
yield (rvals if path_prefix is not None else rvals[:2])
else:
rvals = (schema_dict[field_id], fields, path)
yield (rvals if path_prefix is not None else rvals[:2])
def iterate_schema(fields, schema, path_prefix=''):
"""Iterate over all schema sub-fields.
This will iterate over all field definitions in the schema. Some field v
alues might be None.
:param fields: field values to iterate over
:type fields: dict
:param schema: schema to iterate over
:type schema: dict
:param path_prefix: dot separated path prefix
:type path_prefix: str
:return: (field schema, field value, field path)
:rtype: tuple
"""
if path_prefix and path_prefix[-1] != '.':
path_prefix += '.'
for field_schema in schema:
name = field_schema['name']
if 'group' in field_schema:
for rvals in iterate_schema(fields[name] if name in fields else {},
field_schema['group'], '{}{}'.format(path_prefix, name)):
yield rvals
else:
yield (field_schema, fields, '{}{}'.format(path_prefix, name))
def validation_schema(name):
"""Return json schema for json validation."""
schemas = {
'processor': 'processorSchema.json',
'descriptor': 'descriptorSchema.json',
'field': 'fieldSchema.json',
'type': 'typeSchema.json',
}
if name not in schemas:
        raise ValueError('Unknown validation schema name: {}'.format(name))
field_schema_file = finders.find('flow/{}'.format(schemas['field']), all=True)[0]
field_schema = open(field_schema_file, 'r').read()
if name == 'field':
return json.loads(field_schema.replace('{{PARENT}}', ''))
schema_file = finders.find('flow/{}'.format(schemas[name]), all=True)[0]
schema = open(schema_file, 'r').read()
return json.loads(schema.replace('{{FIELD}}', field_schema).replace('{{PARENT}}', '/field'))
TYPE_SCHEMA = validation_schema('type')
def validate_schema(instance, schema, test_required=True, path_prefix=None):
"""Check if DictField values are consistent with our data types.
Perform basic JSON schema validation and our custom validations:
* check that required fields are given (if `test_required` is set
to ``True``)
* check if ``basic:file:`` and ``list:basic:file`` fields match
regex given in schema (only if ``validate_regex`` is defined in
      schema for corresponding fields) and exist (only if
``path_prefix`` is given)
* check if directories referenced in ``basic:dir:`` and
      ``list:basic:dir:`` fields exist (only if ``path_prefix`` is
given)
* check that referenced ``Data`` objects (in ``data:<data_type>``
      and ``list:data:<data_type>`` fields) exist and are of type
``<data_type>``
* check that referenced ``Storage`` objects (in ``basic:json``
      fields) exist
:param list instance: Instance to be validated
:param list schema: Schema for validation
:param bool test_required: Flag for testing if all required fields
        are present. It is useful if validation is run before the ``Data``
        object is finished and some fields are still missing
        (default: ``True``)
:param str path_prefix: path prefix used for checking if files and
directories exist (default: ``None``)
:rtype: None
:raises ValidationError: if ``instance`` doesn't match schema
defined in ``schema``
"""
def validate_refs(field):
if 'refs' in field:
for refs_filename in field['refs']:
refs_path = os.path.join(path_prefix, refs_filename)
if not (os.path.isfile(refs_path) or os.path.isdir(refs_path)):
raise ValidationError(
"File referenced in `refs` ({}) does not exist".format(refs_path))
def validate_file(field, regex):
"""Validate file name (and check that it exists)."""
filename = field['file']
if regex and not re.search(regex, filename):
raise ValidationError(
"File name {} does not match regex {}".format(filename, regex))
if path_prefix:
path = os.path.join(path_prefix, filename)
if not os.path.isfile(path):
raise ValidationError("Referenced file ({}) does not exist".format(path))
validate_refs(field)
def validate_dir(field):
"""Check that dirs and referenced files exists."""
dirname = field['dir']
if path_prefix:
path = os.path.join(path_prefix, dirname)
if not os.path.isdir(path):
raise ValidationError("Referenced dir ({}) does not exist".format(path))
validate_refs(field)
def validate_data(data_pk, type_):
""""Check that `Data` objects exist and is of right type."""
data_qs = Data.objects.filter(pk=data_pk).values('process__type')
if not data_qs.exists():
raise ValidationError(
"Referenced `Data` object does not exist (id:{})".format(data_pk))
data = data_qs.first()
if not data['process__type'].startswith(type_):
raise ValidationError(
"Data object of type `{}` is required, but type `{}` is given. "
"(id:{})".format(type_, data['process__type'], data_pk))
for _schema, _fields, _ in iterate_schema(instance, schema):
name = _schema['name']
if test_required and _schema.get('required', True) and name not in _fields:
raise ValidationError("Required field \"{}\" not given.".format(name))
if name in _fields:
field = _fields[name]
type_ = _schema.get('type', "")
try:
jsonschema.validate([{"type": type_, "value": field}], TYPE_SCHEMA)
except jsonschema.exceptions.ValidationError as ex:
raise ValidationError(ex.message)
if type_ == 'basic:file:':
validate_file(field, _schema.get('validate_regex'))
elif type_ == 'list:basic:file:':
for obj in field:
validate_file(obj, _schema.get('validate_regex'))
elif type_ == 'basic:dir:':
validate_dir(field)
elif type_ == 'list:basic:dir:':
for obj in field:
validate_dir(obj)
elif type_ == 'basic:json:' and not Storage.objects.filter(pk=field).exists():
raise ValidationError(
"Referenced `Storage` object does not exist (id:{})".format(field))
elif type_.startswith('data:'):
validate_data(field, type_)
elif type_.startswith('list:data:'):
for data_id in field:
validate_data(data_id, type_[5:]) # remove `list:` from type
for field_schema, fields in iterate_fields(instance, schema):
pass # check that schema definitions exist for all fields
def _hydrate_values(output, output_schema, data):
"""Hydrate basic:file and basic:json values.
Find fields with basic:file type and assign a full path to the file.
Find fields with basic:json type and assign a JSON object from storage.
"""
def hydrate_path(file_name):
id_ = "{}/".format(data.id) # needs trailing slash
if id_ in file_name:
file_name = file_name[file_name.find(id_) + len(id_):] # remove id from filename
return os.path.join(settings.FLOW_EXECUTOR['DATA_DIR'], id_, file_name)
def hydrate_storage(storage_id):
return LazyStorageJSON(pk=storage_id)
for field_schema, fields in iterate_fields(output, output_schema):
name = field_schema['name']
value = fields[name]
if 'type' in field_schema:
if field_schema['type'].startswith('basic:file:'):
value['file'] = hydrate_path(value['file'])
elif field_schema['type'].startswith('list:basic:file:'):
for obj in value:
obj['file'] = hydrate_path(obj['file'])
if field_schema['type'].startswith('basic:dir:'):
value['dir'] = hydrate_path(value['dir'])
elif field_schema['type'].startswith('list:basic:dir:'):
for obj in value:
obj['dir'] = hydrate_path(obj['dir'])
elif field_schema['type'].startswith('basic:json:'):
fields[name] = hydrate_storage(value)
elif field_schema['type'].startswith('list:basic:json:'):
fields[name] = [hydrate_storage(storage_id) for storage_id in value]
def hydrate_size(data):
""""Add file and dir sizes.
Add sizes to ``basic:file:``, ``list:basic:file``, ``basic:dir:``
and ``list:basic:dir:`` fields.
"""
def add_file_size(obj):
if data.status in [Data.STATUS_DONE, Data.STATUS_ERROR] and 'size' in obj:
return
path = os.path.join(settings.FLOW_EXECUTOR['DATA_DIR'], str(data.pk), obj['file'])
if not os.path.isfile(path):
raise ValidationError("Referenced file does not exist ({})".format(path))
obj['size'] = os.path.getsize(path)
def get_dir_size(path):
total_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
def add_dir_size(obj):
if data.status in [Data.STATUS_DONE, Data.STATUS_ERROR] and 'size' in obj:
return
path = os.path.join(settings.FLOW_EXECUTOR['DATA_DIR'], str(data.pk), obj['dir'])
if not os.path.isdir(path):
raise ValidationError("Referenced dir does not exist ({})".format(path))
obj['size'] = get_dir_size(path)
for field_schema, fields in iterate_fields(data.output, data.process.output_schema):
name = field_schema['name']
value = fields[name]
if 'type' in field_schema:
if field_schema['type'].startswith('basic:file:'):
add_file_size(value)
elif field_schema['type'].startswith('list:basic:file:'):
for obj in value:
add_file_size(obj)
elif field_schema['type'].startswith('basic:dir:'):
add_dir_size(value)
elif field_schema['type'].startswith('list:basic:dir:'):
for obj in value:
add_dir_size(obj)
def hydrate_input_uploads(input_, input_schema, hydrate_values=True):
"""Hydrate input basic:upload types with upload location
Find basic:upload fields in input.
Add the upload location for relative paths.
"""
files = []
for field_schema, fields in iterate_fields(input_, input_schema):
name = field_schema['name']
value = fields[name]
if 'type' in field_schema:
if field_schema['type'] == 'basic:file:':
files.append(value)
elif field_schema['type'] == 'list:basic:file:':
files.extend(value)
urlregex = re.compile(r'^(https?|ftp)://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]')
for value in files:
if 'file_temp' in value:
if isinstance(value['file_temp'], six.string_types):
# If file_temp not url, nor absolute path: hydrate path
if not os.path.isabs(value['file_temp']) and not urlregex.search(value['file_temp']):
value['file_temp'] = os.path.join(settings.FLOW_EXECUTOR['UPLOAD_DIR'], value['file_temp'])
else:
# Something very strange happened
value['file_temp'] = 'Invalid value for file_temp in DB'
def hydrate_input_references(input_, input_schema, hydrate_values=True):
"""Hydrate ``input_`` with linked data.
Find fields with complex data:<...> types in ``input_``.
Assign an output of corresponding data object to those fields.
"""
for field_schema, fields in iterate_fields(input_, input_schema):
name = field_schema['name']
value = fields[name]
if 'type' in field_schema:
if field_schema['type'].startswith('data:'):
# if re.match('^[0-9a-fA-F]{24}$', str(value)) is None:
# print "ERROR: data:<...> value in field \"{}\", type \"{}\" not ObjectId but {}.".format(
# name, field_schema['type'], value)
if value is None:
continue
data = Data.objects.get(id=value)
output = data.output.copy()
# static = Data.static.to_python(data.static)
if hydrate_values:
_hydrate_values(output, data.process.output_schema, data)
# _hydrate_values(static, data.static_schema, data)
output["__id"] = data.id
output["__type"] = data.process.type
fields[name] = output
elif field_schema['type'].startswith('list:data:'):
outputs = []
for val in value:
# if re.match('^[0-9a-fA-F]{24}$', str(val)) is None:
# print "ERROR: data:<...> value in {}, type \"{}\" not ObjectId but {}.".format(
# name, field_schema['type'], val)
if val is None:
continue
data = Data.objects.get(id=val)
output = data.output.copy()
# static = Data.static.to_python(data.static)
if hydrate_values:
_hydrate_values(output, data.process.output_schema, data)
# _hydrate_values(static, data.static_schema, data)
output["__id"] = data.id
output["__type"] = data.process.type
outputs.append(output)
fields[name] = outputs
def dict_dot(d, k, val=None, default=None):
"""Get or set value using a dot-notation key in a multilevel dict."""
if val is None and k == '':
return d
def set_default(dict_or_model, key, default_value):
if isinstance(dict_or_model, models.Model):
if not hasattr(dict_or_model, key):
setattr(dict_or_model, key, default_value)
return getattr(dict_or_model, key)
else:
return dict_or_model.setdefault(key, default_value)
def get_item(dict_or_model, key):
if isinstance(dict_or_model, models.Model):
return getattr(dict_or_model, key)
else:
return dict_or_model[key]
def set_item(dict_or_model, key, value):
if isinstance(dict_or_model, models.Model):
setattr(dict_or_model, key, value)
else:
dict_or_model[key] = value
if val is None and callable(default):
# Get value, default for missing
return functools.reduce(lambda a, b: set_default(a, b, default()), k.split('.'), d)
elif val is None:
# Get value, error on missing
return functools.reduce(lambda a, b: get_item(a, b), k.split('.'), d)
else:
# Set value
try:
k, k_last = k.rsplit('.', 1)
set_item(dict_dot(d, k, default=dict), k_last, val)
except ValueError:
set_item(d, k, val)
return val
```
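Since `dict_dot` drives all dot-separated schema path access above, a small usage sketch may help; it assumes the module is importable as `resolwe.flow.models` under a configured Django setup and exercises only the plain-dict branches:

```python
from resolwe.flow.models import dict_dot  # assumes a configured Django environment
data = {}
# Set a nested value; missing intermediate dicts are created via the `default=dict` branch.
dict_dot(data, 'proc.stdout', 'done')
assert data == {'proc': {'stdout': 'done'}}
# Read the value back with the same dot-separated path.
assert dict_dot(data, 'proc.stdout') == 'done'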
#### File: flow/templatetags/test_tags.py
```python
from django import template
register = template.Library()
@register.filter
def increase(value):
return value + 1
```
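A sketch of how the `increase` filter above could be used; the `{% load %}` name follows the module file name (`test_tags.py`) and assumes the owning app is in `INSTALLED_APPS` with template settings configured:

```python
from django.template import Context, Template  # requires configured Django settings
rendered = Template('{% load test_tags %}{{ count|increase }}').render(Context({'count': 41}))
assert rendered == '42'
```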
#### File: flow/tests/test_backend.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import shutil
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import TestCase
from resolwe.flow.managers import manager
from resolwe.flow.models import Data, Process
class BackendTest(TestCase):
def setUp(self):
u = get_user_model().objects.create_superuser('test', '<EMAIL>', 'test')
self.p = Process(slug='test-processor',
name='Test Process',
contributor=u,
type='data:test',
version=1)
self.p.save()
self.d = Data(slug='test-data',
name='Test Data',
contributor=u,
process=self.p)
self.d.save()
def tearDown(self):
for data in Data.objects.all():
data_dir = os.path.join(settings.FLOW_EXECUTOR['DATA_DIR'], str(data.id))
shutil.rmtree(data_dir, ignore_errors=True)
def test_manager(self):
manager.communicate(verbosity=0)
def test_dtlbash(self):
self.p.slug = 'test-processor-dtlbash'
self.p.run = {'script': """
gen-info \"Test processor info\"
gen-warning \"Test processor warning\"
echo '{"proc.info": "foo"}'
"""}
self.p.save()
self.d.slug = 'test-data-dtlbash'
self.d.process = self.p
self.d.save()
self.d = Data(id=self.d.id)
```
#### File: resolwe/resolwe/utils.py
```python
class BraceMessage(object):
"""Helper class that can be used to construct log messages with
the new {}-string formatting syntax.
    NOTE: When using this helper class, one pays no significant
performance penalty since the actual formatting only happens when
(and if) the logged message is actually outputted to a log by a
handler.
Example usage:
from genesis.utils.formatters import BraceMessage as __
logger.error(__("Message with {0} {name}", 2, name="placeholders"))
Source:
https://docs.python.org/3/howto/logging-cookbook.html#use-of-alternative-formatting-styles
"""
def __init__(self, fmt, *args, **kwargs):
self.fmt = fmt
self.args = args
self.kwargs = kwargs
def __str__(self):
return self.fmt.format(*self.args, **self.kwargs)
``` |
{
"source": "jkolczasty/arduino_expander",
"score": 3
} |
#### File: arduino_expander/examples/linux-i2c-input-output.py
```python
import smbus
import time
import random
bus = smbus.SMBus(0)
address = 0x29
# config as follows:
# 7 as OUTPUT as 1 initial output
# 8 as INPUT
config = [1, 7, 1, 0, 2, 8, 0, 0]
def print_gpio(data):
print("GPIOs:", end="")
for i in range(0, int(len(data)/4)):
print(" | ", data[i*4+1], "=", data[i*4+2] + data[i*4+3]*256, end="")
print("")
bus.write_i2c_block_data(address, 7, config)
v = 1
while 1:
time.sleep(0.5)
v = 0 if v else 1
data = [0, v, 0]
# SET gpio item 0 to value
bus.write_i2c_block_data(address, 8, data)
# read values
data = bus.read_i2c_block_data(address, 0)
print_gpio(data)
``` |
{
"source": "JKolios/battlewhoosh",
"score": 3
} |
#### File: battlescribe_parser/bsdata/element.py
```python
import os.path
class Element:
INTERNAL_ATTRS = {'id', 'characteristicTypeId', 'hidden', 'TypeId', 'profileTypeId'}
DIR_TO_GAME_MAPPING = {
'wh40k-killteam': 'Warhammer 40K: Kill Team',
}
def __init__(self, xml_element, catalogue_file, profile_type=None):
self.element = xml_element
self.catalogue_file = catalogue_file
self.profile_type = profile_type
def attrib(self):
returned_keys = set(self.element.attrib.keys()) - self.INTERNAL_ATTRS
return {k: v for k, v in self.element.attrib.items() if k in returned_keys}
def iterate_children(self):
children_iterator = self.element.iter('*')
return (Element(child, self.catalogue_file) for child in children_iterator)
def has_child(self):
return self.child() is not None
def has_link(self):
return self.link_target() is not None
def link_target(self):
return self.element.get('targetId')
def child(self):
return self.element.get('childId')
def resolve_link(self):
if not self.has_link():
return None
return self._resolve_associated_element(self.link_target())
def resolve_child(self):
if not self.has_child():
return None
return self._resolve_associated_element(self.child())
def faction(self):
file_name = os.path.split(self.catalogue_file.file_name())[1]
return os.path.splitext(file_name)[0]
def game(self):
_, game_dir = os.path.split(os.path.dirname(self.catalogue_file.file_name()))
return self.DIR_TO_GAME_MAPPING[game_dir]
def _resolve_associated_element(self, element_id):
associated_element = self.catalogue_file.get_element_by_id(element_id)
return Element(associated_element, self.catalogue_file)
```
#### File: search_ui/catalogues/form.py
```python
import npyscreen
from search_ui.catalogues.term_text import TermText
from search_ui.catalogues.result_text import ResultText
class SearchForm(npyscreen.FormBaseNew):
def create(self):
self.add_handlers({
"^Q": self.exit,
"^W": self.jump_to_term_text,
"^E": self.jump_to_result_text,
"^R": self.jump_to_detail_text
})
self.term_text = self.add(
TermText,
name="Name:",
height=1,
scroll_exit=True)
self.result_text = self.add(
ResultText,
name="Results:",
values=[],
height=10,
scroll_exit=True)
self.detail_text = self.add(
npyscreen.TitlePager,
name="Details:",
scroll_exit=True,
autowrap=True)
def jump_to_term_text(self, _input):
self.term_text.edit()
def jump_to_result_text(self, _input):
self.result_text.edit()
def jump_to_detail_text(self, _input):
self.detail_text.edit()
def exit(self, _input):
quit(0)
```
#### File: search_ui/catalogues/term_text.py
```python
import npyscreen
import curses
class TermText(npyscreen.TitleText):
def when_value_edited(self):
search_results = self.parent.parentApp.run_search(self.value)
self.parent.result_text.values = search_results
self.parent.result_text.display()
``` |
{
"source": "JKolios/EPaperDashboard",
"score": 3
} |
#### File: JKolios/EPaperDashboard/display.py
```python
from epd import EPD
from text_render.text_render import draw_paragraph
from PIL import Image, ImageFont, ImageDraw, ImageChops
VERTICAL_MODE = 0
HORIZONTAL_MODE = 1
class Display:
def __init__(self, mode=VERTICAL_MODE, line_width=20):
self.mode = mode
self.line_width = line_width
self._epd = EPD()
self._epd.init()
self._init_images()
def draw_paragraph(self, text):
self.cursor_height = draw_paragraph(self._image_draw, self.cursor_height, text, line_width=self.line_width)
def show(self):
self._update()
self._init_images()
def sleep(self):
self._sleep()
def _update(self):
if self.mode == HORIZONTAL_MODE:
self._epd.smart_update(self._image.rotate(90, expand=True))
return
self._epd.smart_update(self._image)
def _init_images(self):
self.cursor_height = 0
self._image = Image.new('1', (self._epd.width, self._epd.height), 255)
self._image_draw = ImageDraw.Draw(self._image)
def _sleep(self):
self._epd.sleep()
``` |
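A hedged usage sketch for the `Display` wrapper above; the module name is assumed from the file path, and the `epd` driver must be available on the target hardware:

```python
from display import Display, HORIZONTAL_MODE  # module name assumed from the file path
screen = Display(mode=HORIZONTAL_MODE, line_width=30)
screen.draw_paragraph('Hello from the dashboard')
screen.draw_paragraph('A second paragraph, wrapped at the configured line width')
screen.show()   # push the buffered image to the panel and start a fresh buffer
screen.sleep()  # put the panel into low-power mode
```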
{
"source": "jkolive/gerenciador-sybase-gui",
"score": 3
} |
#### File: jkolive/gerenciador-sybase-gui/trayIcon.py
```python
import main
import os
import sys
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from subprocess import run
from time import time
class TrayIcon(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self)
self.APPIND = 1
try:
gi.require_version('AppIndicator3', '0.1')
from gi.repository import AppIndicator3
except ImportError:
self.APPIND = 0
if self.APPIND == 1:
timestamp = time()
self.indicator = AppIndicator3.Indicator.new(f"_id_{timestamp}", sys.path[0] +
'/images/trayicon/app-gerenciador.png',
AppIndicator3.IndicatorCategory.APPLICATION_STATUS)
self.active = AppIndicator3.IndicatorStatus.ACTIVE
self.passive = AppIndicator3.IndicatorStatus.PASSIVE
self.indicator.set_status(self.active)
self.indicator.set_title('Gerenciador Sybase')
self.indicator.set_menu(self.add_menu_indicator())
else:
self.statusIcon = Gtk.StatusIcon()
self.statusIcon.set_from_file(sys.path[0] + '/images/trayicon/app-gerenciador.png')
self.statusIcon.set_tooltip_text('Gerenciador Sybase')
self.statusIcon.connect('popup-menu', self.on_right_click)
def add_menu_indicator(self):
menu = Gtk.Menu()
miApp = Gtk.MenuItem()
miApp.set_label('Abrir Gerenciador')
miApp.connect('activate', self.show_app)
menu.append(miApp)
miExit = Gtk.MenuItem()
miExit.set_label('Sair')
miExit.connect('activate', self.close_app)
menu.append(miExit)
menu.show_all()
return menu
def on_right_click(self, icon, button, time_active):
menu = Gtk.Menu()
miApp = Gtk.MenuItem()
        miApp.set_label('Abrir Gerenciador')
miApp.connect('activate', self.show_app)
menu.append(miApp)
miExit = Gtk.MenuItem()
miExit.set_label('Sair')
miExit.connect('activate', self.close_app)
menu.append(miExit)
menu.show_all()
menu.popup(None, None, None, self.statusIcon, button, time_active)
def show_app(self, *args):
if self.APPIND == 1:
self.indicator.set_status(self.passive)
else:
self.statusIcon.set_visible(False)
main.Main()
def close_app(self, *args):
cmd = run('pidof -s dbsrv16', shell=True)
if cmd.returncode == 0:
dialog = Gtk.MessageDialog(parent=self, flags=0, message_type=Gtk.MessageType.QUESTION,
buttons=Gtk.ButtonsType.YES_NO, text='ATENÇÃO!')
dialog.format_secondary_text('Banco de Dados ainda em execução! Deseja parar o Banco de dados?')
dialog.set_position(Gtk.WindowPosition.CENTER_ALWAYS)
response = dialog.run()
if response == Gtk.ResponseType.YES:
run(f'echo {os.environ["ENTRY_PASS"]} | sudo -k -S killall -w -s 15 dbsrv16', shell=True)
raise SystemExit()
            if response == Gtk.ResponseType.NO:
                dialog.destroy()
                raise SystemExit()
        else:
            # No database process running, exit straight away.
            raise SystemExit()
``` |
{
"source": "jkoloda/CarND-Capstone",
"score": 3
} |
#### File: tl_detector/light_classification/tl_classifier.py
```python
from styx_msgs.msg import TrafficLight
import tensorflow as tf
import numpy as np
import rospy
import cv2
import os
MAX_IMAGE_WIDTH = 300
MAX_IMAGE_HEIGHT = 300
class TLClassifier(object):
"""Traffic light classifier based on a tensorflow model."""
def __init__(self, is_site=True):
"""Build, load and prepare traffic light classifier object.
Loads classifier trained on simulator or real data, depending on the
is_site flag coming from the configuration file.
"""
self.session = None
self.detection_graph = None
self.classes = {1: TrafficLight.RED,
2: TrafficLight.YELLOW,
3: TrafficLight.GREEN,
4: TrafficLight.UNKNOWN}
self.light_labels = ['RED', 'YELLOW', 'GREEN', 'UNKNOWN']
temp = os.path.dirname(os.path.realpath(__file__))
temp = temp.replace(
'ros/src/tl_detector/light_classification',
'models',
)
if is_site is False:
self.model_path = os.path.join(temp,
'frozen_inference_graph_sim.pb')
else:
self.model_path = os.path.join(temp,
'frozen_inference_graph_real.pb')
self.load_model(model_path=self.model_path)
def get_classification(self, image):
"""Determine the color of the traffic light in the image.
Args
----
image (cv::Mat): image containing the traffic light
Returns
-------
int: ID of traffic light color
(specified in styx_msgs/TrafficLight)
"""
class_idx, confidence = self.predict(image)
return class_idx
def load_model(self, model_path):
"""Load classifier (graph and session)."""
self.detection_graph = tf.Graph()
with tf.Session(graph=self.detection_graph) as sess:
self.session = sess
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(model_path, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
def predict(self, image_np, min_score_thresh=0.5):
"""Predict traffic light state from image.
Parameters
----------
image_np : ndarray
Input image.
min_score_threshold : float
Confidence threshold for traffic light classification.
Returns
-------
light : TrafficLight
Light color of traffic light detected on input image.
score : float
Classification confidence score.
"""
image_tensor = self.detection_graph.\
get_tensor_by_name('image_tensor:0')
detection_boxes = self.detection_graph.\
get_tensor_by_name('detection_boxes:0')
detection_scores = self.detection_graph.\
get_tensor_by_name('detection_scores:0')
detection_classes = self.detection_graph.\
get_tensor_by_name('detection_classes:0')
num_detections = self.detection_graph.\
get_tensor_by_name('num_detections:0')
image_np = self.process_image(image_np)
input = [detection_boxes, detection_scores, detection_classes]
(boxes, scores, classes) = self.session.run(
input,
feed_dict={image_tensor: np.expand_dims(image_np, axis=0)})
scores = np.squeeze(scores)
classes = np.squeeze(classes)
boxes = np.squeeze(boxes)
# Traffic light state decision
        # In case multiple traffic lights are detected (as e.g. is the case of
# the simulator) we select the light with the highest accumulated score
accumulated_scores = np.zeros(len(self.classes))
accumulated_classes = np.zeros(len(self.classes))
for ii, score in enumerate(scores):
if score > min_score_thresh:
# light_class = self.classes[classes[ii]]
# return light_class, score
rospy.loginfo(self.light_labels[int(classes[ii] - 1)])
accumulated_scores[classes[ii] - 1] += score
accumulated_classes[classes[ii] - 1] += 1
if np.sum(accumulated_scores) > 0:
light_class_idx = np.argmax(accumulated_scores) + 1
confidence = accumulated_scores[light_class_idx - 1] / \
float(accumulated_classes[light_class_idx - 1])
return self.classes[light_class_idx], confidence
else:
return None, None
def process_image(self, img):
"""Pre-process imae so it can be passed directly to classifier.
Pre-processing consists of shrinkng the image to default maximum size
and converting in to RGB format (assuming that input is BGR).
Parameters
----------
img : ndarray
Input image to be processed.
Returns
-------
img : ndarray
Processed image.
"""
img = cv2.resize(img, (MAX_IMAGE_WIDTH, MAX_IMAGE_HEIGHT))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def shrink_image(self, img):
"""Shrink image if bigger than default maximum dimensions.
Aspect ratio is kept. If the image is smaller it is return as it is.
Parameters
----------
img : ndarray
Input image to be shrinked if necessary.
Returns
-------
img : ndarray
Shrinked image.
"""
height, width = img.shape[:2]
if MAX_IMAGE_HEIGHT < height or MAX_IMAGE_WIDTH < width:
            scaling_factor = min(MAX_IMAGE_HEIGHT / float(height),
                                 MAX_IMAGE_WIDTH / float(width))
img = cv2.resize(img, None, fx=scaling_factor,
fy=scaling_factor, interpolation=cv2.INTER_AREA)
return img
``` |
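A hedged usage sketch for the classifier above; the import path and the image file name are assumptions, the frozen graph referenced in `__init__` must be present, and `styx_msgs` must be importable (i.e. a sourced ROS workspace):

```python
import cv2
from styx_msgs.msg import TrafficLight
from light_classification.tl_classifier import TLClassifier  # path assumed from the file header
classifier = TLClassifier(is_site=False)  # loads the simulator graph per __init__
image = cv2.imread('sample_traffic_light.jpg')  # hypothetical BGR test image
state = classifier.get_classification(image)
print('red light' if state == TrafficLight.RED else 'not red / unknown')
```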
{
"source": "jkoloda/deivos",
"score": 3
} |
#### File: deivos/test/test_squeezenet.py
```python
import unittest
import numpy as np
import tensorflow as tf
from deivos.architectures.squeezenet import (
default_preprocessor,
expand,
fire_module,
get_model_v10,
get_model_v11,
squeeze,
)
class TestLayers(unittest.TestCase):
"""Tester for SqueezeNet architecture."""
# pylint: disable=too-many-instance-attributes
def setUp(self):
self.batch_size = 16
self.rows = 24
self.cols = 32
self.channels = 20
self.input_shape = (self.batch_size, self.rows,
self.cols, self.channels)
self.default_input_shape = (self.batch_size, 227, 227, 3)
self.inputs = tf.random.normal(shape=self.input_shape)
self.default_inputs = tf.random.normal(shape=self.default_input_shape)
def test_squeeze(self):
"""Test squeeze module."""
for _ in range(0, 100):
filters = np.random.randint(low=1, high=2*self.channels)
output_shape = (self.batch_size, self.rows, self.cols, filters)
# Check shape after squeezing
if filters < self.channels:
outputs = squeeze(self.inputs, name='', filters=filters)
self.assertTrue(outputs.shape == tf.TensorShape(output_shape))
else:
# Squeeze module cannot expand input
with self.assertRaises(AssertionError):
outputs = squeeze(self.inputs, name='', filters=filters)
def test_expand(self):
"""Test expand module."""
for _ in range(0, 100):
filters = np.random.randint(low=1, high=2*self.channels)
output_shape = (self.batch_size, self.rows, self.cols, 2*filters)
# Check shape after expanding
if filters > self.channels:
outputs = expand(self.inputs, name='', filters=filters)
self.assertTrue(outputs.shape == tf.TensorShape(output_shape))
else:
# Expand module cannot squeeze input
with self.assertRaises(AssertionError):
outputs = expand(self.inputs, name='', filters=filters)
def test_fire_module(self):
"""Test fire module."""
# Only test for one number of filters
# Filter variety is tested by expand and squeeze tests
filters_in = 10
for squeeze_expand_ratio in [2, 3, 4]:
# Expand squeezed dimension for both 1x1 and 3x3 filters
filters_out = squeeze_expand_ratio * 2 * filters_in
# No bypass
output_shape = (self.batch_size, self.rows, self.cols, filters_out)
outputs = fire_module(self.inputs, name='',
squeeze_filters=filters_in, bypass=False,
squeeze_expand_ratio=squeeze_expand_ratio)
self.assertTrue(outputs.shape == tf.TensorShape(output_shape))
# Complex bypass
outputs = fire_module(self.inputs, name='',
squeeze_filters=filters_in, bypass=True,
squeeze_expand_ratio=squeeze_expand_ratio)
self.assertTrue(outputs.shape == tf.TensorShape(output_shape))
# Simple bypass
for squeeze_expand_ratio in [2, 4, 5]:
filters_in = self.channels//squeeze_expand_ratio
# Expand squeezed dimension for both 1x1 and 3x3 filters
filters_out = squeeze_expand_ratio * 2 * filters_in
output_shape = (self.batch_size, self.rows, self.cols, filters_out)
outputs = fire_module(self.inputs, name='',
squeeze_filters=filters_in, bypass=True,
squeeze_expand_ratio=squeeze_expand_ratio)
self.assertTrue(outputs.shape == tf.TensorShape(output_shape))
def test_default_preprocessor(self):
"""Test deafult preprocessor."""
# Version 1.0, default input
outputs = default_preprocessor(self.default_inputs, version='1.0')
self.assertTrue(outputs.shape == (self.batch_size, 55, 55, 96))
# Not default input
with self.assertRaises(AssertionError):
outputs = default_preprocessor(self.inputs, '1.0')
# Version 1.1, default input
outputs = default_preprocessor(self.default_inputs, version='1.1')
self.assertTrue(outputs.shape == (self.batch_size, 56, 56, 64))
# Not default input
with self.assertRaises(AssertionError):
outputs = default_preprocessor(self.inputs, version='1.1')
def test_get_model_v10(self):
"""Test SqueezeNet v1.0."""
layers = {'fire2_concat': (None, 55, 55, 128),
'fire3_concat': (None, 55, 55, 128),
'fire4_concat': (None, 55, 55, 256),
'fire5_concat': (None, 27, 27, 256),
'fire6_concat': (None, 27, 27, 384),
'fire7_concat': (None, 27, 27, 384),
'fire8_concat': (None, 27, 27, 512),
'fire9_concat': (None, 13, 13, 512)}
for num_classes in [10, 100, 1000]:
for bypass_type in [None, 'simple', 'complex']:
model = get_model_v10(num_classes=num_classes,
bypass_type=bypass_type)
for name, shape in layers.items():
layer = model.get_layer(name)
self.assertTrue(layer.output_shape == shape)
self.assertTrue(model.output_shape == (None, num_classes))
del model
def test_get_model_v11(self):
"""Test SqueezeNet v1.1."""
layers = {'fire2_concat': (None, 56, 56, 128),
'fire3_concat': (None, 56, 56, 128),
'fire4_concat': (None, 28, 28, 256),
'fire5_concat': (None, 28, 28, 256),
'fire6_concat': (None, 14, 14, 384),
'fire7_concat': (None, 14, 14, 384),
'fire8_concat': (None, 14, 14, 512),
'fire9_concat': (None, 14, 14, 512)}
for num_classes in [10, 100, 1000]:
for bypass_type in [None, 'simple', 'complex']:
model = get_model_v11(num_classes=num_classes,
bypass_type=bypass_type)
for name, shape in layers.items():
layer = model.get_layer(name)
self.assertTrue(layer.output_shape == shape)
self.assertTrue(model.output_shape == (None, num_classes))
del model
def test_squeezenet(self):
# TODO: Check that corresponding get models have been called
pass
if __name__ == '__main__':
unittest.main()
```
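For context, here is a hedged sketch of the kind of fire module these tests exercise: a 1x1 "squeeze" convolution followed by parallel 1x1 and 3x3 "expand" convolutions whose outputs are concatenated. It is a minimal tf.keras version, not the actual `deivos.architectures.squeezenet` implementation; the function name and the ratio handling are assumptions.
```python
import tensorflow as tf
from tensorflow.keras import layers

def fire_module_sketch(inputs, squeeze_filters, squeeze_expand_ratio=4, name='fire'):
    """Squeeze with a 1x1 conv, then expand with parallel 1x1 and 3x3 convs."""
    expand_filters = squeeze_expand_ratio * squeeze_filters
    squeezed = layers.Conv2D(squeeze_filters, 1, activation='relu',
                             name=name + '_squeeze')(inputs)
    left = layers.Conv2D(expand_filters, 1, activation='relu',
                         name=name + '_expand_1x1')(squeezed)
    right = layers.Conv2D(expand_filters, 3, padding='same', activation='relu',
                          name=name + '_expand_3x3')(squeezed)
    # Concatenating the two expand branches doubles the channel count, which
    # matches the 2 * squeeze_expand_ratio * filters_in shapes the tests check.
    return layers.concatenate([left, right], name=name + '_concat')

# Usage: a (16, 24, 32, 20) batch expands to 2 * 4 * 10 = 80 output channels.
x = tf.random.normal((16, 24, 32, 20))
y = fire_module_sketch(x, squeeze_filters=10)
print(y.shape)  # (16, 24, 32, 80)
```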
{
"source": "jkolokotronis/ds_mod_tools",
"score": 2
}
#### File: site-packages/chardet/sjisprober.py
```python
from mbcharsetprober import MultiByteCharSetProber
from codingstatemachine import CodingStateMachine
from chardistribution import SJISDistributionAnalysis
from jpcntx import SJISContextAnalysis
from mbcssm import SJISSMModel
import constants, sys
from constants import eStart, eError, eItsMe
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "SHIFT_JIS"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == eError:
if constants._debug:
sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n')
self._mState = constants.eNotMe
break
elif codingState == eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen :], charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen : i + 3 - charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1 : i + 1], charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if self._mContextAnalyzer.got_enough_data() and \
(self.get_confidence() > constants.SHORTCUT_THRESHOLD):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
```
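A hedged usage sketch for the prober above (the input file name is a placeholder, and the snippet assumes it runs inside this legacy chardet package so the relative imports resolve):
```python
import constants
from sjisprober import SJISProber

prober = SJISProber()
prober.feed(open('sample_sjis.txt', 'rb').read())   # placeholder input file
if prober.get_state() == constants.eFoundIt:
    print prober.get_charset_name(), prober.get_confidence()
```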
#### File: Crypto/Hash/HMAC.py
```python
__revision__ = "$Id$"
__all__ = ['new', 'digest_size', 'HMAC' ]
from Crypto.Util.strxor import strxor_c
from Crypto.Util.py3compat import *
#: The size of the authentication tag produced by the MAC.
#: It matches the digest size on the underlying
#: hashing module used.
digest_size = None
class HMAC:
"""Class that implements HMAC"""
#: The size of the authentication tag produced by the MAC.
#: It matches the digest size on the underlying
#: hashing module used.
digest_size = None
def __init__(self, key, msg = None, digestmod = None):
"""Create a new HMAC object.
:Parameters:
key : byte string
secret key for the MAC object.
It must be long enough to match the expected security level of the
MAC. However, there is no benefit in using keys longer than the
`digest_size` of the underlying hash algorithm.
msg : byte string
The very first chunk of the message to authenticate.
It is equivalent to an early call to `update()`. Optional.
:Parameter digestmod:
The hash algorithm the HMAC is based on.
Default is `Crypto.Hash.MD5`.
:Type digestmod:
A hash module or object instantiated from `Crypto.Hash`
"""
if digestmod is None:
import MD5
digestmod = MD5
self.digestmod = digestmod
self.outer = digestmod.new()
self.inner = digestmod.new()
try:
self.digest_size = digestmod.digest_size
except AttributeError:
self.digest_size = len(self.outer.digest())
try:
# The block size is 128 bytes for SHA384 and SHA512 and 64 bytes
# for the others hash function
blocksize = digestmod.block_size
except AttributeError:
blocksize = 64
ipad = 0x36
opad = 0x5C
if len(key) > blocksize:
key = digestmod.new(key).digest()
key = key + bchr(0) * (blocksize - len(key))
self.outer.update(strxor_c(key, opad))
self.inner.update(strxor_c(key, ipad))
if (msg):
self.update(msg)
def update(self, msg):
"""Continue authentication of a message by consuming the next chunk of data.
Repeated calls are equivalent to a single call with the concatenation
of all the arguments. In other words:
>>> m.update(a); m.update(b)
is equivalent to:
>>> m.update(a+b)
:Parameters:
msg : byte string
The next chunk of the message being authenticated
"""
self.inner.update(msg)
def copy(self):
"""Return a copy ("clone") of the MAC object.
The copy will have the same internal state as the original MAC
object.
This can be used to efficiently compute the MAC of strings that
share a common initial substring.
:Returns: An `HMAC` object
"""
other = HMAC(b(""))
other.digestmod = self.digestmod
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
def digest(self):
"""Return the **binary** (non-printable) MAC of the message that has
been authenticated so far.
This method does not change the state of the MAC object.
You can continue updating the object after calling this function.
:Return: A byte string of `digest_size` bytes. It may contain non-ASCII
characters, including null bytes.
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h.digest()
def hexdigest(self):
"""Return the **printable** MAC of the message that has been
authenticated so far.
This method does not change the state of the MAC object.
:Return: A string of 2* `digest_size` bytes. It contains only
hexadecimal ASCII digits.
"""
return "".join(["%02x" % bord(x)
for x in tuple(self.digest())])
def new(key, msg = None, digestmod = None):
"""Create a new HMAC object.
:Parameters:
key : byte string
key for the MAC object.
It must be long enough to match the expected security level of the
MAC. However, there is no benefit in using keys longer than the
`digest_size` of the underlying hash algorithm.
msg : byte string
The very first chunk of the message to authenticate.
It is equivalent to an early call to `HMAC.update()`.
Optional.
:Parameter digestmod:
The hash to use to implement the HMAC. Default is `Crypto.Hash.MD5`.
:Type digestmod:
A hash module or instantiated object from `Crypto.Hash`
:Returns: An `HMAC` object
"""
return HMAC(key, msg, digestmod)
```
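A short usage sketch for the module above (key and message are placeholders; `SHA` is PyCrypto's SHA-1 module, the same one used by the chaffing demo further below):
```python
from Crypto.Hash import HMAC, SHA

secret = b'Swordfish'                        # placeholder key
h = HMAC.new(secret, digestmod=SHA)          # default digestmod would be MD5
h.update(b'Attack at dawn')
print h.hexdigest()                          # 40 hex characters for SHA-1

# One-shot form, equivalent to the incremental calls above.
assert HMAC.new(secret, b'Attack at dawn', SHA).hexdigest() == h.hexdigest()
```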
#### File: Crypto/Protocol/Chaffing.py
```python
__revision__ = "$Id$"
from Crypto.Util.number import bytes_to_long
class Chaff:
"""Class implementing the chaff adding algorithm.
Methods for subclasses:
_randnum(size):
Returns a randomly generated number with a byte-length equal
to size. Subclasses can use this to implement better random
data and MAC generating algorithms. The default algorithm is
probably not very cryptographically secure. It is most
important that the chaff data does not contain any patterns
that can be used to discern it from wheat data without running
the MAC.
"""
def __init__(self, factor=1.0, blocksper=1):
"""Chaff(factor:float, blocksper:int)
factor is the number of message blocks to add chaff to,
expressed as a percentage between 0.0 and 1.0. blocksper is
the number of chaff blocks to include for each block being
chaffed. Thus the defaults add one chaff block to every
message block. By changing the defaults, you can adjust how
computationally difficult it could be for an adversary to
brute-force crack the message. The difficulty is expressed
as:
pow(blocksper, int(factor * number-of-blocks))
For ease of implementation, when factor < 1.0, only the first
int(factor*number-of-blocks) message blocks are chaffed.
"""
if not (0.0<=factor<=1.0):
raise ValueError, "'factor' must be between 0.0 and 1.0"
if blocksper < 0:
raise ValueError, "'blocksper' must be zero or more"
self.__factor = factor
self.__blocksper = blocksper
def chaff(self, blocks):
"""chaff( [(serial-number:int, data:string, MAC:string)] )
: [(int, string, string)]
Add chaff to message blocks. blocks is a list of 3-tuples of the
form (serial-number, data, MAC).
Chaff is created by choosing a random number of the same
byte-length as data, and another random number of the same
byte-length as MAC. The message block's serial number is
placed on the chaff block and all the packet's chaff blocks
are randomly interspersed with the single wheat block. This
method then returns a list of 3-tuples of the same form.
Chaffed blocks will contain multiple instances of 3-tuples
with the same serial number, but the only way to figure out
which blocks are wheat and which are chaff is to perform the
MAC hash and compare values.
"""
chaffedblocks = []
# count is the number of blocks to add chaff to. blocksper is the
# number of chaff blocks to add per message block that is being
# chaffed.
count = len(blocks) * self.__factor
blocksper = range(self.__blocksper)
for i, wheat in zip(range(len(blocks)), blocks):
# it shouldn't matter which of the n blocks we add chaff to, so for
# ease of implementation, we'll just add them to the first count
# blocks
if i < count:
serial, data, mac = wheat
datasize = len(data)
macsize = len(mac)
addwheat = 1
# add chaff to this block
for j in blocksper:
import sys
chaffdata = self._randnum(datasize)
chaffmac = self._randnum(macsize)
chaff = (serial, chaffdata, chaffmac)
# mix up the order, if the 5th bit is on then put the
# wheat on the list
if addwheat and bytes_to_long(self._randnum(16)) & 0x40:
chaffedblocks.append(wheat)
addwheat = 0
chaffedblocks.append(chaff)
if addwheat:
chaffedblocks.append(wheat)
else:
# just add the wheat
chaffedblocks.append(wheat)
return chaffedblocks
def _randnum(self, size):
from Crypto import Random
return Random.new().read(size)
if __name__ == '__main__':
text = """\
We hold these truths to be self-evident, that all men are created equal, that
they are endowed by their Creator with certain unalienable Rights, that among
these are Life, Liberty, and the pursuit of Happiness. That to secure these
rights, Governments are instituted among Men, deriving their just powers from
the consent of the governed. That whenever any Form of Government becomes
destructive of these ends, it is the Right of the People to alter or to
abolish it, and to institute new Government, laying its foundation on such
principles and organizing its powers in such form, as to them shall seem most
likely to effect their Safety and Happiness.
"""
print 'Original text:\n=========='
print text
print '=========='
# first transform the text into packets
blocks = [] ; size = 40
for i in range(0, len(text), size):
blocks.append( text[i:i+size] )
# now get MACs for all the text blocks. The key is obvious...
print 'Calculating MACs...'
from Crypto.Hash import HMAC, SHA
key = 'Jefferson'
macs = [HMAC.new(key, block, digestmod=SHA).digest()
for block in blocks]
assert len(blocks) == len(macs)
# put these into a form acceptable as input to the chaffing procedure
source = []
m = zip(range(len(blocks)), blocks, macs)
print m
for i, data, mac in m:
source.append((i, data, mac))
# now chaff these
print 'Adding chaff...'
c = Chaff(factor=0.5, blocksper=2)
chaffed = c.chaff(source)
from base64 import encodestring
# print the chaffed message blocks. meanwhile, separate the wheat from
# the chaff
wheat = []
print 'chaffed message blocks:'
for i, data, mac in chaffed:
# do the authentication
h = HMAC.new(key, data, digestmod=SHA)
pmac = h.digest()
if pmac == mac:
tag = '-->'
wheat.append(data)
else:
tag = ' '
# base64 adds a trailing newline
print tag, '%3d' % i, \
repr(data), encodestring(mac)[:-1]
# now decode the message packets and check it against the original text
print 'Undigesting wheat...'
# PY3K: This is meant to be text, do not change to bytes (data)
newtext = "".join(wheat)
if newtext == text:
print 'They match!'
else:
print 'They differ!'
```
#### File: Crypto/Protocol/KDF.py
```python
__revision__ = "$Id$"
import math
import struct
from Crypto.Util.py3compat import *
from Crypto.Hash import SHA as SHA1, HMAC
from Crypto.Util.strxor import strxor
def PBKDF1(password, salt, dkLen, count=1000, hashAlgo=None):
"""Derive one key from a password (or passphrase).
This function performs key derivation according an old version of
the PKCS#5 standard (v1.5).
This algorithm is called ``PBKDF1``. Even though it is still described
in the latest version of the PKCS#5 standard (version 2, or RFC2898),
newer applications should use the more secure and versatile `PBKDF2` instead.
:Parameters:
password : string
The secret password or pass phrase to generate the key from.
salt : byte string
An 8 byte string to use for better protection from dictionary attacks.
This value does not need to be kept secret, but it should be randomly
chosen for each derivation.
dkLen : integer
The length of the desired key. Default is 16 bytes, suitable for instance for `Crypto.Cipher.AES`.
count : integer
The number of iterations to carry out. It's recommended to use at least 1000.
hashAlgo : module
The hash algorithm to use, as a module or an object from the `Crypto.Hash` package.
The digest length must be no shorter than ``dkLen``.
The default algorithm is `SHA1`.
:Return: A byte string of length `dkLen` that can be used as key.
"""
if not hashAlgo:
hashAlgo = SHA1
password = tobytes(password)
pHash = hashAlgo.new(password+salt)
digest = pHash.digest_size
if dkLen>digest:
raise ValueError("Selected hash algorithm has a too short digest (%d bytes)." % digest)
if len(salt)!=8:
raise ValueError("Salt is not 8 bytes long.")
for i in xrange(count-1):
pHash = pHash.new(pHash.digest())
return pHash.digest()[:dkLen]
def PBKDF2(password, salt, dkLen=16, count=1000, prf=None):
"""Derive one or more keys from a password (or passphrase).
This performs key derivation according to the PKCS#5 standard (v2.0),
by means of the ``PBKDF2`` algorithm.
:Parameters:
password : string
The secret password or pass phrase to generate the key from.
salt : string
A string to use for better protection from dictionary attacks.
This value does not need to be kept secret, but it should be randomly
chosen for each derivation. It is recommended to be at least 8 bytes long.
dkLen : integer
The cumulative length of the desired keys. Default is 16 bytes, suitable for instance for `Crypto.Cipher.AES`.
count : integer
The number of iterations to carry out. It's recommended to use at least 1000.
prf : callable
A pseudorandom function. It must be a function that returns a pseudorandom string
from two parameters: a secret and a salt. If not specified, HMAC-SHA1 is used.
:Return: A byte string of length `dkLen` that can be used as key material.
If you wanted multiple keys, just break up this string into segments of the desired length.
"""
password = tobytes(password)
if prf is None:
prf = lambda p,s: HMAC.new(p,s,SHA1).digest()
key = b('')
i = 1
while len(key)<dkLen:
U = previousU = prf(password,salt+struct.pack(">I", i))
for j in xrange(count-1):
previousU = t = prf(password,previousU)
U = strxor(U,t)
key += U
i = i + 1
return key[:dkLen]
```
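A short usage sketch for `PBKDF2` (the password, salt length and iteration count are placeholders; the derived bytes can be used directly as a symmetric key):
```python
from Crypto.Protocol.KDF import PBKDF2
from Crypto import Random

salt = Random.new().read(8)                       # fresh random salt per derivation
key = PBKDF2('correct horse battery', salt, dkLen=32, count=5000)
assert len(key) == 32                             # e.g. an AES-256 key

# The derivation is deterministic for the same password, salt and count.
assert key == PBKDF2('correct horse battery', salt, dkLen=32, count=5000)
```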
#### File: Crypto/PublicKey/DSA.py
```python
__revision__ = "$Id$"
__all__ = ['generate', 'construct', 'error', 'DSAImplementation', '_DSAobj']
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
from Crypto.PublicKey import _DSA, _slowmath, pubkey
from Crypto import Random
try:
from Crypto.PublicKey import _fastmath
except ImportError:
_fastmath = None
class _DSAobj(pubkey.pubkey):
"""Class defining an actual DSA key.
:undocumented: __getstate__, __setstate__, __repr__, __getattr__
"""
#: Dictionary of DSA parameters.
#:
#: A public key will only have the following entries:
#:
#: - **y**, the public key.
#: - **g**, the generator.
#: - **p**, the modulus.
#: - **q**, the order of the sub-group.
#:
#: A private key will also have:
#:
#: - **x**, the private key.
keydata = ['y', 'g', 'p', 'q', 'x']
def __init__(self, implementation, key):
self.implementation = implementation
self.key = key
def __getattr__(self, attrname):
if attrname in self.keydata:
# For backward compatibility, allow the user to get (not set) the
# DSA key parameters directly from this object.
return getattr(self.key, attrname)
else:
raise AttributeError("%s object has no %r attribute" % (self.__class__.__name__, attrname,))
def sign(self, M, K):
"""Sign a piece of data with DSA.
:Parameter M: The piece of data to sign with DSA. It may
not be longer in bit size than the sub-group order (*q*).
:Type M: byte string or long
:Parameter K: A secret number, chosen randomly in the closed
range *[1,q-1]*.
:Type K: long (recommended) or byte string (not recommended)
:attention: selection of *K* is crucial for security. Generating a
random number larger than *q* and taking the modulus by *q* is
**not** secure, since smaller values will occur more frequently.
Generating a random number systematically smaller than *q-1*
(e.g. *floor((q-1)/8)* random bytes) is also **not** secure. In general,
it shall not be possible for an attacker to know the value of `any
bit of K`__.
:attention: The number *K* shall not be reused for any other
operation and shall be discarded immediately.
:attention: M must be a digest cryptographic hash, otherwise
an attacker may mount an existential forgery attack.
:Return: A tuple with 2 longs.
.. __: http://www.di.ens.fr/~pnguyen/pub_NgSh00.htm
"""
return pubkey.pubkey.sign(self, M, K)
def verify(self, M, signature):
"""Verify the validity of a DSA signature.
:Parameter M: The expected message.
:Type M: byte string or long
:Parameter signature: The DSA signature to verify.
:Type signature: A tuple with 2 longs as return by `sign`
:Return: True if the signature is correct, False otherwise.
"""
return pubkey.pubkey.verify(self, M, signature)
def _encrypt(self, c, K):
raise TypeError("DSA cannot encrypt")
def _decrypt(self, c):
raise TypeError("DSA cannot decrypt")
def _blind(self, m, r):
raise TypeError("DSA cannot blind")
def _unblind(self, m, r):
raise TypeError("DSA cannot unblind")
def _sign(self, m, k):
return self.key._sign(m, k)
def _verify(self, m, sig):
(r, s) = sig
return self.key._verify(m, r, s)
def has_private(self):
return self.key.has_private()
def size(self):
return self.key.size()
def can_blind(self):
return False
def can_encrypt(self):
return False
def can_sign(self):
return True
def publickey(self):
return self.implementation.construct((self.key.y, self.key.g, self.key.p, self.key.q))
def __getstate__(self):
d = {}
for k in self.keydata:
try:
d[k] = getattr(self.key, k)
except AttributeError:
pass
return d
def __setstate__(self, d):
if not hasattr(self, 'implementation'):
self.implementation = DSAImplementation()
t = []
for k in self.keydata:
if not d.has_key(k):
break
t.append(d[k])
self.key = self.implementation._math.dsa_construct(*tuple(t))
def __repr__(self):
attrs = []
for k in self.keydata:
if k == 'p':
attrs.append("p(%d)" % (self.size()+1,))
elif hasattr(self.key, k):
attrs.append(k)
if self.has_private():
attrs.append("private")
# PY3K: This is meant to be text, do not change to bytes (data)
return "<%s @0x%x %s>" % (self.__class__.__name__, id(self), ",".join(attrs))
class DSAImplementation(object):
"""
A DSA key factory.
This class is only internally used to implement the methods of the
`Crypto.PublicKey.DSA` module.
"""
def __init__(self, **kwargs):
"""Create a new DSA key factory.
:Keywords:
use_fast_math : bool
Specify which mathematic library to use:
- *None* (default). Use fastest math available.
- *True* . Use fast math.
- *False* . Use slow math.
default_randfunc : callable
Specify how to collect random data:
- *None* (default). Use Random.new().read().
- not *None* . Use the specified function directly.
:Raise RuntimeError:
When **use_fast_math** =True but fast math is not available.
"""
use_fast_math = kwargs.get('use_fast_math', None)
if use_fast_math is None: # Automatic
if _fastmath is not None:
self._math = _fastmath
else:
self._math = _slowmath
elif use_fast_math: # Explicitly select fast math
if _fastmath is not None:
self._math = _fastmath
else:
raise RuntimeError("fast math module not available")
else: # Explicitly select slow math
self._math = _slowmath
self.error = self._math.error
# 'default_randfunc' parameter:
# None (default) - use Random.new().read
# not None - use the specified function
self._default_randfunc = kwargs.get('default_randfunc', None)
self._current_randfunc = None
def _get_randfunc(self, randfunc):
if randfunc is not None:
return randfunc
elif self._current_randfunc is None:
self._current_randfunc = Random.new().read
return self._current_randfunc
def generate(self, bits, randfunc=None, progress_func=None):
"""Randomly generate a fresh, new DSA key.
:Parameters:
bits : int
Key length, or size (in bits) of the DSA modulus
*p*.
It must be a multiple of 64, in the closed
interval [512,1024].
randfunc : callable
Random number generation function; it should accept
a single integer N and return a string of random data
N bytes long.
If not specified, a new one will be instantiated
from ``Crypto.Random``.
progress_func : callable
Optional function that will be called with a short string
containing the key parameter currently being generated;
it's useful for interactive applications where a user is
waiting for a key to be generated.
:attention: You should always use a cryptographically secure random number generator,
such as the one defined in the ``Crypto.Random`` module; **don't** just use the
current time and the ``random`` module.
:Return: A DSA key object (`_DSAobj`).
:Raise ValueError:
When **bits** is too little, too big, or not a multiple of 64.
"""
# Check against FIPS 186-2, which says that the size of the prime p
# must be a multiple of 64 bits between 512 and 1024
for i in (0, 1, 2, 3, 4, 5, 6, 7, 8):
if bits == 512 + 64*i:
return self._generate(bits, randfunc, progress_func)
# The March 2006 draft of FIPS 186-3 also allows 2048 and 3072-bit
# primes, but only with longer q values. Since the current DSA
# implementation only supports a 160-bit q, we don't support larger
# values.
raise ValueError("Number of bits in p must be a multiple of 64 between 512 and 1024, not %d bits" % (bits,))
def _generate(self, bits, randfunc=None, progress_func=None):
rf = self._get_randfunc(randfunc)
obj = _DSA.generate_py(bits, rf, progress_func) # TODO: Don't use legacy _DSA module
key = self._math.dsa_construct(obj.y, obj.g, obj.p, obj.q, obj.x)
return _DSAobj(self, key)
def construct(self, tup):
"""Construct a DSA key from a tuple of valid DSA components.
The modulus *p* must be a prime.
The following equations must apply:
- p-1 = 0 mod q
- g^x = y mod p
- 0 < x < q
- 1 < g < p
:Parameters:
tup : tuple
A tuple of long integers, with 4 or 5 items
in the following order:
1. Public key (*y*).
2. Sub-group generator (*g*).
3. Modulus, finite field order (*p*).
4. Sub-group order (*q*).
5. Private key (*x*). Optional.
:Return: A DSA key object (`_DSAobj`).
"""
key = self._math.dsa_construct(*tup)
return _DSAobj(self, key)
_impl = DSAImplementation()
generate = _impl.generate
construct = _impl.construct
error = _impl.error
# vim:set ts=4 sw=4 sts=4 expandtab:
```
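A hedged usage sketch for the module above (key size and message are placeholders; `getRandomRange` from `Crypto.Util.number` is used here to pick the per-signature secret *k* in the closed range [1, q-1], as the `sign` docstring requires):
```python
from Crypto.PublicKey import DSA
from Crypto.Hash import SHA
from Crypto.Util import number
from Crypto import Random

key = DSA.generate(1024)                          # placeholder key size
digest = SHA.new(b'message to sign').digest()     # always sign a hash, not raw data
k = number.getRandomRange(1, key.q, Random.new().read)
signature = key.sign(digest, k)
assert key.publickey().verify(digest, signature)
```
Note that *k* must never be reused or disclosed; doing so reveals the private key.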
#### File: Random/OSRNG/posix.py
```python
__revision__ = "$Id$"
__all__ = ['DevURandomRNG']
import errno
import os
import stat
from rng_base import BaseRNG
from Crypto.Util.py3compat import b
class DevURandomRNG(BaseRNG):
def __init__(self, devname=None):
if devname is None:
self.name = "/dev/urandom"
else:
self.name = devname
# Test that /dev/urandom is a character special device
f = open(self.name, "rb", 0)
fmode = os.fstat(f.fileno())[stat.ST_MODE]
if not stat.S_ISCHR(fmode):
f.close()
raise TypeError("%r is not a character special device" % (self.name,))
self.__file = f
BaseRNG.__init__(self)
def _close(self):
self.__file.close()
def _read(self, N):
# Starting with Python 3 open with buffering=0 returns a FileIO object.
# FileIO.read behaves like read(2) and not like fread(3) and thus we
# have to handle the case that read returns less data as requested here
# more carefully.
data = b("")
while len(data) < N:
try:
d = self.__file.read(N - len(data))
except IOError, e:
# read(2) has been interrupted by a signal; redo the read
if e.errno == errno.EINTR:
continue
raise
if d is None:
# __file is in non-blocking mode and no data is available
return data
if len(d) == 0:
# __file is in blocking mode and arrived at EOF
return data
data += d
return data
def new(*args, **kwargs):
return DevURandomRNG(*args, **kwargs)
# vim:set ts=4 sw=4 sts=4 expandtab:
```
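A brief usage sketch (this relies on the public `read`/`close` methods inherited from `BaseRNG`, which is how the other OSRNG backends are consumed as well):
```python
from Crypto.Random.OSRNG.posix import new

rng = new()              # opens /dev/urandom
data = rng.read(16)      # 16 random bytes
assert len(data) == 16
rng.close()
```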
#### File: site-packages/PIL/ImageFont.py
```python
import Image
import os, string, sys
class _imagingft_not_installed:
# module placeholder
def __getattr__(self, id):
raise ImportError("The _imagingft C module is not installed")
try:
import _imagingft
core = _imagingft
del _imagingft
except ImportError:
core = _imagingft_not_installed()
# FIXME: add support for pilfont2 format (see FontFile.py)
# --------------------------------------------------------------------
# Font metrics format:
# "PILfont" LF
# fontdescriptor LF
# (optional) key=value... LF
# "DATA" LF
# binary data: 256*10*2 bytes (dx, dy, dstbox, srcbox)
#
# To place a character, cut out srcbox and paste at dstbox,
# relative to the character position. Then move the character
# position according to dx, dy.
# --------------------------------------------------------------------
##
# The <b>ImageFont</b> module defines a class with the same name.
# Instances of this class store bitmap fonts, and are used with the
# <b>text</b> method of the <b>ImageDraw</b> class.
# <p>
# PIL uses its own font file format to store bitmap fonts. You can
# use the <b>pilfont</b> utility to convert BDF and PCF font
# descriptors (X window font formats) to this format.
# <p>
# Starting with version 1.1.4, PIL can be configured to support
# TrueType and OpenType fonts. For earlier versions, TrueType
# support is only available as part of the imToolkit package
#
# @see ImageDraw#ImageDraw.text
# @see pilfont
class ImageFont:
"PIL font wrapper"
def _load_pilfont(self, filename):
file = open(filename, "rb")
for ext in (".png", ".gif", ".pbm"):
try:
fullname = os.path.splitext(filename)[0] + ext
image = Image.open(fullname)
except:
pass
else:
if image and image.mode in ("1", "L"):
break
else:
raise IOError("cannot find glyph data file")
self.file = fullname
return self._load_pilfont_data(file, image)
def _load_pilfont_data(self, file, image):
# read PILfont header
if file.readline() != "PILfont\n":
raise SyntaxError("Not a PILfont file")
d = string.split(file.readline(), ";")
self.info = [] # FIXME: should be a dictionary
while True:
s = file.readline()
if not s or s == "DATA\n":
break
self.info.append(s)
# read PILfont metrics
data = file.read(256*20)
# check image
if image.mode not in ("1", "L"):
raise TypeError("invalid font image mode")
image.load()
self.font = Image.core.font(image.im, data)
# delegate critical operations to internal type
self.getsize = self.font.getsize
self.getmask = self.font.getmask
##
# Wrapper for FreeType fonts. Application code should use the
# <b>truetype</b> factory function to create font objects.
class FreeTypeFont:
"FreeType font wrapper (requires _imagingft service)"
def __init__(self, file, size, index=0, encoding=""):
# FIXME: use service provider instead
self.font = core.getfont(file, size, index, encoding)
def getname(self):
return self.font.family, self.font.style
def getmetrics(self):
return self.font.ascent, self.font.descent
def getsize(self, text):
return self.font.getsize(text)[0]
def getmask(self, text, mode=""):
return self.getmask2(text, mode)[0]
def getmask2(self, text, mode="", fill=Image.core.fill):
size, offset = self.font.getsize(text)
im = fill("L", size, 0)
self.font.render(text, im.id, mode=="1")
return im, offset
##
# Wrapper that creates a transposed font from any existing font
# object.
#
# @param font A font object.
# @param orientation An optional orientation. If given, this should
# be one of Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM,
# Image.ROTATE_90, Image.ROTATE_180, or Image.ROTATE_270.
class TransposedFont:
"Wrapper for writing rotated or mirrored text"
def __init__(self, font, orientation=None):
self.font = font
self.orientation = orientation # any 'transpose' argument, or None
def getsize(self, text):
w, h = self.font.getsize(text)
if self.orientation in (Image.ROTATE_90, Image.ROTATE_270):
return h, w
return w, h
def getmask(self, text, mode=""):
im = self.font.getmask(text, mode)
if self.orientation is not None:
return im.transpose(self.orientation)
return im
##
# Load font file. This function loads a font object from the given
# bitmap font file, and returns the corresponding font object.
#
# @param filename Name of font file.
# @return A font object.
# @exception IOError If the file could not be read.
def load(filename):
"Load a font file."
f = ImageFont()
f._load_pilfont(filename)
return f
##
# Load a TrueType or OpenType font file, and create a font object.
# This function loads a font object from the given file, and creates
# a font object for a font of the given size.
# <p>
# This function requires the _imagingft service.
#
# @param filename A truetype font file. Under Windows, if the file
# is not found in this filename, the loader also looks in Windows
# <b>fonts</b> directory
# @param size The requested size, in points.
# @param index Which font face to load (default is first available face).
# @param encoding Which font encoding to use (default is Unicode). Common
# encodings are "unic" (Unicode), "symb" (Microsoft Symbol), "ADOB"
# (Adobe Standard), "ADBE" (Adobe Expert), and "armn" (Apple Roman).
# See the FreeType documentation for more information.
# @return A font object.
# @exception IOError If the file could not be read.
def truetype(filename, size, index=0, encoding=""):
"Load a truetype font file."
try:
return FreeTypeFont(filename, size, index, encoding)
except IOError:
if sys.platform == "win32":
# check the windows font repository
# NOTE: must use uppercase WINDIR, to work around bugs in
# 1.5.2's os.environ.get()
windir = os.environ.get("WINDIR")
if windir:
filename = os.path.join(windir, "fonts", filename)
return FreeTypeFont(filename, size, index, encoding)
raise
##
# Load font file. Same as load, but searches for a bitmap font along
# the Python path.
#
# @param filename Name of font file.
# @return A font object.
# @exception IOError If the file could not be read.
# @see #load
def load_path(filename):
"Load a font file, searching along the Python path."
for dir in sys.path:
if Image.isDirectory(dir):
try:
return load(os.path.join(dir, filename))
except IOError:
pass
raise IOError("cannot find font file")
##
# Load a (probably rather ugly) default font.
#
# @return A font object.
def load_default():
"Load a default font."
from StringIO import StringIO
import base64
f = ImageFont()
f._load_pilfont_data(
# courB08
StringIO(base64.decodestring('''
UElMZm9udAo7Ozs7OzsxMDsKREFUQQoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAA//8AAQAAAAAAAAABAAEA
BgAAAAH/+gADAAAAAQAAAAMABgAGAAAAAf/6AAT//QADAAAABgADAAYAAAAA//kABQABAAYAAAAL
AAgABgAAAAD/+AAFAAEACwAAABAACQAGAAAAAP/5AAUAAAAQAAAAFQAHAAYAAP////oABQAAABUA
AAAbAAYABgAAAAH/+QAE//wAGwAAAB4AAwAGAAAAAf/5AAQAAQAeAAAAIQAIAAYAAAAB//kABAAB
ACEAAAAkAAgABgAAAAD/+QAE//0AJAAAACgABAAGAAAAAP/6AAX//wAoAAAALQAFAAYAAAAB//8A
BAACAC0AAAAwAAMABgAAAAD//AAF//0AMAAAADUAAQAGAAAAAf//AAMAAAA1AAAANwABAAYAAAAB
//kABQABADcAAAA7AAgABgAAAAD/+QAFAAAAOwAAAEAABwAGAAAAAP/5AAYAAABAAAAARgAHAAYA
AAAA//kABQAAAEYAAABLAAcABgAAAAD/+QAFAAAASwAAAFAABwAGAAAAAP/5AAYAAABQAAAAVgAH
AAYAAAAA//kABQAAAFYAAABbAAcABgAAAAD/+QAFAAAAWwAAAGAABwAGAAAAAP/5AAUAAABgAAAA
ZQAHAAYAAAAA//kABQAAAGUAAABqAAcABgAAAAD/+QAFAAAAagAAAG8ABwAGAAAAAf/8AAMAAABv
AAAAcQAEAAYAAAAA//wAAwACAHEAAAB0AAYABgAAAAD/+gAE//8AdAAAAHgABQAGAAAAAP/7AAT/
/gB4AAAAfAADAAYAAAAB//oABf//AHwAAACAAAUABgAAAAD/+gAFAAAAgAAAAIUABgAGAAAAAP/5
AAYAAQCFAAAAiwAIAAYAAP////oABgAAAIsAAACSAAYABgAA////+gAFAAAAkgAAAJgABgAGAAAA
AP/6AAUAAACYAAAAnQAGAAYAAP////oABQAAAJ0AAACjAAYABgAA////+gAFAAAAowAAAKkABgAG
AAD////6AAUAAACpAAAArwAGAAYAAAAA//oABQAAAK8AAAC0AAYABgAA////+gAGAAAAtAAAALsA
BgAGAAAAAP/6AAQAAAC7AAAAvwAGAAYAAP////oABQAAAL8AAADFAAYABgAA////+gAGAAAAxQAA
AMwABgAGAAD////6AAUAAADMAAAA0gAGAAYAAP////oABQAAANIAAADYAAYABgAA////+gAGAAAA
2AAAAN8ABgAGAAAAAP/6AAUAAADfAAAA5AAGAAYAAP////oABQAAAOQAAADqAAYABgAAAAD/+gAF
AAEA6gAAAO8ABwAGAAD////6AAYAAADvAAAA9gAGAAYAAAAA//oABQAAAPYAAAD7AAYABgAA////
+gAFAAAA+wAAAQEABgAGAAD////6AAYAAAEBAAABCAAGAAYAAP////oABgAAAQgAAAEPAAYABgAA
////+gAGAAABDwAAARYABgAGAAAAAP/6AAYAAAEWAAABHAAGAAYAAP////oABgAAARwAAAEjAAYA
BgAAAAD/+gAFAAABIwAAASgABgAGAAAAAf/5AAQAAQEoAAABKwAIAAYAAAAA//kABAABASsAAAEv
AAgABgAAAAH/+QAEAAEBLwAAATIACAAGAAAAAP/5AAX//AEyAAABNwADAAYAAAAAAAEABgACATcA
AAE9AAEABgAAAAH/+QAE//wBPQAAAUAAAwAGAAAAAP/7AAYAAAFAAAABRgAFAAYAAP////kABQAA
AUYAAAFMAAcABgAAAAD/+wAFAAABTAAAAVEABQAGAAAAAP/5AAYAAAFRAAABVwAHAAYAAAAA//sA
BQAAAVcAAAFcAAUABgAAAAD/+QAFAAABXAAAAWEABwAGAAAAAP/7AAYAAgFhAAABZwAHAAYAAP//
//kABQAAAWcAAAFtAAcABgAAAAD/+QAGAAABbQAAAXMABwAGAAAAAP/5AAQAAgFzAAABdwAJAAYA
AP////kABgAAAXcAAAF+AAcABgAAAAD/+QAGAAABfgAAAYQABwAGAAD////7AAUAAAGEAAABigAF
AAYAAP////sABQAAAYoAAAGQAAUABgAAAAD/+wAFAAABkAAAAZUABQAGAAD////7AAUAAgGVAAAB
mwAHAAYAAAAA//sABgACAZsAAAGhAAcABgAAAAD/+wAGAAABoQAAAacABQAGAAAAAP/7AAYAAAGn
AAABrQAFAAYAAAAA//kABgAAAa0AAAGzAAcABgAA////+wAGAAABswAAAboABQAGAAD////7AAUA
AAG6AAABwAAFAAYAAP////sABgAAAcAAAAHHAAUABgAAAAD/+wAGAAABxwAAAc0ABQAGAAD////7
AAYAAgHNAAAB1AAHAAYAAAAA//sABQAAAdQAAAHZAAUABgAAAAH/+QAFAAEB2QAAAd0ACAAGAAAA
Av/6AAMAAQHdAAAB3gAHAAYAAAAA//kABAABAd4AAAHiAAgABgAAAAD/+wAF//0B4gAAAecAAgAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAB
//sAAwACAecAAAHpAAcABgAAAAD/+QAFAAEB6QAAAe4ACAAGAAAAAP/5AAYAAAHuAAAB9AAHAAYA
AAAA//oABf//AfQAAAH5AAUABgAAAAD/+QAGAAAB+QAAAf8ABwAGAAAAAv/5AAMAAgH/AAACAAAJ
AAYAAAAA//kABQABAgAAAAIFAAgABgAAAAH/+gAE//sCBQAAAggAAQAGAAAAAP/5AAYAAAIIAAAC
DgAHAAYAAAAB//kABf/+Ag4AAAISAAUABgAA////+wAGAAACEgAAAhkABQAGAAAAAP/7AAX//gIZ
AAACHgADAAYAAAAA//wABf/9Ah4AAAIjAAEABgAAAAD/+QAHAAACIwAAAioABwAGAAAAAP/6AAT/
+wIqAAACLgABAAYAAAAA//kABP/8Ai4AAAIyAAMABgAAAAD/+gAFAAACMgAAAjcABgAGAAAAAf/5
AAT//QI3AAACOgAEAAYAAAAB//kABP/9AjoAAAI9AAQABgAAAAL/+QAE//sCPQAAAj8AAgAGAAD/
///7AAYAAgI/AAACRgAHAAYAAAAA//kABgABAkYAAAJMAAgABgAAAAH//AAD//0CTAAAAk4AAQAG
AAAAAf//AAQAAgJOAAACUQADAAYAAAAB//kABP/9AlEAAAJUAAQABgAAAAH/+QAF//4CVAAAAlgA
BQAGAAD////7AAYAAAJYAAACXwAFAAYAAP////kABgAAAl8AAAJmAAcABgAA////+QAGAAACZgAA
Am0ABwAGAAD////5AAYAAAJtAAACdAAHAAYAAAAA//sABQACAnQAAAJ5AAcABgAA////9wAGAAAC
eQAAAoAACQAGAAD////3AAYAAAKAAAAChwAJAAYAAP////cABgAAAocAAAKOAAkABgAA////9wAG
AAACjgAAApUACQAGAAD////4AAYAAAKVAAACnAAIAAYAAP////cABgAAApwAAAKjAAkABgAA////
+gAGAAACowAAAqoABgAGAAAAAP/6AAUAAgKqAAACrwAIAAYAAP////cABQAAAq8AAAK1AAkABgAA
////9wAFAAACtQAAArsACQAGAAD////3AAUAAAK7AAACwQAJAAYAAP////gABQAAAsEAAALHAAgA
BgAAAAD/9wAEAAACxwAAAssACQAGAAAAAP/3AAQAAALLAAACzwAJAAYAAAAA//cABAAAAs8AAALT
AAkABgAAAAD/+AAEAAAC0wAAAtcACAAGAAD////6AAUAAALXAAAC3QAGAAYAAP////cABgAAAt0A
AALkAAkABgAAAAD/9wAFAAAC5AAAAukACQAGAAAAAP/3AAUAAALpAAAC7gAJAAYAAAAA//cABQAA
Au4AAALzAAkABgAAAAD/9wAFAAAC8wAAAvgACQAGAAAAAP/4AAUAAAL4AAAC/QAIAAYAAAAA//oA
Bf//Av0AAAMCAAUABgAA////+gAGAAADAgAAAwkABgAGAAD////3AAYAAAMJAAADEAAJAAYAAP//
//cABgAAAxAAAAMXAAkABgAA////9wAGAAADFwAAAx4ACQAGAAD////4AAYAAAAAAAoABwASAAYA
AP////cABgAAAAcACgAOABMABgAA////+gAFAAAADgAKABQAEAAGAAD////6AAYAAAAUAAoAGwAQ
AAYAAAAA//gABgAAABsACgAhABIABgAAAAD/+AAGAAAAIQAKACcAEgAGAAAAAP/4AAYAAAAnAAoA
LQASAAYAAAAA//gABgAAAC0ACgAzABIABgAAAAD/+QAGAAAAMwAKADkAEQAGAAAAAP/3AAYAAAA5
AAoAPwATAAYAAP////sABQAAAD8ACgBFAA8ABgAAAAD/+wAFAAIARQAKAEoAEQAGAAAAAP/4AAUA
AABKAAoATwASAAYAAAAA//gABQAAAE8ACgBUABIABgAAAAD/+AAFAAAAVAAKAFkAEgAGAAAAAP/5
AAUAAABZAAoAXgARAAYAAAAA//gABgAAAF4ACgBkABIABgAAAAD/+AAGAAAAZAAKAGoAEgAGAAAA
AP/4AAYAAABqAAoAcAASAAYAAAAA//kABgAAAHAACgB2ABEABgAAAAD/+AAFAAAAdgAKAHsAEgAG
AAD////4AAYAAAB7AAoAggASAAYAAAAA//gABQAAAIIACgCHABIABgAAAAD/+AAFAAAAhwAKAIwA
EgAGAAAAAP/4AAUAAACMAAoAkQASAAYAAAAA//gABQAAAJEACgCWABIABgAAAAD/+QAFAAAAlgAK
AJsAEQAGAAAAAP/6AAX//wCbAAoAoAAPAAYAAAAA//oABQABAKAACgClABEABgAA////+AAGAAAA
pQAKAKwAEgAGAAD////4AAYAAACsAAoAswASAAYAAP////gABgAAALMACgC6ABIABgAA////+QAG
AAAAugAKAMEAEQAGAAD////4AAYAAgDBAAoAyAAUAAYAAP////kABQACAMgACgDOABMABgAA////
+QAGAAIAzgAKANUAEw==
''')), Image.open(StringIO(base64.decodestring('''
iVBORw0KGgoAAAANSUhEUgAAAx4AAAAUAQAAAAArMtZoAAAEwElEQVR4nABlAJr/AHVE4czCI/4u
Mc4b7vuds/xzjz5/3/7u/n9vMe7vnfH/9++vPn/xyf5zhxzjt8GHw8+2d83u8x27199/nxuQ6Od9
M43/5z2I+9n9ZtmDBwMQECDRQw/eQIQohJXxpBCNVE6QCCAAAAD//wBlAJr/AgALyj1t/wINwq0g
LeNZUworuN1cjTPIzrTX6ofHWeo3v336qPzfEwRmBnHTtf95/fglZK5N0PDgfRTslpGBvz7LFc4F
IUXBWQGjQ5MGCx34EDFPwXiY4YbYxavpnhHFrk14CDAAAAD//wBlAJr/AgKqRooH2gAgPeggvUAA
Bu2WfgPoAwzRAABAAAAAAACQgLz/3Uv4Gv+gX7BJgDeeGP6AAAD1NMDzKHD7ANWr3loYbxsAD791
NAADfcoIDyP44K/jv4Y63/Z+t98Ovt+ub4T48LAAAAD//wBlAJr/AuplMlADJAAAAGuAphWpqhMx
in0A/fRvAYBABPgBwBUgABBQ/sYAyv9g0bCHgOLoGAAAAAAAREAAwI7nr0ArYpow7aX8//9LaP/9
SjdavWA8ePHeBIKB//81/83ndznOaXx379wAAAD//wBlAJr/AqDxW+D3AABAAbUh/QMnbQag/gAY
AYDAAACgtgD/gOqAAAB5IA/8AAAk+n9w0AAA8AAAmFRJuPo27ciC0cD5oeW4E7KA/wD3ECMAn2tt
y8PgwH8AfAxFzC0JzeAMtratAsC/ffwAAAD//wBlAJr/BGKAyCAA4AAAAvgeYTAwHd1kmQF5chkG
ABoMIHcL5xVpTfQbUqzlAAAErwAQBgAAEOClA5D9il08AEh/tUzdCBsXkbgACED+woQg8Si9VeqY
lODCn7lmF6NhnAEYgAAA/NMIAAAAAAD//2JgjLZgVGBg5Pv/Tvpc8hwGBjYGJADjHDrAwPzAjv/H
/Wf3PzCwtzcwHmBgYGcwbZz8wHaCAQMDOwMDQ8MCBgYOC3W7mp+f0w+wHOYxO3OG+e376hsMZjk3
AAAAAP//YmCMY2A4wMAIN5e5gQETPD6AZisDAwMDgzSDAAPjByiHcQMDAwMDg1nOze1lByRu5/47
c4859311AYNZzg0AAAAA//9iYGDBYihOIIMuwIjGL39/fwffA8b//xv/P2BPtzzHwCBjUQAAAAD/
/yLFBrIBAAAA//9i1HhcwdhizX7u8NZNzyLbvT97bfrMf/QHI8evOwcSqGUJAAAA//9iYBB81iSw
pEE170Qrg5MIYydHqwdDQRMrAwcVrQAAAAD//2J4x7j9AAMDn8Q/BgYLBoaiAwwMjPdvMDBYM1Tv
oJodAAAAAP//Yqo/83+dxePWlxl3npsel9lvLfPcqlE9725C+acfVLMEAAAA//9i+s9gwCoaaGMR
evta/58PTEWzr21hufPjA8N+qlnBwAAAAAD//2JiWLci5v1+HmFXDqcnULE/MxgYGBj+f6CaJQAA
AAD//2Ji2FrkY3iYpYC5qDeGgeEMAwPDvwQBBoYvcTwOVLMEAAAA//9isDBgkP///0EOg9z35v//
Gc/eeW7BwPj5+QGZhANUswMAAAD//2JgqGBgYGBgqEMXlvhMPUsAAAAA//8iYDd1AAAAAP//AwDR
w7IkEbzhVQAAAABJRU5ErkJggg==
'''))))
return f
if __name__ == "__main__":
# create font data chunk for embedding
import base64, os, sys
font = "../Images/courB08"
print " f._load_pilfont_data("
print " # %s" % os.path.basename(font)
print " StringIO(base64.decodestring('''"
base64.encode(open(font + ".pil", "rb"), sys.stdout)
print "''')), Image.open(StringIO(base64.decodestring('''"
base64.encode(open(font + ".pbm", "rb"), sys.stdout)
print "'''))))"
```
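A short usage sketch tying this module to ImageDraw (the font path and output name are placeholders; `load_default()` is the fallback when no TrueType file is available):
```python
import Image, ImageDraw, ImageFont

im = Image.new("RGB", (200, 50), "white")
draw = ImageDraw.Draw(im)
try:
    font = ImageFont.truetype("arial.ttf", 16)   # placeholder font file
except IOError:
    font = ImageFont.load_default()
draw.text((10, 10), "Hello", font=font, fill="black")
im.save("hello.png")
```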
#### File: axscript/client/error.py
```python
import sys, traceback
from win32com.axscript import axscript
import winerror
import win32com.server.exception
import win32com.server.util
import pythoncom
import re
debugging = 0
def FormatForAX(text):
"""Format a string suitable for an AX Host
"""
# Replace all " with ', so it works OK in HTML (ie, ASP)
return ExpandTabs(AddCR(text))
def ExpandTabs(text):
return re.sub('\t',' ', text)
def AddCR(text):
return re.sub('\n','\r\n',text)
class IActiveScriptError:
"""An implementation of IActiveScriptError
The ActiveX Scripting host calls this client whenever we report
an exception to it. This interface provides the exception details
for the host to report to the user.
"""
_com_interfaces_ = [axscript.IID_IActiveScriptError]
_public_methods_ = ["GetSourceLineText","GetSourcePosition","GetExceptionInfo"]
def _query_interface_(self, iid):
print "IActiveScriptError QI - unknown IID", iid
return 0
def _SetExceptionInfo(self, exc):
self.exception = exc
def GetSourceLineText(self):
return self.exception.linetext
def GetSourcePosition(self):
ctx = self.exception.sourceContext
# Zero based in the debugger (but our columns are too!)
return ctx, self.exception.lineno + self.exception.startLineNo-1, self.exception.colno
def GetExceptionInfo(self):
return self.exception
class AXScriptException(win32com.server.exception.COMException):
"""A class used as a COM exception.
Note this has attributes which conform to the standard attributes
for COM exceptions, plus a few others specific to our IActiveScriptError
object.
"""
def __init__(self, site, codeBlock, exc_type, exc_value, exc_traceback):
# set properties base class shares via base ctor...
win32com.server.exception.COMException.__init__( self, \
description = "Unknown Exception", \
scode = winerror.DISP_E_EXCEPTION, \
source = "Python ActiveX Scripting Engine",
)
# And my other values...
if codeBlock is None:
self.sourceContext = 0
self.startLineNo = 0
else:
self.sourceContext = codeBlock.sourceContextCookie
self.startLineNo = codeBlock.startLineNumber
self.linetext = ""
self.__BuildFromException(site, exc_type, exc_value, exc_traceback)
def __BuildFromException(self, site, type , value, tb):
if debugging:
import linecache
linecache.clearcache()
try:
if issubclass(type, SyntaxError):
self._BuildFromSyntaxError(site, value, tb)
else:
self._BuildFromOther(site, type, value, tb)
except: # Error extracting traceback info!!!
traceback.print_exc()
# re-raise.
raise
def _BuildFromSyntaxError(self, site, exc, tb):
value = exc.args
# All syntax errors should have a message as element 0
try:
msg = value[0]
except:
msg = "Unknown Error (%s)" % (value,)
try:
(filename, lineno, offset, line) = value[1]
# Some of these may be None, which upsets us!
if offset is None:
offset = 0
if line is None:
line = ""
except:
msg = "Unknown"
lineno = 0
offset = 0
line = "Unknown"
self.description=FormatForAX(msg)
self.lineno = lineno
self.colno = offset - 1
self.linetext = ExpandTabs(line.rstrip())
def _BuildFromOther(self, site, exc_type, value, tb):
self.colno = -1
self.lineno = 0
if debugging: # Full traceback if debugging.
list=traceback.format_exception(exc_type, value, tb)
self.description = ExpandTabs(''.join(list))
return
# Run down the traceback list, looking for the first "<Script..>"
# Hide traceback above this. In addition, keep going down
# looking for a "_*_" attribute, and below hide these also.
hide_names = ["r_import","r_reload","r_open"] # hide from these functions down in the traceback.
depth = None
tb_top = tb
while tb_top:
filename, lineno, name, line = self.ExtractTracebackInfo(tb_top, site)
if filename[:7]=="<Script":
break
tb_top = tb_top.tb_next
format_items = []
if tb_top: # found one.
depth = 0
tb_look = tb_top
# Look down for our bottom
while tb_look:
filename, lineno, name, line = self.ExtractTracebackInfo(tb_look, site)
if name in hide_names:
break
# We can report a line-number, but not a filename. Therefore,
# we return the last line-number we find in one of our script
# blocks.
if filename.startswith("<Script"):
self.lineno = lineno
self.linetext = line
format_items.append((filename, lineno, name, line))
depth = depth + 1
tb_look = tb_look.tb_next
else:
depth = None
tb_top = tb
bits = ['Traceback (most recent call last):\n']
bits.extend(traceback.format_list(format_items))
if exc_type==pythoncom.com_error:
desc = "%s (0x%x)" % (value[1], value[0])
if value[0]==winerror.DISP_E_EXCEPTION and value[2] and value[2][2]:
desc = value[2][2]
bits.append("COM Error: "+desc)
else:
bits.extend(traceback.format_exception_only(exc_type, value))
# XXX - this utf8 encoding seems bogus. From well before py3k,
# we had the comment:
# > all items in the list are utf8 courtesy of Python magically
# > converting unicode to utf8 before compilation.
# but that is likely just confusion from early unicode days;
# Python isn't doing it, pywin32 probably was, so 'mbcs' would
# be the default encoding. We should never hit this these days
# anyway, but on py3k, we *never* will, and str objects there
# don't have a decode method...
if sys.version_info < (3,):
for i in xrange(len(bits)):
if type(bits[i]) is str:
#assert type(bits[i]) is str, type(bits[i])
bits[i] = bits[i].decode('utf8')
self.description = ExpandTabs(u''.join(bits))
# Clear tracebacks etc.
tb = tb_top = tb_look = None
def ExtractTracebackInfo(self, tb, site):
import linecache
f = tb.tb_frame
lineno = tb.tb_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
line = linecache.getline(filename, lineno)
if not line:
try:
codeBlock = site.scriptCodeBlocks[filename]
except KeyError:
codeBlock = None
if codeBlock:
# Note: 'line' will now be unicode.
line = codeBlock.GetLineNo(lineno)
if line:
line = line.strip()
else:
line = None
return filename, lineno, name, line
def __repr__(self):
return "AXScriptException Object with description:" + self.description
def ProcessAXScriptException(scriptingSite, debugManager, exceptionInstance):
"""General function to handle any exception in AX code
This function creates an instance of our IActiveScriptError interface, and
gives it to the host, along with out exception class. The host will
likely call back on the IActiveScriptError interface to get the source text
and other information not normally in COM exceptions.
"""
# traceback.print_exc()
instance = IActiveScriptError()
instance._SetExceptionInfo(exceptionInstance)
gateway = win32com.server.util.wrap(instance, axscript.IID_IActiveScriptError)
if debugManager:
fCallOnError = debugManager.HandleRuntimeError()
if not fCallOnError:
return None
try:
result = scriptingSite.OnScriptError(gateway)
except pythoncom.com_error, details:
print "**OnScriptError failed:", details
print "Exception description:'%s'" % (repr(exceptionInstance.description))
print "Exception text:'%s'" % (repr(exceptionInstance.linetext))
result = winerror.S_FALSE
if result==winerror.S_OK:
# If the above returns NOERROR, it is assumed the error has been
# correctly registered and the value SCRIPT_E_REPORTED is returned.
ret = win32com.server.exception.COMException(scode=axscript.SCRIPT_E_REPORTED)
return ret
else:
# The error is taken to be unreported and is propagated up the call stack
# via the IDispatch::Invoke's EXCEPINFO parameter (hr returned is DISP_E_EXCEPTION.
return exceptionInstance
```
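A tiny illustration of the formatting helpers above (the traceback plumbing itself needs a live ActiveX scripting host, so only the pure text helper is shown; the import assumes the snippet sits next to error.py, matching the relative-import style used by framework.py below):
```python
from error import FormatForAX

text = "line one\n\tline two"
print repr(FormatForAX(text))   # tabs expanded to spaces, "\n" becomes "\r\n"
```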
#### File: axscript/client/framework.py
```python
import sys
from win32com.axscript import axscript
import win32com.server.util
import win32com.client.connect # Need simple connection point support
import win32api, winerror
import pythoncom
import types
import re
def RemoveCR(text):
# No longer just "RemoveCR" - should be renamed to
# FixNewlines, or something. Idea is to fix arbitrary newlines into
# something Python can compile...
return re.sub('(\r\n)|\r|(\n\r)','\n',text)
SCRIPTTEXT_FORCEEXECUTION = -2147483648 # 0x80000000
SCRIPTTEXT_ISEXPRESSION = 0x00000020
SCRIPTTEXT_ISPERSISTENT = 0x00000040
from win32com.server.exception import Exception, IsCOMServerException
import error # ax.client.error
state_map = {
axscript.SCRIPTSTATE_UNINITIALIZED: "SCRIPTSTATE_UNINITIALIZED",
axscript.SCRIPTSTATE_INITIALIZED: "SCRIPTSTATE_INITIALIZED",
axscript.SCRIPTSTATE_STARTED: "SCRIPTSTATE_STARTED",
axscript.SCRIPTSTATE_CONNECTED: "SCRIPTSTATE_CONNECTED",
axscript.SCRIPTSTATE_DISCONNECTED: "SCRIPTSTATE_DISCONNECTED",
axscript.SCRIPTSTATE_CLOSED: "SCRIPTSTATE_CLOSED",
}
def profile(fn, *args):
import profile
prof = profile.Profile()
try:
# roll on 1.6 :-)
# return prof.runcall(fn, *args)
return prof.runcall(*(fn,) + args)
finally:
import pstats
# Damn - really want to send this to Excel!
# width, list = pstats.Stats(prof).strip_dirs().get_print_list([])
pstats.Stats(prof).strip_dirs().sort_stats("time").print_stats()
class SafeOutput:
softspace=1
def __init__(self, redir=None):
if redir is None: redir = sys.stdout
self.redir=redir
def write(self,message):
try:
self.redir.write(message)
except:
win32api.OutputDebugString(message.encode('mbcs'))
def flush(self):
pass
def close(self):
pass
# Make sure we have a valid sys.stdout/stderr, otherwise out
# print and trace statements may raise an exception
def MakeValidSysOuts():
if not isinstance(sys.stdout, SafeOutput):
sys.stdout = sys.stderr = SafeOutput()
# and for the sake of working around something I can't understand...
# prevent keyboard interrupts from killing IIS
import signal
def noOp(a,b):
# it would be nice to get to the bottom of this, so a warning to
# the debug console can't hurt.
print "WARNING: Ignoring keyboard interrupt from ActiveScripting engine"
# If someone else has already redirected, then assume they know what they are doing!
if signal.getsignal(signal.SIGINT) == signal.default_int_handler:
try:
signal.signal(signal.SIGINT, noOp)
except ValueError:
# Not the main thread - can't do much.
pass
def trace(*args):
"""A function used instead of "print" for debugging output.
"""
for arg in args:
print arg,
print
def RaiseAssert(scode, desc):
"""A debugging function that raises an exception considered an "Assertion".
"""
print "**************** ASSERTION FAILED *******************"
print desc
raise Exception(desc, scode)
class AXScriptCodeBlock:
"""An object which represents a chunk of code in an AX Script
"""
def __init__(self, name, codeText, sourceContextCookie, startLineNumber, flags):
self.name = name
self.codeText = codeText
self.codeObject = None
self.sourceContextCookie = sourceContextCookie
self.startLineNumber = startLineNumber
self.flags = flags
self.beenExecuted = 0
def GetFileName(self):
# Gets the "file name" for Python - uses <...> so Python doesnt think
# it is a real file.
return "<%s>" % self.name
def GetDisplayName(self):
return self.name
def GetLineNo(self, no):
pos = -1
for i in range(no-1):
pos = self.codeText.find('\n', pos+1)
if pos==-1: pos=len(self.codeText)
epos = self.codeText.find('\n', pos+1)
if epos==-1:
epos=len(self.codeText)
return self.codeText[pos+1:epos].strip()
class Event:
"""A single event for a ActiveX named object.
"""
def __init__(self):
self.name = "<None>"
def __repr__(self):
return "<%s at %d: %s>" % (self.__class__.__name__, id(self), self.name)
def Reset(self):
pass
def Close(self):
pass
def Build(self, typeinfo, funcdesc):
self.dispid = funcdesc[0]
self.name = typeinfo.GetNames(self.dispid)[0]
# print "Event.Build() - Event Name is ", self.name
class EventSink:
"""A set of events against an item. Note this is a COM client for connection points.
"""
_public_methods_ = []
def __init__(self, myItem, coDispatch):
self.events = {}
self.connection = None
self.coDispatch = coDispatch
self.myScriptItem = myItem
self.myInvokeMethod = myItem.GetEngine().ProcessScriptItemEvent
self.iid = None
def Reset(self):
self.Disconnect()
def Close(self):
self.iid = None
self.myScriptItem = None
self.myInvokeMethod = None
self.coDispatch = None
for event in self.events.itervalues():
event.Reset()
self.events = {}
self.Disconnect()
# COM Connection point methods.
def _query_interface_(self, iid):
if iid==self.iid:
return win32com.server.util.wrap(self)
def _invoke_(self, dispid, lcid, wFlags, args):
try:
event = self.events[dispid]
except:
raise Exception(scode=winerror.DISP_E_MEMBERNOTFOUND)
#print "Invoke for ", event, "on", self.myScriptItem, " - calling", self.myInvokeMethod
return self.myInvokeMethod(self.myScriptItem, event, lcid, wFlags, args)
def GetSourceTypeInfo(self, typeinfo):
"""Gets the typeinfo for the Source Events for the passed typeinfo"""
attr = typeinfo.GetTypeAttr()
cFuncs = attr[6]
typeKind = attr[5]
if typeKind not in [pythoncom.TKIND_COCLASS, pythoncom.TKIND_INTERFACE]:
RaiseAssert(winerror.E_UNEXPECTED, "The typeKind of the object is unexpected")
cImplType = attr[8]
for i in xrange(cImplType):
# Look for the [source, default] interface on the coclass
# that isn't marked as restricted.
flags = typeinfo.GetImplTypeFlags(i)
flagsNeeded = pythoncom.IMPLTYPEFLAG_FDEFAULT | pythoncom.IMPLTYPEFLAG_FSOURCE
if (flags & ( flagsNeeded | pythoncom.IMPLTYPEFLAG_FRESTRICTED))==(flagsNeeded):
# Get the handle to the implemented interface.
href = typeinfo.GetRefTypeOfImplType(i)
return typeinfo.GetRefTypeInfo(href)
def BuildEvents(self):
# See if it is an extender object.
try:
mainTypeInfo = self.coDispatch.QueryInterface(axscript.IID_IProvideMultipleClassInfo)
isMulti = 1
numTypeInfos = mainTypeInfo.GetMultiTypeInfoCount()
except pythoncom.com_error:
isMulti = 0
numTypeInfos = 1
try:
mainTypeInfo = self.coDispatch.QueryInterface(pythoncom.IID_IProvideClassInfo)
except pythoncom.com_error:
numTypeInfos = 0
# Create an event handler for the item.
for item in xrange(numTypeInfos):
if isMulti:
typeinfo, flags = mainTypeInfo.GetInfoOfIndex(item, axscript.MULTICLASSINFO_GETTYPEINFO)
else:
typeinfo = mainTypeInfo.GetClassInfo()
sourceType = self.GetSourceTypeInfo(typeinfo)
cFuncs = 0
if sourceType:
attr = sourceType.GetTypeAttr()
self.iid = attr[0]
cFuncs = attr[6]
for i in xrange(cFuncs):
funcdesc = sourceType.GetFuncDesc(i)
event = Event()
event.Build(sourceType, funcdesc)
self.events[event.dispid] = event
def Connect(self):
if self.connection is not None or self.iid is None: return
# trace("Connect for sink item", self.myScriptItem.name, "with IID",str(self.iid))
self.connection = win32com.client.connect.SimpleConnection(self.coDispatch, self, self.iid)
def Disconnect(self):
if self.connection:
try:
self.connection.Disconnect()
except pythoncom.com_error:
pass # Ignore disconnection errors.
self.connection = None
class ScriptItem:
"""An item (or subitem) that is exposed to the ActiveX script
"""
def __init__(self, parentItem, name, dispatch, flags):
self.parentItem = parentItem
self.dispatch = dispatch
self.name = name
self.flags = flags
self.eventSink = None
self.subItems = {}
self.createdConnections = 0
self.isRegistered = 0
# trace("Creating ScriptItem", name, "of parent", parentItem,"with dispatch", dispatch)
def __repr__(self):
flagsDesc=""
if self.flags is not None and self.flags & axscript.SCRIPTITEM_GLOBALMEMBERS:
flagsDesc = "/Global"
return "<%s at %d: %s%s>" % (self.__class__.__name__, id(self), self.name,flagsDesc)
def _dump_(self, level):
flagDescs = []
if self.flags is not None and self.flags & axscript.SCRIPTITEM_GLOBALMEMBERS:
flagDescs.append("GLOBAL!")
if self.flags is None or self.flags & axscript.SCRIPTITEM_ISVISIBLE == 0:
flagDescs.append("NOT VISIBLE")
if self.flags is not None and self.flags & axscript.SCRIPTITEM_ISSOURCE:
flagDescs.append("EVENT SINK")
if self.flags is not None and self.flags & axscript.SCRIPTITEM_CODEONLY:
flagDescs.append("CODE ONLY")
print " " * level, "Name=", self.name, ", flags=", "/".join(flagDescs), self
for subItem in self.subItems.itervalues():
subItem._dump_(level+1)
def Reset(self):
self.Disconnect()
if self.eventSink:
self.eventSink.Reset()
self.isRegistered = 0
for subItem in self.subItems.itervalues():
subItem.Reset()
def Close(self):
self.Reset()
self.dispatch = None
self.parentItem = None
if self.eventSink:
self.eventSink.Close()
self.eventSink = None
for subItem in self.subItems.itervalues():
subItem.Close()
self.subItems = []
self.createdConnections = 0
def Register(self):
if self.isRegistered: return
# Get the type info to use to build this item.
# if not self.dispatch:
# id = self.parentItem.dispatch.GetIDsOfNames(self.name)
# print "DispID of me is", id
# result = self.parentItem.dispatch.Invoke(id, 0, pythoncom.DISPATCH_PROPERTYGET,1)
# if type(result)==pythoncom.TypeIIDs[pythoncom.IID_IDispatch]:
# self.dispatch = result
# else:
# print "*** No dispatch"
# return
# print "**** Made dispatch"
self.isRegistered = 1
# Register the sub-items.
for item in self.subItems.itervalues():
if not item.isRegistered:
item.Register()
def IsGlobal(self):
return self.flags & axscript.SCRIPTITEM_GLOBALMEMBERS
def IsVisible(self):
return (self.flags & (axscript.SCRIPTITEM_ISVISIBLE | axscript.SCRIPTITEM_ISSOURCE)) != 0
def GetEngine(self):
item = self
while item.parentItem.__class__==self.__class__:
item = item.parentItem
return item.parentItem
def _GetFullItemName(self):
ret = self.name
if self.parentItem:
try:
ret = self.parentItem._GetFullItemName() + "." + ret
except AttributeError:
pass
return ret
def GetSubItemClass(self):
return self.__class__
def GetSubItem(self, name):
return self.subItems[name.lower()]
def GetCreateSubItem(self, parentItem, name, dispatch, flags):
keyName = name.lower()
try:
rc = self.subItems[keyName]
# No changes allowed to existing flags.
if not rc.flags is None and not flags is None and rc.flags != flags:
raise Exception(scode=winerror.E_INVALIDARG)
# Existing item must not have a dispatch.
if not rc.dispatch is None and not dispatch is None:
raise Exception(scode=winerror.E_INVALIDARG)
rc.flags = flags # Setup the real flags.
rc.dispatch = dispatch
except KeyError:
rc = self.subItems[keyName] = self.GetSubItemClass()(parentItem, name, dispatch, flags)
return rc
# if self.dispatch is None:
# RaiseAssert(winerror.E_UNEXPECTED, "??")
def CreateConnections(self):
# Create (but do not connect to) the connection points.
if self.createdConnections: return
self.createdConnections = 1
# Nothing to do unless this is an event source
        # This flag means self, _and_ children, are connectable.
if self.flags & axscript.SCRIPTITEM_ISSOURCE:
self.BuildEvents()
self.FindBuildSubItemEvents()
def Connect(self):
# Connect to the already created connection points.
if self.eventSink:
self.eventSink.Connect()
for subItem in self.subItems.itervalues():
subItem.Connect()
def Disconnect(self):
# Disconnect from the connection points.
if self.eventSink:
self.eventSink.Disconnect()
for subItem in self.subItems.itervalues():
subItem.Disconnect()
def BuildEvents(self):
if self.eventSink is not None or self.dispatch is None:
RaiseAssert(winerror.E_UNEXPECTED, "Item already has built events, or no dispatch available?")
# trace("BuildEvents for named item", self._GetFullItemName())
self.eventSink = EventSink(self, self.dispatch)
self.eventSink.BuildEvents()
def FindBuildSubItemEvents(self):
# Called during connection to event source. Seeks out and connects to
# all children. As per the AX spec, this is not recursive
        # (ie, children sub-items are not searched)
try:
multiTypeInfo = self.dispatch.QueryInterface(axscript.IID_IProvideMultipleClassInfo)
numTypeInfos = multiTypeInfo.GetMultiTypeInfoCount()
except pythoncom.com_error:
return
for item in xrange(numTypeInfos):
typeinfo, flags = multiTypeInfo.GetInfoOfIndex(item, axscript.MULTICLASSINFO_GETTYPEINFO)
defaultType = self.GetDefaultSourceTypeInfo(typeinfo)
index = 0
while 1:
try:
fdesc = defaultType.GetFuncDesc(index)
except pythoncom.com_error:
break # No more funcs
index = index + 1
dispid = fdesc[0]
funckind = fdesc[3]
invkind = fdesc[4]
elemdesc = fdesc[8]
funcflags = fdesc[9]
try:
isSubObject = not (funcflags & pythoncom.FUNCFLAG_FRESTRICTED) and \
funckind == pythoncom.FUNC_DISPATCH and \
invkind == pythoncom.INVOKE_PROPERTYGET and \
elemdesc[0][0] == pythoncom.VT_PTR and \
elemdesc[0][1][0] == pythoncom.VT_USERDEFINED
except:
isSubObject = 0
if isSubObject:
try:
# We found a sub-object.
names = typeinfo.GetNames(dispid);
result = self.dispatch.Invoke(dispid, 0x0, pythoncom.DISPATCH_PROPERTYGET, 1)
# IE has an interesting problem - there are lots of synonyms for the same object. Eg
# in a simple form, "window.top", "window.window", "window.parent", "window.self"
# all refer to the same object. Our event implementation code does not differentiate
# eg, "window_onload" will fire for *all* objects named "window". Thus,
# "window" and "window.window" will fire the same event handler :(
# One option would be to check if the sub-object is indeed the
# parent object - however, this would stop "top_onload" from firing,
# as no event handler for "top" would work.
# I think we simply need to connect to a *single* event handler.
# As use in IE is deprecated, I am not solving this now.
if type(result)==pythoncom.TypeIIDs[pythoncom.IID_IDispatch]:
name = names[0]
subObj = self.GetCreateSubItem(self, name, result, axscript.SCRIPTITEM_ISVISIBLE)
#print "subobj", name, "flags are", subObj.flags, "mydisp=", self.dispatch, "result disp=", result, "compare=", self.dispatch==result
subObj.BuildEvents()
subObj.Register()
except pythoncom.com_error:
pass
def GetDefaultSourceTypeInfo(self, typeinfo):
"""Gets the typeinfo for the Default Dispatch for the passed typeinfo"""
attr = typeinfo.GetTypeAttr()
cFuncs = attr[6]
typeKind = attr[5]
if typeKind not in [pythoncom.TKIND_COCLASS, pythoncom.TKIND_INTERFACE]:
RaiseAssert(winerror.E_UNEXPECTED, "The typeKind of the object is unexpected")
cImplType = attr[8]
for i in xrange(cImplType):
# Look for the [source, default] interface on the coclass
# that isn't marked as restricted.
flags = typeinfo.GetImplTypeFlags(i)
if (flags & ( pythoncom.IMPLTYPEFLAG_FDEFAULT | pythoncom.IMPLTYPEFLAG_FSOURCE | pythoncom.IMPLTYPEFLAG_FRESTRICTED))==pythoncom.IMPLTYPEFLAG_FDEFAULT:
# Get the handle to the implemented interface.
href = typeinfo.GetRefTypeOfImplType(i)
defTypeInfo = typeinfo.GetRefTypeInfo(href)
attr = defTypeInfo.GetTypeAttr()
typeKind = attr[5]
typeFlags = attr[11]
if typeKind == pythoncom.TKIND_INTERFACE and typeFlags & pythoncom.TYPEFLAG_FDUAL:
# Get corresponding Disp interface
# -1 is a special value which does this for us.
href = typeinfo.GetRefTypeOfImplType(-1)
return defTypeInfo.GetRefTypeInfo(href)
else:
return defTypeInfo
IActiveScriptMethods = [
"SetScriptSite", "GetScriptSite", "SetScriptState", "GetScriptState",
"Close", "AddNamedItem", "AddTypeLib", "GetScriptDispatch",
"GetCurrentScriptThreadID", "GetScriptThreadID", "GetScriptThreadState",
"InterruptScriptThread", "Clone" ]
IActiveScriptParseMethods = [
"InitNew", "AddScriptlet", "ParseScriptText" ]
IObjectSafetyMethods = [
"GetInterfaceSafetyOptions", "SetInterfaceSafetyOptions"]
# ActiveScriptParseProcedure is a new interface with IIS4/IE4.
IActiveScriptParseProcedureMethods = ['ParseProcedureText']
class COMScript:
"""An ActiveX Scripting engine base class.
This class implements the required COM interfaces for ActiveX scripting.
"""
_public_methods_ = IActiveScriptMethods + IActiveScriptParseMethods + IObjectSafetyMethods + IActiveScriptParseProcedureMethods
_com_interfaces_ = [axscript.IID_IActiveScript, axscript.IID_IActiveScriptParse, axscript.IID_IObjectSafety] #, axscript.IID_IActiveScriptParseProcedure]
def __init__(self):
        # Make sure we can print/trace without an exception!
MakeValidSysOuts()
# trace("AXScriptEngine object created", self)
self.baseThreadId = -1
self.debugManager = None
self.threadState = axscript.SCRIPTTHREADSTATE_NOTINSCRIPT
self.scriptState = axscript.SCRIPTSTATE_UNINITIALIZED
self.scriptSite = None
self.safetyOptions = 0
self.lcid = 0
self.subItems = {}
self.scriptCodeBlocks = {}
def _query_interface_(self, iid):
if self.debugManager:
return self.debugManager._query_interface_for_debugger_(iid)
# trace("ScriptEngine QI - unknown IID", iid)
return 0
# IActiveScriptParse
def InitNew(self):
if self.scriptSite is not None:
self.SetScriptState(axscript.SCRIPTSTATE_INITIALIZED)
def AddScriptlet(self, defaultName, code, itemName, subItemName, eventName, delimiter, sourceContextCookie, startLineNumber):
# trace ("AddScriptlet", defaultName, code, itemName, subItemName, eventName, delimiter, sourceContextCookie, startLineNumber)
self.DoAddScriptlet(defaultName, code, itemName, subItemName, eventName, delimiter,sourceContextCookie, startLineNumber)
def ParseScriptText(self, code, itemName, context, delimiter, sourceContextCookie, startLineNumber, flags, bWantResult):
# trace ("ParseScriptText", code[:20],"...", itemName, context, delimiter, sourceContextCookie, startLineNumber, flags, bWantResult)
if bWantResult or self.scriptState == axscript.SCRIPTSTATE_STARTED \
or self.scriptState == axscript.SCRIPTSTATE_CONNECTED \
or self.scriptState == axscript.SCRIPTSTATE_DISCONNECTED :
flags = flags | SCRIPTTEXT_FORCEEXECUTION
else:
flags = flags & (~SCRIPTTEXT_FORCEEXECUTION)
if flags & SCRIPTTEXT_FORCEEXECUTION:
# About to execute the code.
self.RegisterNewNamedItems()
return self.DoParseScriptText(code, sourceContextCookie, startLineNumber, bWantResult, flags)
#
# IActiveScriptParseProcedure
def ParseProcedureText( self, code, formalParams, procName, itemName, unkContext, delimiter, contextCookie, startingLineNumber, flags):
trace("ParseProcedureText", code, formalParams, procName, itemName, unkContext, delimiter, contextCookie, startingLineNumber, flags)
# NOTE - this is never called, as we have disabled this interface.
        # Problem is, once enabled, all event code comes via here rather than AddScriptlet.
# However, the "procName" is always an empty string - ie, itemName is the object whose event we are handling,
# but no idea what the specific event is!?
        # The problem with disabling this block is that AddScriptlet is _not_ passed
# <SCRIPT for="whatever" event="onClick" language="Python">
# (but even for those blocks, the "onClick" information is still missing!?!?!?)
# self.DoAddScriptlet(None, code, itemName, subItemName, eventName, delimiter,sourceContextCookie, startLineNumber)
return None
#
# IActiveScript
def SetScriptSite(self, site):
# We should still work with an existing site (or so MSXML believes :)
self.scriptSite = site
if self.debugManager is not None:
self.debugManager.Close()
import traceback
try:
import win32com.axdebug.axdebug # see if the core exists.
import debug
self.debugManager = debug.DebugManager(self)
except pythoncom.com_error:
# COM errors will occur if the debugger interface has never been
# seen on the target system
trace("Debugging interfaces not available - debugging is disabled..")
self.debugManager = None
except ImportError:
trace("Debugging extensions (axdebug) module does not exist - debugging is disabled..")
self.debugManager = None
except:
traceback.print_exc()
trace("*** Debugger Manager could not initialize - %s: %s" % (sys.exc_info()[0],sys.exc_info()[1]))
self.debugManager = None
try:
self.lcid = site.GetLCID()
except pythoncom.com_error:
self.lcid = win32api.GetUserDefaultLCID()
self.Reset()
def GetScriptSite(self, iid):
if self.scriptSite is None: raise Exception(scode=winerror.S_FALSE)
return self.scriptSite.QueryInterface(iid)
def SetScriptState(self, state):
#print "SetScriptState with %s - currentstate = %s" % (state_map.get(state),state_map.get(self.scriptState))
if state == self.scriptState: return
# If closed, allow no other state transitions
if self.scriptState==axscript.SCRIPTSTATE_CLOSED:
raise Exception(scode=winerror.E_INVALIDARG)
if state==axscript.SCRIPTSTATE_INITIALIZED:
# Re-initialize - shutdown then reset.
if self.scriptState in [axscript.SCRIPTSTATE_CONNECTED, axscript.SCRIPTSTATE_STARTED]:
self.Stop()
elif state==axscript.SCRIPTSTATE_STARTED:
if self.scriptState == axscript.SCRIPTSTATE_CONNECTED:
self.Disconnect()
if self.scriptState == axscript.SCRIPTSTATE_DISCONNECTED:
self.Reset()
self.Run()
self.ChangeScriptState(axscript.SCRIPTSTATE_STARTED)
elif state==axscript.SCRIPTSTATE_CONNECTED:
if self.scriptState in [axscript.SCRIPTSTATE_UNINITIALIZED,axscript.SCRIPTSTATE_INITIALIZED]:
self.ChangeScriptState(axscript.SCRIPTSTATE_STARTED) # report transition through started
self.Run()
if self.scriptState == axscript.SCRIPTSTATE_STARTED:
self.Connect()
self.ChangeScriptState(state)
elif state==axscript.SCRIPTSTATE_DISCONNECTED:
if self.scriptState == axscript.SCRIPTSTATE_CONNECTED:
self.Disconnect()
elif state==axscript.SCRIPTSTATE_CLOSED:
self.Close()
elif state==axscript.SCRIPTSTATE_UNINITIALIZED:
if self.scriptState == axscript.SCRIPTSTATE_STARTED:
self.Stop()
if self.scriptState == axscript.SCRIPTSTATE_CONNECTED:
self.Disconnect()
if self.scriptState == axscript.SCRIPTSTATE_DISCONNECTED:
self.Reset()
self.ChangeScriptState(state)
else:
raise Exception(scode=winerror.E_INVALIDARG)
def GetScriptState(self):
return self.scriptState
def Close(self):
# trace("Close")
if self.scriptState in [axscript.SCRIPTSTATE_CONNECTED, axscript.SCRIPTSTATE_DISCONNECTED]:
self.Stop()
if self.scriptState in [axscript.SCRIPTSTATE_CONNECTED, axscript.SCRIPTSTATE_DISCONNECTED, axscript.SCRIPTSTATE_INITIALIZED, axscript.SCRIPTSTATE_STARTED]:
pass # engine.close??
if self.scriptState in [axscript.SCRIPTSTATE_UNINITIALIZED, axscript.SCRIPTSTATE_CONNECTED, axscript.SCRIPTSTATE_DISCONNECTED, axscript.SCRIPTSTATE_INITIALIZED, axscript.SCRIPTSTATE_STARTED]:
self.ChangeScriptState(axscript.SCRIPTSTATE_CLOSED)
# Completely reset all named items (including persistent)
for item in self.subItems.itervalues():
item.Close()
self.subItems = {}
self.baseThreadId = -1
if self.debugManager:
self.debugManager.Close()
self.debugManager = None
self.scriptSite = None
self.scriptCodeBlocks = {}
self.persistLoaded = 0
def AddNamedItem(self, name, flags):
if self.scriptSite is None: raise Exception(scode=winerror.E_INVALIDARG)
try:
unknown = self.scriptSite.GetItemInfo(name, axscript.SCRIPTINFO_IUNKNOWN)[0]
dispatch = unknown.QueryInterface(pythoncom.IID_IDispatch)
except pythoncom.com_error:
raise Exception(scode=winerror.E_NOINTERFACE, desc="Object has no dispatch interface available.")
newItem = self.subItems[name] = self.GetNamedItemClass()(self, name, dispatch, flags)
if newItem.IsGlobal():
newItem.CreateConnections()
def GetScriptDispatch(self, name):
# Base classes should override.
raise Exception(scode=winerror.E_NOTIMPL)
def GetCurrentScriptThreadID(self):
return self.baseThreadId
def GetScriptThreadID(self, win32ThreadId):
if self.baseThreadId == -1:
raise Exception(scode=winerror.E_UNEXPECTED)
if self.baseThreadId != win32ThreadId:
raise Exception(scode=winerror.E_INVALIDARG)
return self.baseThreadId
def GetScriptThreadState(self, scriptThreadId):
if self.baseThreadId == -1:
raise Exception(scode=winerror.E_UNEXPECTED)
if scriptThreadId != self.baseThreadId:
raise Exception(scode=winerror.E_INVALIDARG)
return self.threadState
def AddTypeLib(self, uuid, major, minor, flags):
# Get the win32com gencache to register this library.
from win32com.client import gencache
gencache.EnsureModule(uuid, self.lcid, major, minor, bForDemand = 1)
# This is never called by the C++ framework - it does magic.
# See PyGActiveScript.cpp
#def InterruptScriptThread(self, stidThread, exc_info, flags):
# raise Exception("Not Implemented", scode=winerror.E_NOTIMPL)
def Clone(self):
raise Exception("Not Implemented", scode=winerror.E_NOTIMPL)
#
# IObjectSafety
    # Note that IE seems to insist we say we support all the flags, even though
    # we don't accept them all. If unknown flags come in, they are ignored and never
    # reflected in GetInterfaceSafetyOptions, and the QIs obviously fail, but IE still
    # allows our engine to initialize.
def SetInterfaceSafetyOptions(self, iid, optionsMask, enabledOptions):
# trace ("SetInterfaceSafetyOptions", iid, optionsMask, enabledOptions)
if optionsMask & enabledOptions == 0:
return
# See comments above.
# if (optionsMask & enabledOptions & \
# ~(axscript.INTERFACESAFE_FOR_UNTRUSTED_DATA | axscript.INTERFACESAFE_FOR_UNTRUSTED_CALLER)):
# # request for options we don't understand
# RaiseAssert(scode=winerror.E_FAIL, desc="Unknown safety options")
if iid in [pythoncom.IID_IPersist, pythoncom.IID_IPersistStream, pythoncom.IID_IPersistStreamInit,
axscript.IID_IActiveScript, axscript.IID_IActiveScriptParse]:
supported = self._GetSupportedInterfaceSafetyOptions()
self.safetyOptions = supported & optionsMask & enabledOptions
else:
raise Exception(scode=winerror.E_NOINTERFACE)
def _GetSupportedInterfaceSafetyOptions(self):
return 0
def GetInterfaceSafetyOptions(self, iid):
if iid in [pythoncom.IID_IPersist, pythoncom.IID_IPersistStream, pythoncom.IID_IPersistStreamInit,
axscript.IID_IActiveScript, axscript.IID_IActiveScriptParse]:
supported = self._GetSupportedInterfaceSafetyOptions()
return supported, self.safetyOptions
else:
raise Exception(scode=winerror.E_NOINTERFACE)
#
# Other helpers.
def ExecutePendingScripts(self):
self.RegisterNewNamedItems()
self.DoExecutePendingScripts()
def ProcessScriptItemEvent(self, item, event, lcid, wFlags, args):
# trace("ProcessScriptItemEvent", item, event, lcid, wFlags, args)
self.RegisterNewNamedItems()
return self.DoProcessScriptItemEvent(item, event, lcid, wFlags, args)
def _DumpNamedItems_(self):
for item in self.subItems.itervalues():
item._dump_(0)
def ResetNamedItems(self):
# Due to the way we work, we re-create persistent ones.
existing = self.subItems
self.subItems = {}
for name, item in existing.iteritems():
item.Close()
if item.flags & axscript.SCRIPTITEM_ISPERSISTENT:
self.AddNamedItem(item.name, item.flags)
def GetCurrentSafetyOptions(self):
return self.safetyOptions
def ProcessNewNamedItemsConnections(self):
# Process all sub-items.
for item in self.subItems.itervalues():
if not item.createdConnections: # Fast-track!
item.CreateConnections()
def RegisterNewNamedItems(self):
# Register all sub-items.
for item in self.subItems.itervalues():
if not item.isRegistered: # Fast-track!
self.RegisterNamedItem(item)
def RegisterNamedItem(self, item):
item.Register()
def CheckConnectedOrDisconnected(self):
if self.scriptState in [axscript.SCRIPTSTATE_CONNECTED, axscript.SCRIPTSTATE_DISCONNECTED]:
return
RaiseAssert(winerror.E_UNEXPECTED, "Not connected or disconnected - %d" % self.scriptState)
def Connect(self):
self.ProcessNewNamedItemsConnections()
self.RegisterNewNamedItems()
self.ConnectEventHandlers()
def Run(self):
# trace("AXScript running...")
if self.scriptState != axscript.SCRIPTSTATE_INITIALIZED and self.scriptState != axscript.SCRIPTSTATE_STARTED:
raise Exception(scode=winerror.E_UNEXPECTED)
# self._DumpNamedItems_()
self.ExecutePendingScripts()
self.DoRun()
def Stop(self):
# Stop all executing scripts, and disconnect.
if self.scriptState == axscript.SCRIPTSTATE_CONNECTED:
self.Disconnect()
# Reset back to initialized.
self.Reset()
def Disconnect(self):
self.CheckConnectedOrDisconnected()
try:
self.DisconnectEventHandlers()
except pythoncom.com_error:
# Ignore errors when disconnecting.
pass
self.ChangeScriptState(axscript.SCRIPTSTATE_DISCONNECTED)
def ConnectEventHandlers(self):
# trace ("Connecting to event handlers")
for item in self.subItems.itervalues():
item.Connect()
self.ChangeScriptState(axscript.SCRIPTSTATE_CONNECTED);
def DisconnectEventHandlers(self):
# trace ("Disconnecting from event handlers")
for item in self.subItems.itervalues():
item.Disconnect()
def Reset(self):
        # Keeping persistent engine state, reset back to an initialized state
self.ResetNamedItems()
self.ChangeScriptState(axscript.SCRIPTSTATE_INITIALIZED)
def ChangeScriptState(self, state):
#print " ChangeScriptState with %s - currentstate = %s" % (state_map.get(state),state_map.get(self.scriptState))
self.DisableInterrupts()
try:
self.scriptState = state
try:
if self.scriptSite: self.scriptSite.OnStateChange(state)
except pythoncom.com_error, (hr, desc, exc, arg):
pass # Ignore all errors here - E_NOTIMPL likely from scriptlets.
finally:
self.EnableInterrupts()
# This stack frame is debugged - therefore we do as little as possible in it.
def _ApplyInScriptedSection(self, fn, args):
if self.debugManager:
self.debugManager.OnEnterScript()
if self.debugManager.adb.appDebugger:
return self.debugManager.adb.runcall(fn, *args)
else:
return fn(*args)
else:
return fn(*args)
def ApplyInScriptedSection(self, codeBlock, fn, args):
self.BeginScriptedSection()
try:
try:
# print "ApplyInSS", codeBlock, fn, args
return self._ApplyInScriptedSection(fn, args)
finally:
if self.debugManager: self.debugManager.OnLeaveScript()
self.EndScriptedSection()
except:
self.HandleException(codeBlock)
# This stack frame is debugged - therefore we do as little as possible in it.
def _CompileInScriptedSection(self, code, name, type):
if self.debugManager: self.debugManager.OnEnterScript()
return compile(code, name, type)
def CompileInScriptedSection(self, codeBlock, type, realCode = None):
if codeBlock.codeObject is not None: # already compiled
return 1
if realCode is None:
code = codeBlock.codeText
else:
code = realCode
name = codeBlock.GetFileName()
self.BeginScriptedSection()
try:
try:
codeObject = self._CompileInScriptedSection(RemoveCR(code), name, type)
codeBlock.codeObject = codeObject
return 1
finally:
if self.debugManager: self.debugManager.OnLeaveScript()
self.EndScriptedSection()
except:
self.HandleException(codeBlock)
# This stack frame is debugged - therefore we do as little as possible in it.
def _ExecInScriptedSection(self, codeObject, globals, locals = None):
if self.debugManager:
self.debugManager.OnEnterScript()
if self.debugManager.adb.appDebugger:
return self.debugManager.adb.run(codeObject, globals, locals)
else:
exec codeObject in globals, locals
else:
exec codeObject in globals, locals
def ExecInScriptedSection(self, codeBlock, globals, locals = None):
if locals is None: locals = globals
assert not codeBlock.beenExecuted, "This code block should not have been executed"
codeBlock.beenExecuted = 1
self.BeginScriptedSection()
try:
try:
self._ExecInScriptedSection(codeBlock.codeObject, globals, locals)
finally:
if self.debugManager: self.debugManager.OnLeaveScript()
self.EndScriptedSection()
except:
self.HandleException(codeBlock)
def _EvalInScriptedSection(self, codeBlock, globals, locals = None):
if self.debugManager:
self.debugManager.OnEnterScript()
if self.debugManager.adb.appDebugger:
return self.debugManager.adb.runeval(codeBlock, globals, locals)
else:
return eval(codeBlock, globals, locals)
else:
return eval(codeBlock, globals, locals)
def EvalInScriptedSection(self, codeBlock, globals, locals = None):
if locals is None: locals = globals
assert not codeBlock.beenExecuted, "This code block should not have been executed"
codeBlock.beenExecuted = 1
self.BeginScriptedSection()
try:
try:
return self._EvalInScriptedSection(codeBlock.codeObject, globals, locals)
finally:
if self.debugManager: self.debugManager.OnLeaveScript()
self.EndScriptedSection()
except:
self.HandleException(codeBlock)
def HandleException(self, codeBlock):
# NOTE - Never returns - raises a ComException
exc_type, exc_value, exc_traceback = sys.exc_info()
# If a SERVER exception, re-raise it. If a client side COM error, it is
# likely to have originated from the script code itself, and therefore
# needs to be reported like any other exception.
if IsCOMServerException(exc_type):
            # Ensure the traceback doesn't cause a cycle.
exc_traceback = None
raise
# It could be an error by another script.
if issubclass(pythoncom.com_error, exc_type) and exc_value[0]==axscript.SCRIPT_E_REPORTED:
            # Ensure the traceback doesn't cause a cycle.
exc_traceback = None
raise Exception(scode=exc_value[0])
exception = error.AXScriptException(self, \
codeBlock, exc_type, exc_value, exc_traceback)
        # Ensure the traceback doesn't cause a cycle.
exc_traceback = None
result_exception = error.ProcessAXScriptException(self.scriptSite, self.debugManager, exception)
if result_exception is not None:
try:
self.scriptSite.OnScriptTerminate(None, result_exception)
except pythoncom.com_error:
pass # Ignore errors telling engine we stopped.
# reset ourselves to 'connected' so further events continue to fire.
self.SetScriptState(axscript.SCRIPTSTATE_CONNECTED)
raise result_exception
# I think that in some cases this should just return - but the code
# that could return None above is disabled, so it never happens.
RaiseAssert(winerror.E_UNEXPECTED, "Don't have an exception to raise to the caller!")
def BeginScriptedSection(self):
if self.scriptSite is None:
raise Exception(scode=winerror.E_UNEXPECTED)
self.scriptSite.OnEnterScript()
def EndScriptedSection(self):
if self.scriptSite is None:
raise Exception(scode=winerror.E_UNEXPECTED)
self.scriptSite.OnLeaveScript()
def DisableInterrupts(self):
pass
def EnableInterrupts(self):
pass
def GetNamedItem(self, name):
try:
return self.subItems[name]
except KeyError:
raise Exception(scode=winerror.E_INVALIDARG)
def GetNamedItemClass(self):
return ScriptItem
def _AddScriptCodeBlock(self, codeBlock):
self.scriptCodeBlocks[codeBlock.GetFileName()] = codeBlock
if self.debugManager:
self.debugManager.AddScriptBlock(codeBlock)
if __name__=='__main__':
print "This is a framework class - please use pyscript.py etc"
def dumptypeinfo(typeinfo):
return
attr = typeinfo.GetTypeAttr()
# Loop over all methods
print "Methods"
for j in xrange(attr[6]):
fdesc = list(typeinfo.GetFuncDesc(j))
id = fdesc[0]
try:
names = typeinfo.GetNames(id)
except pythoncom.ole_error:
names = None
doc = typeinfo.GetDocumentation(id)
print " ", names, "has attr", fdesc
# Loop over all variables (ie, properties)
print "Variables"
for j in xrange(attr[7]):
fdesc = list(typeinfo.GetVarDesc(j))
        names = typeinfo.GetNames(fdesc[0]) # use this variable's own memid
print " ", names, "has attr", fdesc
``` |
{
"source": "jkols99/Osy",
"score": 3
} |
#### File: Osy/tools/check_output.py
```python
import sys
PREFIX_BLOCK_EXPECTED = '[EXPECTED BLOCK]: '
PREFIX_EXPECTED = '[EXPECTED]: '
PREFIX_ACTUAL = '[ ACTUAL ]: '
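# Illustrative sketch of the input format this script expects on stdin (comment
# added for clarity, not part of the original file): an '[EXPECTED]: value' line
# must eventually be followed by a matching '[ ACTUAL ]: value' line, and a run
# of '[EXPECTED BLOCK]: ...' lines must be matched, line by line and in order,
# by the output lines that immediately follow the block. Any mismatch is
# reported to stderr and the script exits with code 1.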
def print_error(fmt, *args, **kwargs):
print(fmt.format(*args, **kwargs), file=sys.stderr)
def main():
expected_block = []
expected_block_line = 0
expected_block_frozen = False
expected = None
expected_line = 0
line_number = 0
exit_code = 0
for line in sys.stdin:
line = line.rstrip()
line_number = line_number + 1
if line.startswith(PREFIX_BLOCK_EXPECTED):
if expected_block_frozen:
print_error('Actual block on line {} ended too early on {} ("{}" not matched).',
expected_block_line, line_number - 1, expected_block[0])
expected_block = []
exit_code = 1
if not expected_block:
expected_block_line = line_number
expected_block.append(line[len(PREFIX_BLOCK_EXPECTED):])
expected_block_frozen = False
continue
else:
if expected_block:
expected_block_frozen = True
if expected_block:
if line != expected_block[0]:
print_error('Mismatch on lines {} and {} ("{}" != "{}")',
expected_block_line, line_number, expected_block[0], line)
exit_code = 1
expected_block = expected_block[1:]
expected_block_line = expected_block_line + 1
if not expected_block:
expected_block_frozen = False
continue
if line.startswith(PREFIX_EXPECTED):
if not expected is None:
print_error('Missing actual value for expected on line {} ({}).',
expected_line, expected)
exit_code = 1
expected = line[len(PREFIX_EXPECTED):]
expected_line = line_number
elif line.startswith(PREFIX_ACTUAL):
actual = line[len(PREFIX_ACTUAL):]
if expected is None:
                print_error('Missing expected value for actual on line {} ({}).',
line_number, actual)
exit_code = 1
continue
if actual != expected:
print_error('Mismatch on lines {} and {} ("{}" != "{}")',
expected_line, line_number, expected, actual)
exit_code = 1
expected = None
else:
# Skip other lines
pass
if expected_block:
        print_error('Not enough actual lines for expected block at {} ("{}" not matched).',
expected_block_line, expected_block[0])
return exit_code
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "jkom-cloud/heartbeat",
"score": 2
} |
#### File: heartbeat/heartbeat/__init__.py
```python
import time
import json
import raven
with open('./credentials.json') as f:
credentials = json.load(f)
dsn = 'https://99da3efb421b47a5ab6d0469327d2b7d:[email protected]/12'
raven_client = raven.Client(dsn)
def loop_executor(func, interval, run_event, **kwargs):
"""execute the given func in a ctrl-c interuptable loop
:func: the function to be executed
:interval: seconds between executions
:run_event: the event that controls the loop to stop
:kwargs: keyword args of func
"""
while run_event.is_set():
try:
func(**kwargs)
print('{} - {} - {} - OK'.format(
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
func.__name__,
kwargs,
))
except Exception as err:
# report every possible exception to Sentry
msg = '{} - {} - {} - FAIL: {}'.format(
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
func.__name__,
kwargs,
err,
)
print(msg)
raven_client.captureMessage(msg, level='error')
finally:
            # sleep in 1-second steps so the loop can stop promptly once run_event is cleared
for i in range(interval):
if run_event.is_set():
time.sleep(1)
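# Minimal usage sketch (illustrative only; `ping_service` below is a hypothetical
# health-check callable, not part of this module):
#
#   import threading
#
#   def ping_service(url):
#       ...  # e.g. perform the check and raise on failure
#
#   run_event = threading.Event()
#   run_event.set()
#   worker = threading.Thread(target=loop_executor,
#                             args=(ping_service, 60, run_event),
#                             kwargs={'url': 'https://example.com/health'})
#   worker.start()
#   ...
#   run_event.clear()  # stops the loop (and its 1-second sleep steps)
#   worker.join()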
``` |
{
"source": "jkomiyama/fairregresion",
"score": 3
} |
#### File: fairregresion/src/compasdata.py
```python
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
import time
import datetime
import sys
import os
import copy
import itertools
from sklearn import svm
from sklearn import tree
from sklearn import ensemble
from sklearn import linear_model
from sklearn import metrics
from sklearn import model_selection
from sklearn import preprocessing
import pandas as pd
#from statsmodels.discrete import discrete_model
import math
import random
from io import open
import conf
def strip(text):
try:
return text.strip()
except AttributeError:
return text
def read_compas(filename = os.path.join(conf.datadir, "compas-analysis/compas-scores-two-years.csv"), smlfeatures=False, return_all=False, single_S=False): #read compas dataset file (numeric ver)
lines = [line for line in open(filename, "r").readlines() if line.find("?")==-1]
fo = open(filename, "w")
for line in lines:
fo.write(line)
fo.close()
#pd.set_option("display.max_rows", 100)
#pd.set_option("display.max_colwidth", 100)
#print dir(pd)
data = pd.read_csv(filename, sep=',')
int_values = ["age","juv_fel_count","decile_score","juv_misd_count","juv_other_count","v_decile_score","priors_count"] #,"is_recid"
#string_values = ["sex","race","two_year_recid","c_charge_degree","c_charge_desc"]
string_values = ["sex","two_year_recid","type_of_assessment","v_type_of_assessment"]#,"r_charge_desc"]
date_values=["c_jail_in","c_jail_out","c_offense_date","screening_date","in_custody","out_custody"]
my_attrs = []
for int_val in int_values:
my_attrs.append(data[int_val])
for string_val in string_values:
my_attrs.append( pd.get_dummies(data[string_val], prefix=string_val, drop_first=True) )
for date_val in date_values:
temp = pd.to_datetime(data[date_val])
t_min, t_max = min(temp), max(temp)
my_attrs.append( (temp-t_min)/(t_max-t_min) )
new_data = pd.concat(my_attrs, axis=1)
new_data["African-American"] = (data["race"] == "African-American")
new_data = new_data.dropna()
if return_all:
return new_data
new_data.insert(0, "intercept", 1)
corr_akey = []
for akey in new_data.keys():
corr_akey.append((np.corrcoef(new_data[akey], new_data["two_year_recid_1"])[0,1], akey))
if single_S:
S_keys = ["sex_Male"]
else:
S_keys = ["sex_Male", "African-American"]
#race_Native American race_Asian race_Other race_Hispanic race_Caucasian
S = np.transpose([list(new_data[i]) for i in S_keys])
#S = np.array(S, dtype=np.int_)*2-1
y = [v*2.0-1.0 for v in new_data["two_year_recid_1"]]
X_keys = set(new_data.keys()).difference([]+S_keys)
X_keys_nonrace = set()
for akey in X_keys:
if akey.find("race") != 0:
X_keys_nonrace.add(akey)
X_keys = X_keys_nonrace
print("X_keys=",len(X_keys),X_keys)
#print list(race.keys())
#X2_keys = set()
X2_keys = set(["intercept"]).intersection(X_keys)
print("X2 keys=",X2_keys)
X2 = np.transpose([list(new_data[i]) for i in X2_keys])
#print("X2=",str(X2))
X2 = np.array(X2).reshape([len(new_data),len(X2_keys)])
#print "X2=",X2.shape
#print "X2=",X2
X1_keys = X_keys.difference(X2_keys.union(set(["two_year_recid_1"])))
if smlfeatures:
X1_keys = X1_keys.difference(set(["out_custody","decile_score","in_custody","c_jail_out","c_jail_in","screening_date","v_decile_score"]))
X1 = np.transpose([list(new_data[i]) for i in X1_keys])
print("X1 keys=",X1_keys)
#sys.exit()
#print "S=",S[:10]
return np.array(S), np.array(X1), np.array(X2), np.array(y)
if __name__ == '__main__':
read_compas()
``` |
{
"source": "jkomiyama/multiplaybanditlib",
"score": 3
} |
#### File: jkomiyama/multiplaybanditlib/simpleplot.py
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib as mpl
import sys, os, copy, math, re
SHOW_THEORETICAL = True
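# thin() subsamples a list at geometrically increasing indices (step factor 1.2),
# always keeping the last element, so the log-scale regret plot below needs far
# fewer points to draw.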
def thin(anarray):
i = 1.0
retarray = []
while(i<=len(anarray)):
retarray.append(anarray[int(i)-1])
i*=1.2
if int(i) != len(anarray):
retarray.append(anarray[-1])
return retarray
def readFile(afile):
lines = [line.strip() for line in file(afile, "r").readlines()]
algs = []
data = []
for line in lines:
if line[0]=="#":
if line.find("#policy") != -1:
algs.append(line.split(" ")[2])
else:
sps = line.split(" ")
data.append(sps)
data = map(list, zip(*data)) #transpose 2d array
data = map(thin, data) #reduce data points to speed-up plotting
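    # after the transpose, data[0] appears to be the x-axis (round t) and data[1]
    # the theoretical lower bound, with the per-policy regret curves read from
    # index 4 onwards in plotRegret (inferred from the plotting code below)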
return (algs, data)
def getLabel(alg):
if alg == "Random": return False
elif alg == "UCB": return "CUCB"
elif alg == "TS(basic)": return "MP-TS"
elif alg == "TS(extended)": return "IMP-TS"
elif alg == "KL-UCB(basic)": return "MP-KL-UCB"
elif alg == "KL-UCB(extended)": return "IMP-KL-UCB"
else:
return alg
def getMarker(alg):
if alg == "theoretical1": return ":"
elif alg == "Random": return "->"
elif alg == "Exp3M": return "-s"
elif alg == "UCB": return "-o"
elif alg == "TS(basic)": return "-"
elif alg == "TS(extended)": return "-d"
elif alg == "KL-UCB(basic)": return "-v"
elif alg == "KL-UCB(extended)": return "-"
else:
return "-"
def getColor(alg):
if alg == "theoretical1": return "0.25"
elif alg == "Random": return "0.50"
elif alg == "Exp3M": return "r"
elif alg == "UCB": return "y"
elif alg == "TS(basic)": return "k"
elif alg == "TS(extended)": return "b"
elif alg == "KL-UCB(basic)": return "g"
elif alg == "KL-UCB(extended)": return "0.5"
else:
return "0.50"
def getLineWidth(alg):
if alg == "theoretical1": return 2
elif alg == "theoretical2": return 2
elif alg == "Random": return 1
elif alg == "Exp3M": return 1
elif alg == "UCB": return 1
elif alg == "TS(basic)": return 2
elif alg == "TS(extended)": return 1
elif alg == "KL-UCB(basic)": return 1
elif alg == "KL-UCB(extended)": return 1
else:
return 2
def markers(i):
print "i=",i
markers = ("--", "-", "-.", ".", "+")
return markers[i]
def plotRegret((algs, data), mode=0):
#plt.figure(figsize=(4,3))
#plt.subplots_adjust(left=0.2, right=0.9, top=0.9, bottom=0.2)
plt.xlabel("t: round")
plt.ylabel("R(t): regret")
plt.xscale('log')
count=0
if SHOW_THEORETICAL:
plt.plot(data[0], data[1], ":", label="LB", color=getColor("theoretical1"), linewidth=getLineWidth("theoretical1"))
print "lendata=",len(data)
for l in range(4, len(data), 2):
i=l/2-2
print i,l
print algs[i]
label = getLabel(algs[i])
if label:
plt.plot(data[0], data[l], getMarker(algs[i]), color=getColor(algs[i]), label=label, linewidth=getLineWidth(algs[i]))
count+=1
plt.legend(loc="upper left", fontsize=12)
if mode==0:
plt.savefig('regret.pdf')
else:
plt.savefig('regret'+str(mode)+'.pdf')
plt.show()
if __name__ == '__main__':
if len(sys.argv)>=2:
afile = sys.argv[1]
result = readFile(afile)
plotRegret(result)
else:
print "usage: this (filename)"
``` |
{
"source": "JKomskis/eva",
"score": 3
} |
#### File: JKomskis/eva/process_mnist_dataset.py
```python
import numpy as np
import gzip
import struct
from PIL import Image
import os
import cv2
import glob
image_size = 28
# Based on code from https://gist.github.com/xkumiyu/c93222f2dce615f4b264a9e71f6d49e0
def create_img_files(input_path, image_path):
images = None
with gzip.open(input_path) as file:
file.read(4)
N, = struct.unpack('>i', file.read(4))
file.read(8)
images = np.empty((N, 784), dtype=np.uint8)
for i in range(N):
for j in range(784):
images[i, j] = ord(file.read(1))
os.makedirs(image_path, exist_ok=True)
for (i, image) in enumerate(images):
filepath = f'{image_path}/{i}.jpg'
Image.fromarray(image.reshape(image_size, image_size)).save(filepath)
def create_video_from_images(image_path, output_file):
writer = cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc(*'avc1'), 30, (image_size, image_size))
for filename in glob.glob(f'{image_path}/*'):
img = cv2.imread(filename)
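        # write each image for 6 consecutive frames, i.e. 0.2 s per digit at the
        # 30 fps set above (comment added for clarity)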
for i in range(0, 6):
            writer.write(img)
    # release the writer so the video file is properly finalized
    writer.release()
if __name__ == "__main__":
input_path = 'data/MNIST/raw/train-images-idx3-ubyte.gz'
image_path = 'data/MNIST/processed/train'
create_img_files(input_path, image_path)
create_video_from_images(image_path, 'data/MNIST/train_long.mp4')
```
#### File: src/udfs/mnist_digit_detector.py
```python
from typing import List, Dict
import pandas as pd
import torchvision
import numpy as np
import os
from random import Random
from torch import Tensor
import torch
from src.models.catalog.frame_info import FrameInfo
from src.models.catalog.properties import ColorSpace
from src.udfs.pytorch_abstract_udf import PytorchAbstractUDF
from src.configuration.dictionary import EVA_DIR
from src.utils.logging_manager import LoggingManager, LoggingLevel
class MNISTDigitDetector(PytorchAbstractUDF):
"""
Arguments:
threshold (float): Threshold for classifier confidence score
"""
@property
def name(self) -> str:
return "mnistnn"
def __init__(self, threshold=0.85):
super().__init__()
self.threshold = threshold
custom_model_path = os.path.join(EVA_DIR, "data", "models", "mnist.pt")
self.model = torch.load(custom_model_path)
self.model.eval()
@property
def input_format(self) -> FrameInfo:
return FrameInfo(-1, -1, 3, ColorSpace.RGB)
@property
def labels(self) -> List[str]:
return [
'0',
'1',
'2',
'3',
'4',
'5',
'6',
'7',
'8',
'9'
]
def _get_predictions(self, frames: Tensor) -> pd.DataFrame:
"""
Performs predictions on input frames
Arguments:
            frames (Tensor): Frames on which predictions need
            to be performed
        Returns:
            pd.DataFrame with one row per frame, holding the predicted
            "labels" (List[str]) and confidence "scores" (List[float])
"""
frames = torch.narrow(frames, 1, 0, 1)
frames = frames.view(frames.shape[0], -1)
predictions = self.model(frames)
outcome_list = []
for prediction in predictions:
ps = torch.exp(prediction)
probab = list(ps.detach().numpy())
label = str(probab.index(max(probab)))
outcome_list.append(
{
"labels": [label],
"scores": [max(probab)],
})
return pd.DataFrame.from_dict(outcome_list)
``` |
{
"source": "jkomyno/amplrestapi",
"score": 2
} |
#### File: amplrestapi/amplrestapi/http_validation_error.py
```python
from aiohttp.web_exceptions import HTTPClientError
class HTTPValidationError(HTTPClientError):
status_code = 422
def __init__(self, reason: str):
super().__init__(reason=reason)
```
#### File: amplrestapi/amplwrapper/ampl_error_handler.py
```python
from amplpy import ErrorHandler
import logging
class AMPLErrorHandler(ErrorHandler):
def error(self, exception):
        logging.log(logging.DEBUG, 'AMPL Error: %s', exception.getMessage())
def warning(self, exception):
        logging.log(logging.DEBUG, 'AMPL Warning: %s', exception.getMessage())
``` |
{
"source": "jkomyno/bioalgo-QCluster-vs-isONclust",
"score": 3
} |
#### File: python/preprocess/preprocess.py
```python
import random
import heapq
import numpy as np
from Bio import SeqIO
from itertools import groupby, tee
from typing import List, Tuple, Dict
from .functional import fst, snd
def get_key_length_pairs(input_fasta_file) -> List[Tuple[str, int]]:
"""
Given a fasta file, it computes the (key, length) list of pairs for each sequence,
where key is the sequence identifier and length is the length of the sequence.
The file is accessed in a streaming fashion, to avoid memory issues.
"""
return [
(get_accessor(seq_record.description), len(seq_record))
for seq_record in SeqIO.parse(input_fasta_file, 'fasta')
]
def cluster_by_std_dev(key_length_pairs: List[Tuple[str, int]], threshold: float) -> Dict[int, List[Tuple[str, int]]]:
"""
Group the lengths in `key_length_pairs` according to their standard deviation.
Two pairs are in the same cluster if the distance between their lengths is no more
than the given standard deviation threshold removed from the mean distances between
the lengths in the entire group.
The result of this function is a key-value map where the key indicates the cluster id,
and the value is the list of (sequence key, sequence length) pairs whose lengths are
not too different from each other, according to the standard deviation criterion.
"""
def compute_cluster_by_std_dev(key_length_pairs: List[Tuple[str, int]], threshold: float, key=snd):
# sort the key length pairs according to their length
data = sorted(key_length_pairs, key=key)
# calculate the standard deviation of the gaps between two consecutive values
# in the data
std_dev = np.std([key(y) - key(x) for x, y in zip(data[:-1], data[1:])])
# the first element belongs to the first cluster id, 0
cluster_id = 0
prev = data[0]
yield cluster_id, prev
for x in data[1:]:
# if the gap from the current value to the previous is more than the given
# standard deviation threshold, then create a new cluster
if (key(x) - key(prev)) / std_dev > threshold:
cluster_id += 1
prev = x
yield cluster_id, x
# key-value map where the key `c` indicates the cluster id, and the value is the list of
# (sequence key, sequence length) pairs whose lengths are not too different from each
# other, according to the standard deviation criterion
cluster_dict = {
c: [snd(x) for x in v]
for c, v in groupby(compute_cluster_by_std_dev(key_length_pairs, threshold=threshold), key=fst)
}
return cluster_dict
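# Illustrative example (not part of the original module): with sequence lengths
# [100, 102, 104, 500] the consecutive gaps are [2, 2, 396], whose standard
# deviation is roughly 186. With threshold=1.0 only the 104 -> 500 jump exceeds
# it (396 / 186 is about 2.1 > 1.0), so the first three pairs form cluster 0 and
# the last pair forms cluster 1.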
def select_best_sequences_from_clusters(cluster_dict: Dict[int, List[Tuple[str, int]]]) -> List[Tuple[str, int]]:
"""
Retrieve the best sequences keys and lengths from a given cluster dictionary.
"Best" means that the cluster with the third highest cardinality is preferred.
This is because the cluster with the highest cardinality is composed of very short sequences,
and the elements of the third cluster have a slightly inferior gap compared to the elements
in the second cluster.
"""
def select_best_indexes(data):
indexes = heapq.nlargest(4, range(len(data)), data.__getitem__)
return indexes
data = list(map(len, cluster_dict.values()))
lst = list(map(lambda x: (len(x), x), cluster_dict.values()))
best_indexes = select_best_indexes(data)
first_best_idx, second_best_idx, third_best_idx, fourth_best_idx \
= best_indexes[0], best_indexes[1], best_indexes[2], best_indexes[3]
first_best_len, first_best_list = lst[first_best_idx]
print(f'1st best cluster: ({first_best_len} elements)')
print(f' Min length: {min(first_best_list, key=snd)}')
print(f' Max length: {max(first_best_list, key=snd)}')
print('')
second_best_len, second_best_cluster = lst[second_best_idx]
print(f'2nd best cluster: ({second_best_len} elements)')
print(f' Min length: {min(second_best_cluster, key=snd)}')
print(f' Max length: {max(second_best_cluster, key=snd)}')
print('')
third_best_len, third_best_cluster = lst[third_best_idx]
print(f'3rd best cluster: ({third_best_len} elements)')
    print(f'    Min length: {min(third_best_cluster, key=snd)}')
    print(f'    Max length: {max(third_best_cluster, key=snd)}')
print('')
fourth_best_len, fourth_best_cluster = lst[fourth_best_idx]
print(f'4th best cluster: ({fourth_best_len} elements)')
    print(f'    Min length: {min(fourth_best_cluster, key=snd)}')
    print(f'    Max length: {max(fourth_best_cluster, key=snd)}')
print('')
return third_best_cluster
def get_accessor(identifier: str) -> str:
"""
Given a SeqRecord identifier string, return the access number as a string.
e.g. "ENSG00000004776|ENSG00000004776.13|ENST00000004982|ENST00000004982.6" -> "ENST00000004982.6"
"""
parts = identifier.split('|')
assert len(parts) == 4
return parts[3]
def save_best_sequences(best_sequences: List[Tuple[str, int]], n_sequences_to_keep: int,
input_fasta_file: str, output_fasta_file: str):
fasta_header_dict = SeqIO.index(input_fasta_file, 'fasta', key_function=get_accessor)
# extract `n_sequences_to_keep` unique header keys from `best_sequences`
keys_to_extract: List[str] = random.sample(list(map(fst, best_sequences)), n_sequences_to_keep)
# write the selected sequences to `output_fasta_file`
with open(output_fasta_file, 'wb') as f:
for key in keys_to_extract:
seq_record = fasta_header_dict[key]
print(f'>{seq_record.description}')
f.write(fasta_header_dict.get_raw(key))
def preprocess(input_fasta_file: str, output_fasta_file: str,
n_sequences_to_keep: int, std_dev_threshold: float,
seed: int):
random.seed(seed)
key_length_pairs = get_key_length_pairs(input_fasta_file)
cluster_dict = cluster_by_std_dev(key_length_pairs, threshold=std_dev_threshold)
best_sequences = select_best_sequences_from_clusters(cluster_dict)
save_best_sequences(best_sequences, n_sequences_to_keep, \
input_fasta_file, output_fasta_file)
```
#### File: quality/metrics/external.py
```python
import numpy as np
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import fowlkes_mallows_score
from sklearn.metrics.cluster import contingency_matrix
from typing import List
from . import ExternalEvaluation
def compute_external_metrics(labels_true: List[str], labels_pred: List[int]) -> ExternalEvaluation:
if len(labels_true) == 0 and len(labels_pred) == 0:
return None
homogeneity, completeness, v_measure = homogeneity_completeness_v_measure(labels_true, labels_pred)
adjusted_mutual_info = adjusted_mutual_info_score(labels_true, labels_pred)
adjusted_rand_index = adjusted_rand_score(labels_true, labels_pred)
fowlkes_mallows = fowlkes_mallows_score(labels_true, labels_pred)
mat = contingency_matrix(labels_true, labels_pred)
purity = purity_score(mat)
inverse_purity = purity_score(mat, inverse=True)
return ExternalEvaluation(homogeneity=homogeneity, completeness=completeness, v_measure=v_measure,
adjusted_mutual_information=adjusted_mutual_info, adjusted_rand_index=adjusted_rand_index,
fowlkes_mallows=fowlkes_mallows, purity=purity, inverse_purity=inverse_purity)
def purity_score(mat, inverse=False):
"""
Compute purity or inverse purity score.
"""
axis = 0 if inverse else 1
return np.sum(np.amax(mat, axis=axis)) / np.sum(mat)
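# Worked example (illustrative, not part of the original module): for the
# contingency matrix
#   [[4, 1, 0],
#    [0, 2, 3]]
# purity_score(mat) sums the row maxima: (4 + 3) / 10 = 0.7, while
# purity_score(mat, inverse=True) sums the column maxima: (4 + 2 + 3) / 10 = 0.9.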
```
#### File: python/quality/quality.py
```python
import pandas as pd
from os import path
from pathlib import Path
from pysam import AlignmentFile
from collections import defaultdict
from typing import Dict, Set, Tuple, Iterator
from . import isonclust
from . import qcluster
from . import random_cluster
from . import metrics
def parse_true_clusters(args) -> Tuple[Dict[str, str], Dict[str, Set[str]]]:
ground_truth_filename = path.join(args.data, 'simulated', args.simulated, 'simulated.sam')
# reference_filename = path.join(args.data, 'preprocess', 'preprocessed.fasta')
ref_file = AlignmentFile(ground_truth_filename, mode='r', check_sq=True)
# reference_filename=reference_filename)
classes = defaultdict(dict)
chromosome_to_read_ids_map = defaultdict(set)
for read in ref_file.fetch(until_eof=True):
header = read.query_name
# e.g. 'm96770/100/CCS'
read_id = header.split(' ')[0]
# e.g. 'ENSG00000070061|ENSG00000070061.16|ENST00000674938|ENST00000674938.1'
chromosome = header.split(';')[3].split('=')[1]
classes[read_id] = chromosome
chromosome_to_read_ids_map[chromosome].add(read_id)
return classes, chromosome_to_read_ids_map
def compute_trivial_classes(chromosome_to_read_ids_map: Dict[str, Set[str]],
threshold: int) -> Iterator[str]:
"""
A class is considered trivial if a chromosome has been used to generate
at most `threshold` sequences.
"""
for chromosome in chromosome_to_read_ids_map:
read_ids = chromosome_to_read_ids_map[chromosome]
l = len(read_ids)
if l <= threshold:
yield chromosome
def compute_trivial_clusters(cluster_id_to_read_ids_map: Dict[int, Set[str]],
threshold: int) -> Iterator[str]:
"""
A cluster is considered trivial if it contains at most `threshold` sequences.
"""
for cluster_id in cluster_id_to_read_ids_map:
read_ids = cluster_id_to_read_ids_map[cluster_id]
l = len(read_ids)
if l <= threshold:
yield cluster_id
def quality(args):
"""
args.data: Location of the data folder
args.simulated: Name of the simulated dataset
args.result: Location of the cluster result
args.threshold: Clusters which contain at most `threshold` sequences are considered trivial
args.tool: 'isONclust' | 'qCluster' | 'random_cluster'
"""
"""
{
'm99998/100/CCS': 'ENSG00000100150|ENSG00000100150.20|ENST00000646515|ENST00000646515.1',
'm99999/100/CCS': 'ENSG00000187866|ENSG00000187866.10|ENST00000394264|ENST00000394264.7'
}
"""
classes, chromosome_to_read_ids_map = parse_true_clusters(args)
    # the reads are simulated, so the true class of every read is known and len(classes) equals the total number of reads
tot_nr_reads = len(classes)
tool = args.tool
"""
clusters = {
'm99998/100/CCS': 234,
'm99999/100/CCS': 102,
...
}
"""
if tool == 'isONclust':
clusters, cluster_id_to_read_ids_map, k = isonclust.read_inferred_clusters(args)
elif tool == 'qCluster':
clusters, cluster_id_to_read_ids_map, k = qcluster.read_inferred_clusters(args)
elif tool == 'random_cluster':
clusters, cluster_id_to_read_ids_map, k = random_cluster.read_inferred_clusters(args)
cluster_stats = metrics.compute_cluster_stats(k, cluster_id_to_read_ids_map)
print(f'cluster_stats:{cluster_stats}')
trivial_class_chromosomes = set(compute_trivial_classes(chromosome_to_read_ids_map, threshold=args.threshold))
print(f'# trivial classes: {len(trivial_class_chromosomes)}')
assert len(trivial_class_chromosomes) == 0
labels_true, labels_pred = metrics.compute_cluster_labels(clusters, classes)
external_evaluation = metrics.compute_external_metrics(labels_true, labels_pred)
print(f'External evaluation: {external_evaluation}\n')
if external_evaluation is not None:
df = create_quality_dataframe(external_evaluation=external_evaluation,
cluster_stats=cluster_stats)
write_quality_dataframe_to_csv(df, args)
if tool == 'random_cluster':
return
# metrics for singleton clusters
singleton_cluster_ids = set(compute_trivial_clusters(cluster_id_to_read_ids_map, threshold=1))
print(f'# singleton clusters: {len(singleton_cluster_ids)}')
labels_true_no_singleton, labels_pred_no_singleton = metrics.compute_cluster_labels(clusters, classes, without=singleton_cluster_ids)
external_evaluation_no_singleton = metrics.compute_external_metrics(labels_true_no_singleton, labels_pred_no_singleton)
print(f'External evaluation (no singleton): {external_evaluation_no_singleton}\n')
if external_evaluation_no_singleton is not None:
df = create_quality_dataframe(external_evaluation=external_evaluation_no_singleton,
cluster_stats=cluster_stats)
write_quality_dataframe_to_csv(df, args, prefix='no_singleton_')
# metrics for trivial clusters wrt `args.threshold`
trivial_cluster_ids = set(compute_trivial_clusters(cluster_id_to_read_ids_map, threshold=args.threshold))
print(f'# trivial clusters: {len(trivial_cluster_ids)}')
labels_true_no_trivial, labels_pred_no_trivial = metrics.compute_cluster_labels(clusters, classes, without=trivial_cluster_ids)
external_evaluation_no_trivial = metrics.compute_external_metrics(labels_true_no_trivial, labels_pred_no_trivial)
print(f'External evaluation (no trivial): {external_evaluation_no_trivial}\n')
if external_evaluation_no_trivial is not None:
df = create_quality_dataframe(external_evaluation=external_evaluation_no_trivial,
cluster_stats=cluster_stats)
write_quality_dataframe_to_csv(df, args, prefix='no_trivial_')
# number of clusters by quality types
n_clusters = k
n_clusters_trivial = len(trivial_cluster_ids)
n_clusters_singleton = len(singleton_cluster_ids)
df = create_n_clusters_dataframe(n_clusters, n_clusters_trivial, n_clusters_singleton)
write_n_clusters_dataframe_to_csv(df, args)
def create_n_clusters_dataframe(n_clusters: int,
n_clusters_trivial: int,
n_clusters_singleton: int) -> pd.DataFrame:
data = {
'k': [n_clusters],
'k_non_trivial': [n_clusters - n_clusters_trivial],
'k_trivial': [n_clusters_trivial],
'k_singleton': [n_clusters_singleton],
}
df = pd.DataFrame.from_dict(data)
return df
def write_n_clusters_dataframe_to_csv(df: pd.DataFrame, args):
csv_filename = f'{args.result}_n_clusters.csv'
csv_path = path.join(args.data, 'quality', args.tool, args.simulated)
Path(csv_path).mkdir(parents=True, exist_ok=True)
df.to_csv(path.join(csv_path, csv_filename), sep=',', index=False,
encoding='utf-8', decimal='.')
def write_quality_dataframe_to_csv(df: pd.DataFrame, args, prefix: str = ''):
csv_prefix = f'{prefix}{args.result}'
if len(csv_prefix) > 0:
csv_prefix = f'{csv_prefix}_'
csv_filename = f'{csv_prefix}quality.csv'
csv_path = path.join(args.data, 'quality', args.tool, args.simulated)
Path(csv_path).mkdir(parents=True, exist_ok=True)
df.to_csv(path.join(csv_path, csv_filename), sep=',', index=False,
encoding='utf-8', decimal='.')
def create_quality_dataframe(external_evaluation: metrics.ExternalEvaluation,
cluster_stats: metrics.ClusterStats) -> pd.DataFrame:
metrics_data = {
metric_name: [metric_value] for (metric_name, metric_value) in external_evaluation
}
cluster_stats_data = {
stat_name: [stat_value] for (stat_name, stat_value) in cluster_stats
}
data = {
**metrics_data,
**cluster_stats_data,
}
df = pd.DataFrame.from_dict(data)
return df
``` |
{
"source": "jkomyno/combinatorial-optimization-tsp",
"score": 4
} |
#### File: python/benchmark/ParameterGrid.py
```python
from itertools import product
class ParameterGrid:
"""
Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations.
"""
def __init__(self, grid_params):
# sort keys of a dictionary for reproducibility
self.items = sorted(grid_params.items())
def __iter__(self):
"""
Iterate over the points in the grid.
"""
        # self.items was already sorted by key in __init__, so the iteration order is deterministic
if not self.items:
yield {}
else:
keys, values = zip(*self.items)
# for each entry v of the cartesian product
for v in product(*values):
params = dict(zip(keys, v))
yield params
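# Example usage (illustrative, not part of the original module):
#
#   grid = ParameterGrid({'k': [1, 2], 'eps': [0.1, 0.2]})
#   list(grid)
#   # -> [{'eps': 0.1, 'k': 1}, {'eps': 0.1, 'k': 2},
#   #     {'eps': 0.2, 'k': 1}, {'eps': 0.2, 'k': 2}]
#   # keys are iterated in sorted order, so the combinations are reproducible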
```
#### File: python/calibrate/calibrate_final_tuning.py
```python
from python.calibrate.HyperParameters import HyperParameters
from . import utils
import math
import itertools
import subprocess
import pandas as pd
import numpy as np
from os import path
from glob import iglob
from pymoo.algorithms.nsga2 import NSGA2
from pymoo.operators.mixed_variable_operator import MixedVariableSampling
from pymoo.operators.mixed_variable_operator import MixedVariableMutation
from pymoo.operators.mixed_variable_operator import MixedVariableCrossover
from pymoo.model.problem import Problem
from pymoo.factory import get_sampling
from pymoo.factory import get_crossover
from pymoo.factory import get_mutation
TIMEOUT_MS = 60000
ITERATIONS_TIME = '06:00:00'
EXECUTIONS_FOR_ITERATION = 50
var2idx = {
var: i for i, var in enumerate(['max_gen_no_improvement',
'max_gen'])
}
def get_var(x, var):
return x[var2idx[var]]
mask = [
'int', # (0) max_gen_no_improvement
'int', # (1) max_gen
]
bounds = [
(50, 200), # (0) max_gen_no_improvement
(50, 500), # (1) max_gen
]
lower_bounds, upper_bounds = utils.lower_upper_bounds(bounds)
class MetaProblem(Problem):
def __init__(self, args, hyperparameters: HyperParameters, **kwargs):
super().__init__(n_var=len(mask),
n_obj=2,
n_constr=2,
xl=lower_bounds,
xu=upper_bounds,
elementwise_evaluation=True,
**kwargs)
self.args = args
self.hyperparameters = hyperparameters
def _evaluate(self, x, out, *args, **kwargs):
##########################
# minimization functions #
##########################
# we want to minimize our average solutions and the probability of spikes in those solutions
average, stddev = run_ex2_metaheuristic(self.args, x, self.hyperparameters)
###############
# constraints #
###############
max_gen_no_improvement = get_var(x, 'max_gen_no_improvement')
max_gen = get_var(x, 'max_gen')
# max_gen_no_improvement < max_gen
        max_gen_no_improvement_lt_max_gen = max_gen_no_improvement - max_gen - 1
###########
# outputs #
###########
# 'F' represents the problem objectives
out['F'] = [average, stddev]
# 'G' represents the problem constraints in the form g(x) <= 0
        out['G'] = [max_gen_no_improvement_lt_max_gen]
def get_ex2_metaheuristic_result(args, dataset: str, hyperparameters: HyperParameters,
max_gen_no_improvement: int, max_gen: int):
timeout_ms = TIMEOUT_MS
cmd = list(map(str, [
args.program,
'--timeout-ms', timeout_ms,
'--filename', dataset,
'--mutation-probability', hyperparameters.mutation_probability,
'--crossover-rate', hyperparameters.crossover_rate,
'--mu', hyperparameters.mu,
'--lambda', hyperparameters.lambda_,
'-k', hyperparameters.k,
'--max-gen-no-improvement', max_gen_no_improvement,
'--max-gen', max_gen,
]))
# execute program and capture its output
with subprocess.Popen(cmd, stdout=subprocess.PIPE, bufsize=1,
universal_newlines=True) as p:
stdout = itertools.islice(p.stdout, 4, None)
for line in stdout:
if not line.lstrip().startswith('#'):
break
stdout = itertools.islice(stdout, 4, None)
solution_raw = next(stdout).split(': ')[1]
solution = int(float(solution_raw))
return solution
def run_ex2_metaheuristic(args, x, hyperparameters: HyperParameters):
max_gen_no_improvement = get_var(x, 'max_gen_no_improvement')
max_gen = get_var(x, 'max_gen')
if max_gen_no_improvement >= max_gen:
return math.inf, math.inf
solutions = [
get_ex2_metaheuristic_result(args, dataset, hyperparameters, max_gen_no_improvement, max_gen)
for dataset in iglob(path.join(args.datasets, '*.tsp'))
]
average = np.mean(solutions)
stddev = np.std(solutions)
return average, stddev
def calibrate_final_tuning(args, pool, hyperparameters: HyperParameters):
sampling = MixedVariableSampling(mask, {
# Integer numbers are sampled via Uniform Random Sampling
'int': get_sampling('int_random')
})
crossover = MixedVariableCrossover(mask, {
'int': get_crossover('real_sbx', prob=0.9, eta=3.0)
})
mutation = MixedVariableMutation(mask, {
# Integer numbers are mutated via polynomial mutation
'int': get_mutation('int_pm', eta=3.0)
})
problem = MetaProblem(args, hyperparameters=hyperparameters, parallelization=None)
algorithm = NSGA2(
pop_size=EXECUTIONS_FOR_ITERATION,
sampling=sampling,
crossover=crossover,
mutation=mutation,
eliminate_duplicates=True,
)
termination = utils.get_termination_for_final_tuning(time=ITERATIONS_TIME)
res, \
best_solution, \
min_average, \
min_stddev = utils.get_minimizer(problem, algorithm, termination)
save_csv(args, best_solution, min_average, min_stddev)
def save_csv(args, best_solution, min_average, min_stddev):
max_gen_no_improvement = get_var(best_solution, 'max_gen_no_improvement')
max_gen = get_var(best_solution, 'max_gen')
print(f'max_gen_no_improvement: {max_gen_no_improvement}')
print(f'max_gen: {max_gen}')
print(f'min_average: {min_average}')
print(f'min_stddev: {min_stddev}')
df = pd.DataFrame.from_records([{
'max_gen_no_improvement': max_gen_no_improvement,
'max_gen': max_gen,
'min_average': min_average,
'min_stddev': min_stddev,
}])
df.to_csv(path.join(args.output, f'calibration_final_tuning.csv'), sep=',', index=False,
encoding='utf-8', decimal='.')
``` |
{
"source": "jkomyno/lattice-submodular-maximization",
"score": 3
} |
#### File: benchmark/algo/SGL_a.py
```python
import numpy as np
from nptyping import NDArray
from typing import Tuple
from ..objective import Objective
from .. import utils
def SGL_a(rng: np.random.Generator,
f: Objective, r: int, eps: float) -> Tuple[NDArray[int], float]:
"""
Randomized algorithm for integer-lattice submodular maximization of monotone functions with cardinality
constraints in linear time.
This is a generalization of the StochasticGreedy algorithm for set-submodular monotone functions.
:param rng: numpy random generator instance
:param f: integer-lattice submodular function objective
:param r: cardinality constraint
:param eps: non-negative error threshold
"""
# compute s, the sample size
s = utils.compute_sample_size(n=f.n, r=r, eps=eps)
# the solution starts from the zero vector
x = np.zeros((f.n, ), dtype=int)
# prev_value keeps track of the value of f(x)
prev_value = 0
# iteration counter
t = 0
# norm keeps track of the L-1 norm of x
norm = 0
d = max((f.value(utils.char_vector(f, e)) for e in f.V))
theta = d
stop_theta = (eps / r) * d
while norm < r:
# random sub-sampling step
sample_space = np.where(x < f.B)[0]
s_actual = min(s, len(sample_space))
Q = rng.choice(sample_space, size=s_actual, replace=False)
Q_one = list(map(lambda e: utils.char_vector(f, e), Q))
# potentially add multiple copies of every item in Q
for i, e in enumerate(Q):
one_e = Q_one[i]
k_max = np.min([f.B[e] - x[e], r - norm])
k_range = list(range(1, k_max + 1))
            # find the maximal k in k_range such that f(k * 1_e | x) >= k * theta
best_t = utils.binary_search(f, k_range, one_e=one_e, x=x,
prev_value=prev_value, theta=theta)
if best_t is None:
# no feasible k was found, nothing gets added to x this iteration.
continue
k, candidate_x, candidate_value = best_t
# We add to x the element in the sample q that increases the value of f
# the most, extracted k times.
x = candidate_x
norm += k
prev_value = candidate_value
# update theta
theta = max(theta * (1 - eps), stop_theta)
# increment iteration counter
t += 1
print(f'SGL-a t={t}; n={f.n}; B={f.B_range}; r={r}; norm={norm}')
assert norm == r
return x, prev_value
```
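The decreasing-threshold schedule used above (start at the best singleton value d, decay by a factor of 1 - eps, floor at (eps / r) * d) can be illustrated on its own; the numbers below are arbitrary and not tied to any benchmark instance:
```python
# Standalone sketch of the theta schedule in SGL-a (d, eps and r are made up).
d, eps, r = 10.0, 0.1, 5
theta, stop_theta = d, (eps / r) * d   # start at d, floor at (eps / r) * d = 0.2
schedule = []
while theta > stop_theta:
    schedule.append(theta)
    theta = max(theta * (1 - eps), stop_theta)
print(schedule[:3])   # roughly [10.0, 9.0, 8.1] -- geometric decay towards the floor
```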
#### File: benchmark/algo/SGL_II.py
```python
import numpy as np
from nptyping import NDArray
from typing import List, Tuple
from ..objective import Objective
from .. import utils
def SGL_II(rng: np.random.Generator,
f: Objective, r: int, eps: float) -> Tuple[NDArray[int], float]:
"""
    Randomized algorithm for DR-submodular maximization of monotone functions
defined on the integer lattice with cardinality constraints.
This is a generalization of the StochasticGreedy algorithm for set-submodular monotone functions.
:param rng: numpy random generator instance
:param f: integer-lattice submodular function objective
:param r: cardinality constraint
:param eps: non-negative error threshold
"""
# compute s, the sample size
s = utils.compute_sample_size(n=f.n, r=r, eps=eps)
# the solution starts from the zero vector
x = np.zeros((f.n, ), dtype=int)
# prev_value keeps track of the value of f(x)
prev_value = 0
for _ in range(r):
V = np.copy(np.where(x < f.B)[0])
rng.shuffle(V)
# split list V in batches of size at most s
batches = utils.split_list(V, s)
for Q in batches:
# lazy list of (e, k_max), where k_max is the highest k such that
# f(x + k * 1_e) >= f(x) while making sure that the cardinality constraint
# is respected
            e_k_max: List[Tuple[int, int]] = [
(e, min(f.B[e] - x[e], r - np.sum(x)))
for e in Q
]
# lazy list of (one_e, max_k)
# one_e_k_max = utils.map_fst(lambda e: utils.char_vector(f, e), e_k_max)
one_e_k_max = map(lambda ek: (utils.char_vector(f, ek[0]) , ek[1]), e_k_max)
# We add k copies of the element in the sample q that increases the value of f
# the most to the solution x.
x, prev_value, marginal_gain, k = max((
(
candidate_x := x + k * one_e,
candidate_value := f.value(candidate_x),
candidate_value - prev_value,
k
) for one_e, k in one_e_k_max
), key=utils.trd)
if np.sum(x) == r:
break
if np.sum(x) == r:
break
assert np.sum(x) <= r
print(f'SGL-II n={f.n}; B={f.B_range}; r={r}; norm={np.sum(x)}')
return x, prev_value
```
#### File: benchmark/conf_utils/get_objective.py
```python
from typing import Iterator, Tuple, Dict, List
import numpy as np
from nptyping import NDArray
from omegaconf import DictConfig
from ..objective import Objective, DemoMonotone, DemoMonotoneSkewed, \
DemoNonMonotone, FacilityLocation, BudgetAllocation
from .. import dataset_utils
def compute_B(rng: np.random.Generator,
n: int, B_range: Tuple[int, int]) -> NDArray[int]:
"""
Compute the upper-bound array B.
:param rng: numpy random generator instance
:param n: size of the ground set
:param B_range: inclusive range for the values of each entry of B
"""
low, high = B_range
return rng.integers(low=low, high=high, size=(n, ), endpoint=True)
OBJ_MAP = {
'demo_monotone': lambda **kwargs: load_demo_monotone(**kwargs),
'demo_monotone_skewed': lambda **kwargs: load_demo_monotone_skewed(**kwargs),
'demo_non_monotone': lambda **kwargs: load_demo_non_monotone(**kwargs),
'facility_location': lambda **kwargs: load_facility_location(**kwargs),
'budget_allocation': lambda **kwargs: load_budget_allocation(**kwargs),
}
def load_demo_monotone(rng: np.random.Generator,
params,
**kwargs) -> List[Tuple[Objective, int]]:
"""
Generate a random modular, monotone function
:param rng: numpy random generator instance
:param params: 'params.demo_monotone' dictionary entry in conf/config.yaml
"""
nBr: List[Tuple[int, Tuple[int, int], int]] = params.benchmark.nBr
fr: List[Tuple[Objective, int]] = [None] * len(nBr)
for i, (n, B_range, r) in enumerate(nBr):
B = compute_B(rng, n, B_range)
fr[i] = (DemoMonotone(rng, n=n, B=B, B_range=B_range), r)
return fr
def load_demo_monotone_skewed(rng: np.random.Generator,
params,
**kwargs) -> List[Tuple[Objective, int]]:
"""
Generate a random skewed modular, monotone function
:param rng: numpy random generator instance
:param params: 'params.demo_monotone' dictionary entry in conf/config.yaml
"""
nBr: List[Tuple[int, Tuple[int, int], int]] = params.benchmark.nBr
fr: List[Tuple[Objective, int]] = [None] * len(nBr)
for i, (n, B_range, r) in enumerate(nBr):
B = compute_B(rng, n, B_range)
fr[i] = (DemoMonotoneSkewed(rng, n=n, B=B, B_range=B_range), r)
return fr
def load_demo_non_monotone(rng: np.random.Generator,
params,
**kwargs) -> List[Tuple[Objective, int]]:
"""
Generate a random modular, non_monotone function
:param rng: numpy random generator instance
:param params: 'params.demo_non_monotone' dictionary entry in conf/config.yaml
"""
nBr: List[Tuple[int, Tuple[int, int], int]] = params.benchmark.nBr
fr: List[Tuple[Objective, int]] = [None] * len(nBr)
for i, (n, B_range, r) in enumerate(nBr):
B = compute_B(rng, n, B_range)
fr[i] = (DemoNonMonotone(rng, n=n, B=B, B_range=B_range), r)
return fr
def load_facility_location(rng: np.random.Generator,
dataset_dir: str,
params,
**kwargs) -> List[Tuple[Objective, int]]:
"""
Generate a random integer-lattice submodular, monotone function that models
the Facility Location problem.
:param rng: numpy random generator instance
:param dataset_dir: datasets main directory
:param params: 'params.demo_facility_location' dictionary entry in conf/config.yaml
"""
print(f'Loading Movielens 100k...')
G = dataset_utils.import_movielens_100k(dataset_dir)
print(f'...Movielens 100k successfully loaded')
br: List[Tuple[int, int]] = params.benchmark.br
fr: List[Tuple[Objective, int]] = [None] * len(br)
for i, (b, r) in enumerate(br):
fr[i] = (FacilityLocation(G=G, b=b), r)
return fr
def load_budget_allocation(rng: np.random.Generator,
dataset_dir: str,
params,
**kwargs) -> Iterator[Tuple[Objective, int]]:
"""
Generate a random integer-lattice DR-submodular, monotone function that models
the Budget Allocation problem.
:param rng: numpy random generator instance
:param dataset_dir: datasets main directory
:param params: 'params.demo_facility_location' dictionary entry in conf/config.yaml
"""
# keep track of the vector B generated by the corresponding (n, b_low, b_high)
n_B_range_to_B_map: Dict[Tuple[int, int, int], NDArray[int]] = dict()
trim_graph = dataset_utils.import_wikilens_ratings(rng, dataset_dir)
nBr: List[Tuple[int, Tuple[int, int], int]] = params.benchmark.nBr
fr: List[Tuple[Objective, int]] = [None] * len(nBr)
for i, (n, B_range, r) in enumerate(nBr):
# make sure that all experiments with the same B_range have the same
# random B vector
B = n_B_range_to_B_map.setdefault((n, *B_range), compute_B(rng, n, B_range))
# trim the original bipartite graph G=(V \cup T, E) such that |V| = n
G = trim_graph(n=n)
fr[i] = (BudgetAllocation(G=G, B=B, B_range=B_range), r)
return fr
def get_objective(rng: np.random.Generator,
dataset_dir: str,
cfg: DictConfig) -> List[Tuple[Objective, int]]:
"""
Return an instance of the selected set-submodular objective
:param rng: numpy random generator instance
:param dataset_dir: datasets main directory
:param cfg: Hydra configuration dictionary
"""
objective_name = cfg.obj.name
print(f'Loading f: {objective_name}\n')
return OBJ_MAP[objective_name](rng=rng,
params=cfg.obj,
dataset_dir=dataset_dir)
```
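A quick illustrative call of compute_B as defined above (seed, size and range are arbitrary; the drawn values depend on the generator state):
```python
import numpy as np
# assumes compute_B from the module above is in scope
rng = np.random.default_rng(0)
B = compute_B(rng, n=4, B_range=(1, 3))
print(B)   # e.g. [2 3 1 3] -- four upper bounds, each drawn uniformly from {1, 2, 3}
```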
#### File: benchmark/objective/BudgetAllocation.py
```python
import networkx as nx
from nptyping import NDArray
from typing import List, Tuple
from .. import utils
from .Objective import Objective
class BudgetAllocation(Objective):
def __init__(self, G: nx.Graph, B: NDArray[int], B_range: Tuple[int, int]):
"""
Optimal budget allocation is a special case of the influence maximization
problem. It can be modeled as a bipartite graph (S, T; W), where S and T
are collections of advertising channels and customers, respectively. The
edge weight, p_st ∈ W, represents the influence probability of channel s
to customer t. The goal is to distribute the budget (e.g., time for a TV
advertisement, or space of an inline ad) among the source nodes, and to
maximize the expected influence on the potential customers.
The total influence of customer t from all channels can be modeled
by a proper monotone DR-submodular function I_t(x), where x is the
budget assignment among the advertising channels.
A concrete application is for search marketing advertiser bidding, in
which vendors bid for the right to appear alongside the results of
different search keywords.
https://arxiv.org/pdf/1606.05615.pdf (§6, Optimal budget allocation with
continuous assignments)
"""
V: List[int] = [n for n in G.nodes if G.nodes[n]['bipartite'] == 0]
T: List[int] = [m for m in G.nodes if G.nodes[m]['bipartite'] == 1]
super().__init__(V, B, B_range)
# W[s, t] is the influence probability of channel s to customer t.
W = nx.adjacency_matrix(G)
# collect the neighbors s \in S of each t \in T
neighbors: List[List[int]] = [[s for s in G.neighbors(t)] for t in T]
# keep track of (1 - p(s, t), s) for each neighbors s \in S of each t \in T
self.probs_exp_list: List[List[Tuple[float, int]]] = [
[(1 - W[s, t], s) for s in s_neighbors]
for s_neighbors, t in zip(neighbors, T)
]
def value(self, x: NDArray[int]) -> float:
"""
Value oracle for the Budget Allocation problem.
:param x: allotted budget.
:return: expected number of influenced people
"""
super().value(x)
return sum((
1 - utils.prod(
neg_p_st ** x[s]
for neg_p_st, s in probs_exp
) for probs_exp in self.probs_exp_list
))
```
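A worked toy instance of the influence formula implemented in value() above; the probabilities and budgets are made up and not taken from the WikiLens data:
```python
# Single customer t reached by channels s1, s2 with influence probabilities
# p(s1, t) = 0.5, p(s2, t) = 0.2 and budgets x[s1] = 2, x[s2] = 1.
p = {'s1': 0.5, 's2': 0.2}
x = {'s1': 2, 's2': 1}
influence_t = 1 - (1 - p['s1']) ** x['s1'] * (1 - p['s2']) ** x['s2']
print(influence_t)   # 1 - 0.25 * 0.8 = 0.8, the expected influence on t alone
```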
#### File: benchmark/set_algo/stochastic_greedy.py
```python
import math
import numpy as np
from nptyping import NDArray
from typing import Set, Tuple
from ..set_objective import SetObjective
from .. import utils
def stochastic_greedy(rng: np.random.Generator, f: SetObjective,
r: int, eps: float) -> Tuple[Set[int], float]:
"""
Computes a set A \subseteq V such that |A| \leq r.
:param rng: numpy random generator instance
:param f: set-submodular function to maximize
:param r: cardinality constraint
:param eps: error threshold
"""
# compute s, the sample size
s = utils.compute_sample_size(n=f.n, r=r, eps=eps)
# the solution starts from the empty set
A: Set[int] = set()
# prev_value keeps track of the value of f(A)
prev_value = 0
while len(A) < r:
# R is a random subset obtained by sampling s random elements
# from V - A
sample_space = list(f.V - A)
R: NDArray[int] = rng.choice(sample_space, size=min(s, len(sample_space)), replace=False)
prev_value, marginal_gain, a = max((
(candidate_value := f.value(A | {a}), candidate_value - prev_value, a)
for a in R
), key=utils.snd)
A.add(a)
return A, prev_value
```
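A self-contained toy run of the sample-then-argmax step above, using a simple modular set function where f(A) is a sum of fixed weights (the weights, cardinality constraint and sample size are arbitrary and only serve to show the mechanics):
```python
import numpy as np

# Toy modular set function: f(A) = sum of per-element weights (illustrative values).
weights = {0: 5.0, 1: 1.0, 2: 3.0, 3: 2.0, 4: 4.0}
value = lambda A: sum(weights[i] for i in A)

rng = np.random.default_rng(0)
V, A, r, s = set(weights), set(), 3, 2   # ground set, solution, cardinality, sample size
prev_value = 0.0
while len(A) < r:
    sample_space = list(V - A)
    R = rng.choice(sample_space, size=min(s, len(sample_space)), replace=False)
    # pick the sampled element with the largest marginal gain, as in stochastic_greedy
    prev_value, gain, a = max(
        ((value(A | {a}), value(A | {a}) - prev_value, a) for a in R),
        key=lambda t: t[1])
    A.add(int(a))
print(A, prev_value)   # a size-3 set; with exhaustive sampling it would be {0, 2, 4}, value 12.0
```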
#### File: benchmark/utils/binary_search.py
```python
from typing import List, Union, Tuple
from nptyping import NDArray
from ..objective import Objective
def binary_search(f: Objective, k_range: List[int], one_e: NDArray[int],
x: NDArray[int], prev_value: float,
theta: float) -> Union[None, Tuple[int, NDArray[int], float]]:
"""
Iterative binary search for the maximum k in k_range such that
f.marginal_gain(k * one_e, x) >= k * theta.
:param f: monotone integer lattice submodular function
:param k_range: sorted range of k to search
:param one_e: n-dimensional characteristic vector of e
:param x: previous iterate
:param prev_value: value of f(x)
:param theta: threshold
    :return: (k, x + k * one_e, f(x + k * one_e)) or None if no k such that
f.marginal_gain(k * one_e, x) >= k * theta could be found.
"""
if len(k_range) == 0:
return None
k_max = k_range[-1]
k_min = k_range[0]
best_t = None
while k_min <= k_max:
candidate_k = k_max - (k_max - k_min) // 2
candidate_x = x + candidate_k * one_e
candidate_value = f.value(candidate_x)
marginal_gain = candidate_value - prev_value
if marginal_gain >= candidate_k * theta:
k_min = candidate_k + 1
if best_t is None or best_t[0] < candidate_k:
best_t = (candidate_k, candidate_x, candidate_value)
else:
k_max = candidate_k - 1
return best_t
```
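The search above follows a standard pattern: find the largest k in a sorted range for which a predicate holds, where the predicate is true on a prefix of the range (here the marginal-gain test f(k * 1_e | x) >= k * theta). A stripped-down, self-contained version of that pattern with a toy predicate (no Objective instance involved):
```python
def max_k_satisfying(k_range, pred):
    """Largest k in the sorted k_range with pred(k) True, or None if there is none."""
    lo, hi, best = 0, len(k_range) - 1, None
    while lo <= hi:
        mid = (lo + hi) // 2
        if pred(k_range[mid]):
            best = k_range[mid]   # feasible: remember it and keep looking to the right
            lo = mid + 1
        else:
            hi = mid - 1          # infeasible: look to the left
    return best

print(max_k_satisfying(list(range(1, 11)), lambda k: k * k <= 40))   # 6
```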
#### File: utils/bridge/to_set.py
```python
from typing import Set
from nptyping import NDArray
from ...objective import Objective
def to_set(f: Objective, x: NDArray[int]) -> Set[int]:
"""
Convert an integer lattice solution to a set submodular solution.
:param f: integer lattice submodular function
:param x: integer lattice solution
"""
S = set()
for i, e in enumerate(x):
for c in range(e):
S.add(i + c * f.n)
return S
```
#### File: benchmark/utils/fst.py
```python
from typing import Tuple, TypeVar, Set, Any
T = TypeVar('T', int, float, Set[int])
def fst(x: Tuple[T, Any]) -> T:
return x[0]
```
#### File: benchmark/utils/prod.py
```python
from functools import reduce # Required in Python 3
from typing import Iterable, TypeVar
import operator
T = TypeVar('T')
def prod(iterable: Iterable[T]) -> T:
"""
Returns the product of the elements in the given iterable.
"""
return reduce(operator.mul, iterable, 1)
```
#### File: benchmark/utils/snd.py
```python
from typing import Tuple, TypeVar, Set, Any
T = TypeVar('T', int, float, Set[int])
def snd(x: Tuple[Any, T]) -> T:
return x[1]
```
#### File: benchmark/utils/trd.py
```python
from typing import Tuple, TypeVar, Set, Any
T = TypeVar('T', int, float, Set[int])
def trd(x: Tuple[Any, Any, T]) -> T:
return x[2]
```
#### File: plotter/utils/boxplot_by_algo.py
```python
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def boxplot_by_algo(data: pd.DataFrame, y: str, ylabel: str, title: str,
plots_folder: str, filename: str, ax):
PROPS = {
'boxprops': {'facecolor':'none', 'edgecolor':'black'},
'medianprops': {'color':'black'},
'whiskerprops': {'color':'black'},
'capprops': {'color':'black'}
}
sns.boxplot(x='algo', y=y, data=data, ax=ax, **PROPS)
# ax.set_title(title)
ax.set_xlabel('Algorithm')
ax.set_ylabel(ylabel, labelpad=10)
output = f'{plots_folder}/{filename}.pdf'
plt.savefig(output, dpi=300, bbox_inches='tight')
print(f'Saved {output}')
sns.despine()
plt.cla()
``` |
{
"source": "jkondic/overcooked_ai",
"score": 2
} |
#### File: overcooked_ai_py/agents/benchmarking.py
```python
import copy
import numpy as np
from overcooked_ai_py.utils import save_pickle, load_pickle, cumulative_rewards_from_rew_list, save_as_json, \
load_from_json, merge_dictionaries, rm_idx_from_dict, take_indexes_from_dict, is_iterable
from overcooked_ai_py.planning.planners import NO_COUNTERS_PARAMS
from overcooked_ai_py.agents.agent import AgentPair, RandomAgent, GreedyHumanModel
from overcooked_ai_py.mdp.overcooked_mdp import OvercookedGridworld, Action, OvercookedState
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv
from overcooked_ai_py.mdp.layout_generator import LayoutGenerator
from overcooked_ai_py.mdp.overcooked_trajectory import DEFAULT_TRAJ_KEYS
class AgentEvaluator(object):
"""
Class used to get rollouts and evaluate performance of various types of agents.
TODO: This class currently only fully supports fixed mdps, or variable mdps that can be created with the LayoutGenerator class,
but might break with other types of variable mdps. Some methods currently assume that the AgentEvaluator can be reconstructed
from loaded params (which must be pickleable). However, some custom start_state_fns or mdp_generating_fns will not be easily
pickleable. We should think about possible improvements/what makes most sense to do here.
"""
def __init__(self, env_params, mdp_fn, force_compute=False, mlam_params=NO_COUNTERS_PARAMS, debug=False):
"""
env_params (dict): params for creation of an OvercookedEnv
mdp_fn (callable function): a function that can be used to create mdp
force_compute (bool): whether should re-compute MediumLevelActionManager although matching file is found
mlam_params (dict): the parameters for mlam, the MediumLevelActionManager
debug (bool): whether to display debugging information on init
"""
assert callable(mdp_fn), "mdp generating function must be a callable function"
env_params["mlam_params"] = mlam_params
self.mdp_fn = mdp_fn
self.env = OvercookedEnv(self.mdp_fn, **env_params)
self.force_compute = force_compute
@staticmethod
def from_mdp_params_infinite(mdp_params, env_params, outer_shape=None, mdp_params_schedule_fn=None, force_compute=False,
mlam_params=NO_COUNTERS_PARAMS, debug=False):
"""
mdp_params (dict): params for creation of an OvercookedGridworld instance through the `from_layout_name` method
outer_shape: the outer shape of environment
mdp_params_schedule_fn: the schedule for varying mdp params
Information for the rest of params please refer to the __init__ method above
Infinitely generate mdp using the naive mdp_fn
"""
assert outer_shape is not None, "outer_shape needs to be defined for variable mdp"
assert "num_mdp" in env_params and np.isinf(env_params["num_mdp"]), \
"num_mdp needs to be specified and infinite"
mdp_fn_naive = LayoutGenerator.mdp_gen_fn_from_dict(mdp_params, outer_shape, mdp_params_schedule_fn)
return AgentEvaluator(env_params, mdp_fn_naive, force_compute, mlam_params, debug)
@staticmethod
def from_mdp_params_finite(mdp_params, env_params, outer_shape=None, mdp_params_schedule_fn=None, force_compute=False,
mlam_params=NO_COUNTERS_PARAMS, debug=False):
"""
mdp_params (dict): params for creation of an OvercookedGridworld instance through the `from_layout_name` method
outer_shape: the outer shape of environment
mdp_params_schedule_fn: the schedule for varying mdp params
Information for the rest of params please refer to the __init__ method above
Generate a finite list of mdp (mdp_lst) using the naive mdp_fn, and then use the from_mdp_lst to generate
the AgentEvaluator
"""
assert outer_shape is not None, "outer_shape needs to be defined for variable mdp"
assert "num_mdp" in env_params and not np.isinf(env_params["num_mdp"]), \
"num_mdp needs to be specified and finite"
mdp_fn_naive = LayoutGenerator.mdp_gen_fn_from_dict(mdp_params, outer_shape, mdp_params_schedule_fn)
# finite mdp, random choice
num_mdp = env_params['num_mdp']
assert type(num_mdp) == int and num_mdp > 0, "invalid number of mdp: " + str(num_mdp)
mdp_lst = [mdp_fn_naive() for _ in range(num_mdp)]
return AgentEvaluator.from_mdp_lst(mdp_lst=mdp_lst, env_params=env_params,
force_compute=force_compute, mlam_params=mlam_params, debug=debug)
@staticmethod
def from_mdp(mdp, env_params, force_compute=False, mlam_params=NO_COUNTERS_PARAMS, debug=False):
"""
mdp (OvercookedGridworld): the mdp that we want the AgentEvaluator to always generate
Information for the rest of params please refer to the __init__ method above
"""
assert type(mdp) == OvercookedGridworld, "mdp must be a OvercookedGridworld object"
mdp_fn = lambda _ignored: mdp
return AgentEvaluator(env_params, mdp_fn, force_compute, mlam_params, debug)
@staticmethod
def from_layout_name(mdp_params, env_params, force_compute=False, mlam_params=NO_COUNTERS_PARAMS, debug=False):
"""
mdp_params (dict): params for creation of an OvercookedGridworld instance through the `from_layout_name` method
Information for the rest of params please refer to the __init__ method above
"""
assert type(mdp_params) is dict and "layout_name" in mdp_params
mdp = OvercookedGridworld.from_layout_name(**mdp_params)
return AgentEvaluator.from_mdp(mdp, env_params, force_compute, mlam_params, debug)
@staticmethod
def from_mdp_lst(mdp_lst, env_params, sampling_freq=None, force_compute=False, mlam_params=NO_COUNTERS_PARAMS, debug=False):
"""
mdp_lst (list): a list of mdp (OvercookedGridworld) we would like to
sampling_freq (list): a list of number that signify the sampling frequency of each mdp in the mdp_lst
Information for the rest of params please refer to the __init__ method above
"""
assert is_iterable(mdp_lst), "mdp_lst must be a list"
assert all([type(mdp) == OvercookedGridworld for mdp in mdp_lst]), "some mdps are not OvercookedGridworld objects"
if sampling_freq is None:
sampling_freq = np.ones(len(mdp_lst)) /len(mdp_lst)
mdp_fn = lambda _ignored: np.random.choice(mdp_lst, p=sampling_freq)
return AgentEvaluator(env_params, mdp_fn, force_compute, mlam_params, debug)
def evaluate_random_pair(self, num_games=1, all_actions=True, display=False, native_eval=False):
agent_pair = AgentPair(RandomAgent(all_actions=all_actions), RandomAgent(all_actions=all_actions))
return self.evaluate_agent_pair(agent_pair, num_games=num_games, display=display, native_eval=native_eval)
def evaluate_human_model_pair(self, num_games=1, display=False, native_eval=False):
a0 = GreedyHumanModel(self.env.mlam)
a1 = GreedyHumanModel(self.env.mlam)
agent_pair = AgentPair(a0, a1)
return self.evaluate_agent_pair(agent_pair, num_games=num_games, display=display, native_eval=native_eval)
def evaluate_agent_pair(self, agent_pair, num_games, game_length=None, start_state_fn=None, metadata_fn=None, metadata_info_fn=None, display=False, dir=None,
display_phi=False, info=True, native_eval=False):
# this index has to be 0 because the Agent_Evaluator only has 1 env initiated
        # if you would like to evaluate on a different env using rllib, please modify
# rllib/ -> rllib.py -> get_rllib_eval_function -> _evaluate
# native eval: using self.env in evaluation instead of creating a copy
        # this is particularly helpful with variable MDP, where we want to make sure
# the mdp used in evaluation is the same as the native self.env.mdp
if native_eval:
return self.env.get_rollouts(agent_pair, num_games=num_games, display=display, dir=dir, display_phi=display_phi,
info=info, metadata_fn=metadata_fn, metadata_info_fn=metadata_info_fn)
else:
horizon_env = self.env.copy()
horizon_env.horizon = self.env.horizon if game_length is None else game_length
horizon_env.start_state_fn = self.env.start_state_fn if start_state_fn is None else start_state_fn
horizon_env.reset()
return horizon_env.get_rollouts(agent_pair, num_games=num_games, display=display, dir=dir, display_phi=display_phi,
info=info, metadata_fn=metadata_fn, metadata_info_fn=metadata_info_fn)
def get_agent_pair_trajs(self, a0, a1=None, num_games=100, game_length=None, start_state_fn=None, display=False, info=True):
"""Evaluate agent pair on both indices, and return trajectories by index"""
if a1 is None:
ap = AgentPair(a0, a0, allow_duplicate_agents=True)
trajs_0 = trajs_1 = self.evaluate_agent_pair(ap, num_games=num_games, game_length=game_length, start_state_fn=start_state_fn, display=display, info=info)
else:
trajs_0 = self.evaluate_agent_pair(AgentPair(a0, a1), num_games=num_games, game_length=game_length, start_state_fn=start_state_fn, display=display, info=info)
trajs_1 = self.evaluate_agent_pair(AgentPair(a1, a0), num_games=num_games, game_length=game_length, start_state_fn=start_state_fn, display=display, info=info)
return trajs_0, trajs_1
@staticmethod
def check_trajectories(trajectories, from_json=False, **kwargs):
"""
        Checks that trajectories are in standard format and are consistent with the dynamics of the mdp.
        If the trajectories were saved as json, do not check that they have standard traj keys.
"""
if not from_json:
AgentEvaluator._check_standard_traj_keys(set(trajectories.keys()))
AgentEvaluator._check_right_types(trajectories)
AgentEvaluator._check_trajectories_dynamics(trajectories, **kwargs)
# TODO: Check shapes?
@staticmethod
def _check_standard_traj_keys(traj_keys_set):
default_traj_keys = DEFAULT_TRAJ_KEYS
assert traj_keys_set == set(default_traj_keys), "Keys of traj dict did not match standard form.\nMissing keys: {}\nAdditional keys: {}".format(
[k for k in default_traj_keys if k not in traj_keys_set], [k for k in traj_keys_set if k not in default_traj_keys]
)
@staticmethod
def _check_right_types(trajectories):
for idx in range(len(trajectories["ep_states"])):
states, actions, rewards = trajectories["ep_states"][idx], trajectories["ep_actions"][idx], trajectories["ep_rewards"][idx]
mdp_params, env_params = trajectories["mdp_params"][idx], trajectories["env_params"][idx]
assert all(type(j_a) is tuple for j_a in actions)
assert all(type(s) is OvercookedState for s in states)
assert type(mdp_params) is dict
assert type(env_params) is dict
# TODO: check that are all lists
@staticmethod
def _check_trajectories_dynamics(trajectories, verbose=True):
if any(env_params["_variable_mdp"] for env_params in trajectories["env_params"]):
if verbose:
print("Skipping trajectory consistency checking because MDP was recognized as variable. "
"Trajectory consistency checking is not yet supported for variable MDPs.")
return
_, envs = AgentEvaluator.get_mdps_and_envs_from_trajectories(trajectories)
for idx in range(len(trajectories["ep_states"])):
states, actions, rewards = trajectories["ep_states"][idx], trajectories["ep_actions"][idx], trajectories["ep_rewards"][idx]
simulation_env = envs[idx]
assert len(states) == len(actions) == len(rewards), "# states {}\t# actions {}\t# rewards {}".format(
len(states), len(actions), len(rewards)
)
# Checking that actions would give rise to same behaviour in current MDP
for i in range(len(states) - 1):
curr_state = states[i]
simulation_env.state = curr_state
next_state, reward, done, info = simulation_env.step(actions[i])
assert states[i + 1] == next_state, "States differed (expected vs actual): {}\n\nexpected dict: \t{}\nactual dict: \t{}".format(
simulation_env.display_states(states[i + 1], next_state), states[i+1].to_dict(), next_state.to_dict()
)
assert rewards[i] == reward, "{} \t {}".format(rewards[i], reward)
@staticmethod
def get_mdps_and_envs_from_trajectories(trajectories):
mdps, envs = [], []
for idx in range(len(trajectories["ep_lengths"])):
mdp_params = copy.deepcopy(trajectories["mdp_params"][idx])
env_params = copy.deepcopy(trajectories["env_params"][idx])
mdp = OvercookedGridworld.from_layout_name(**mdp_params)
env = OvercookedEnv.from_mdp(mdp, **env_params)
mdps.append(mdp)
envs.append(env)
return mdps, envs
### I/O METHODS ###
@staticmethod
def save_trajectories(trajectories, filename):
AgentEvaluator.check_trajectories(trajectories)
        if any(env_params.get("start_state_fn") is not None for env_params in trajectories["env_params"]):
print("Saving trajectories with a custom start state. This can currently "
"cause things to break when loading in the trajectories.")
save_pickle(trajectories, filename)
@staticmethod
def load_trajectories(filename):
trajs = load_pickle(filename)
AgentEvaluator.check_trajectories(trajs)
return trajs
@staticmethod
def save_traj_as_json(trajectory, filename):
"""Saves the `idx`th trajectory as a list of state action pairs"""
assert set(DEFAULT_TRAJ_KEYS) == set(trajectory.keys()), "{} vs\n{}".format(DEFAULT_TRAJ_KEYS, trajectory.keys())
AgentEvaluator.check_trajectories(trajectory)
trajectory = AgentEvaluator.make_trajectories_json_serializable(trajectory)
save_as_json(trajectory, filename)
@staticmethod
def make_trajectories_json_serializable(trajectories):
"""
Cannot convert np.arrays or special types of ints to JSON.
This method converts all components of a trajectory to standard types.
"""
dict_traj = copy.deepcopy(trajectories)
dict_traj["ep_states"] = [[ob.to_dict() for ob in one_ep_obs] for one_ep_obs in trajectories["ep_states"]]
for k in dict_traj.keys():
dict_traj[k] = list(dict_traj[k])
dict_traj['ep_actions'] = [list(lst) for lst in dict_traj['ep_actions']]
dict_traj['ep_rewards'] = [list(lst) for lst in dict_traj['ep_rewards']]
dict_traj['ep_dones'] = [list(lst) for lst in dict_traj['ep_dones']]
dict_traj['ep_returns'] = [int(val) for val in dict_traj['ep_returns']]
dict_traj['ep_lengths'] = [int(val) for val in dict_traj['ep_lengths']]
# NOTE: Currently saving to JSON does not support ep_infos (due to nested np.arrays) or metadata
del dict_traj['ep_infos']
del dict_traj['metadatas']
return dict_traj
@staticmethod
def load_traj_from_json(filename):
traj_dict = load_from_json(filename)
traj_dict["ep_states"] = [[OvercookedState.from_dict(ob) for ob in curr_ep_obs] for curr_ep_obs in traj_dict["ep_states"]]
traj_dict["ep_actions"] = [[tuple(tuple(a) if type(a) is list else a for a in j_a) for j_a in ep_acts] for ep_acts in traj_dict["ep_actions"]]
return traj_dict
############################
    # TRAJ MANIPULATION UTILS  #
############################
# TODO: add more documentation!
@staticmethod
def merge_trajs(trajs_n):
"""
Takes in multiple trajectory objects and appends all the information into one trajectory object
[trajs0, trajs1] -> trajs
"""
metadatas_merged = merge_dictionaries([trajs["metadatas"] for trajs in trajs_n])
merged_trajs = merge_dictionaries(trajs_n)
merged_trajs["metadatas"] = metadatas_merged
return merged_trajs
@staticmethod
def remove_traj_idx(trajs, idx):
# NOTE: MUTATING METHOD for trajs, returns the POPPED IDX
metadatas = trajs["metadatas"]
del trajs["metadatas"]
removed_idx_d = rm_idx_from_dict(trajs, idx)
removed_idx_metas = rm_idx_from_dict(metadatas, idx)
trajs["metadatas"] = metadatas
removed_idx_d["metadatas"] = removed_idx_metas
return removed_idx_d
@staticmethod
def take_traj_indices(trajs, indices):
# NOTE: non mutating method
subset_trajs = take_indexes_from_dict(trajs, indices, keys_to_ignore=["metadatas"])
# TODO: Make metadatas field into additional keys for trajs, rather than having a metadatas field?
subset_trajs["metadatas"] = take_indexes_from_dict(trajs["metadatas"], indices)
return subset_trajs
@staticmethod
def add_metadata_to_traj(trajs, metadata_fn, input_keys):
"""
Add an additional metadata entry to the trajectory, based on manipulating
the trajectory `input_keys` values
"""
metadata_fn_input = [trajs[k] for k in input_keys]
metadata_key, metadata_data = metadata_fn(metadata_fn_input)
assert metadata_key not in trajs["metadatas"].keys()
trajs["metadatas"][metadata_key] = metadata_data
return trajs
@staticmethod
def add_observations_to_trajs_in_metadata(trajs, encoding_fn):
"""Adds processed observations (for both agent indices) in the metadatas"""
def metadata_fn(data):
traj_ep_states = data[0]
obs_metadata = []
for one_traj_states in traj_ep_states:
obs_metadata.append([encoding_fn(s) for s in one_traj_states])
return "ep_obs_for_both_agents", obs_metadata
return AgentEvaluator.add_metadata_to_traj(trajs, metadata_fn, ["ep_states"])
# EVENTS VISUALIZATION METHODS #
@staticmethod
def events_visualization(trajs, traj_index):
# TODO
pass
```
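A minimal usage sketch of AgentEvaluator, mirroring how the unit tests below construct it (the layout name, horizon and number of games are just examples):
```python
from overcooked_ai_py.agents.agent import AgentPair, RandomAgent
from overcooked_ai_py.agents.benchmarking import AgentEvaluator

ae = AgentEvaluator.from_layout_name({"layout_name": "cramped_room"}, {"horizon": 100})
trajs = ae.evaluate_agent_pair(AgentPair(RandomAgent(), RandomAgent()), num_games=1)
AgentEvaluator.check_trajectories(trajs)   # raises AssertionError if the format is off
print(trajs["ep_returns"])                 # one episode return per game
```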
#### File: overcooked_ai/testing/agent_test.py
```python
import unittest
import numpy as np
from overcooked_ai_py.agents.agent import AgentPair, FixedPlanAgent, GreedyHumanModel, RandomAgent, SampleAgent
from overcooked_ai_py.mdp.actions import Direction, Action
from overcooked_ai_py.mdp.overcooked_mdp import OvercookedGridworld, OvercookedState, PlayerState, ObjectState
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv
from overcooked_ai_py.planning.planners import MediumLevelActionManager, NO_COUNTERS_PARAMS
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
np.random.seed(42)
n, s = Direction.NORTH, Direction.SOUTH
e, w = Direction.EAST, Direction.WEST
stay, interact = Action.STAY, Action.INTERACT
P, Obj = PlayerState, ObjectState
force_compute_large = False
force_compute = True
DISPLAY = False
simple_mdp = OvercookedGridworld.from_layout_name('cramped_room')
large_mdp = OvercookedGridworld.from_layout_name('corridor')
class TestAgentEvaluator(unittest.TestCase):
def setUp(self):
self.agent_eval = AgentEvaluator.from_layout_name({"layout_name": "cramped_room"}, {"horizon": 100})
def test_human_model_pair(self):
trajs = self.agent_eval.evaluate_human_model_pair()
try:
AgentEvaluator.check_trajectories(trajs, verbose=False)
except AssertionError as e:
self.fail("Trajectories were not returned in standard format:\n{}".format(e))
def test_rollouts(self):
ap = AgentPair(RandomAgent(), RandomAgent())
trajs = self.agent_eval.evaluate_agent_pair(ap, num_games=5)
try:
AgentEvaluator.check_trajectories(trajs, verbose=False)
except AssertionError as e:
self.fail("Trajectories were not returned in standard format:\n{}".format(e))
def test_mlam_computation(self):
try:
self.agent_eval.env.mlam
except Exception as e:
self.fail("Failed to compute MediumLevelActionManager:\n{}".format(e))
class TestBasicAgents(unittest.TestCase):
def setUp(self):
self.mlam_large = MediumLevelActionManager.from_pickle_or_compute(large_mdp, NO_COUNTERS_PARAMS, force_compute=force_compute_large)
def test_fixed_plan_agents(self):
a0 = FixedPlanAgent([s, e, n, w])
a1 = FixedPlanAgent([s, w, n, e])
agent_pair = AgentPair(a0, a1)
env = OvercookedEnv.from_mdp(large_mdp, horizon=10)
trajectory, time_taken, _, _ = env.run_agents(agent_pair, include_final_state=True, display=DISPLAY)
end_state = trajectory[-1][0]
self.assertEqual(time_taken, 10)
self.assertEqual(env.mdp.get_standard_start_state().player_positions, end_state.player_positions)
def test_two_greedy_human_open_map(self):
scenario_2_mdp = OvercookedGridworld.from_layout_name('scenario2')
mlam = MediumLevelActionManager.from_pickle_or_compute(scenario_2_mdp, NO_COUNTERS_PARAMS, force_compute=force_compute)
a0 = GreedyHumanModel(mlam)
a1 = GreedyHumanModel(mlam)
agent_pair = AgentPair(a0, a1)
start_state = OvercookedState(
[P((8, 1), s),
P((1, 1), s)],
{},
all_orders=scenario_2_mdp.start_all_orders
)
env = OvercookedEnv.from_mdp(scenario_2_mdp, start_state_fn=lambda: start_state, horizon=100)
trajectory, time_taken, _, _ = env.run_agents(agent_pair, include_final_state=True, display=DISPLAY)
def test_sample_agent(self):
agent = SampleAgent([RandomAgent(all_actions=False), RandomAgent(all_actions=True)])
probs = agent.action(None)[1]["action_probs"]
expected_probs = np.array([0.18333333, 0.18333333, 0.18333333, 0.18333333, 0.18333333, 0.08333333])
self.assertTrue(np.allclose(probs, expected_probs))
class TestAgentEvaluatorStatic(unittest.TestCase):
layout_name_lst = ["asymmetric_advantages", "asymmetric_advantages_tomato", "bonus_order_test", "bottleneck",
"centre_objects", "centre_pots", "corridor", "forced_coordination_tomato", "unident",
"marshmallow_experiment", "marshmallow_experiment_coordination", "you_shall_not_pass"]
def test_from_mdp(self):
for layout_name in self.layout_name_lst:
            original_mdp = OvercookedGridworld.from_layout_name(layout_name)
            ae = AgentEvaluator.from_mdp(mdp=original_mdp, env_params={"horizon": 400})
            ae_mdp = ae.env.mdp
            self.assertEqual(original_mdp, ae_mdp, "mdp with name " + layout_name + " experienced an inconsistency")
def test_from_mdp_params_layout(self):
for layout_name in self.layout_name_lst:
            original_mdp = OvercookedGridworld.from_layout_name(layout_name)
ae = AgentEvaluator.from_layout_name(mdp_params={"layout_name": layout_name}, env_params={"horizon": 400})
ae_mdp = ae.env.mdp
            self.assertEqual(original_mdp, ae_mdp, "mdp with name " + layout_name + " experienced an inconsistency")
mdp_gen_params_1 = {
"inner_shape": (10, 7),
"prop_empty": 0.95,
"prop_feats": 0.1,
"start_all_orders": [
{"ingredients": ["onion", "onion", "onion"]}
],
"display": False,
}
mdp_gen_params_2 = {
"inner_shape": (10, 7),
"prop_empty": 0.7,
"prop_feats": 0.5,
"start_all_orders": [
{"ingredients": ["onion", "onion", "onion"]}
],
"display": False,
}
mdp_gen_params_3 = {
"inner_shape": (10, 7),
"prop_empty": 0.5,
"prop_feats": 0.4,
"start_all_orders": [
{"ingredients": ["onion", "onion", "onion"]}
],
"display": False,
}
mdp_gen_params_lst = [mdp_gen_params_1, mdp_gen_params_2, mdp_gen_params_3]
outer_shape = (10, 7)
def test_from_mdp_params_variable_across(self):
for mdp_gen_params in self.mdp_gen_params_lst:
ae0 = AgentEvaluator.from_mdp_params_infinite(mdp_params=mdp_gen_params,
env_params={"horizon": 400, "num_mdp": np.inf},
outer_shape=self.outer_shape)
ae1 = AgentEvaluator.from_mdp_params_infinite(mdp_params=mdp_gen_params,
env_params={"horizon": 400, "num_mdp": np.inf},
outer_shape=self.outer_shape)
self.assertFalse(ae0.env.mdp == ae1.env.mdp,
"2 randomly generated layouts across 2 evaluators are the same, which is wrong")
def test_from_mdp_params_variable_infinite(self):
for mdp_gen_params in self.mdp_gen_params_lst:
ae = AgentEvaluator.from_mdp_params_infinite(mdp_params=mdp_gen_params,
env_params={"horizon": 400, "num_mdp": np.inf},
outer_shape=self.outer_shape)
mdp_0 = ae.env.mdp.copy()
for _ in range(5):
ae.env.reset(regen_mdp=True)
mdp_1 = ae.env.mdp
self.assertFalse(mdp_0 == mdp_1,
"with infinite layout generator and regen_mdp=True, the 2 layouts should not be the same")
def test_from_mdp_params_variable_infinite_no_regen(self):
for mdp_gen_params in self.mdp_gen_params_lst:
ae = AgentEvaluator.from_mdp_params_infinite(mdp_params=mdp_gen_params,
env_params={"horizon": 400, "num_mdp": np.inf},
outer_shape=self.outer_shape)
mdp_0 = ae.env.mdp.copy()
for _ in range(5):
ae.env.reset(regen_mdp=False)
mdp_1 = ae.env.mdp
self.assertTrue(mdp_0 == mdp_1,
"with infinite layout generator and regen_mdp=False, the 2 layouts should be the same")
def test_from_mdp_params_variable_infinite_specified(self):
for mdp_gen_params in self.mdp_gen_params_lst:
ae = AgentEvaluator.from_mdp_params_infinite(mdp_params=mdp_gen_params,
env_params={"horizon": 400, "num_mdp": np.inf},
outer_shape=self.outer_shape)
mdp_0 = ae.env.mdp.copy()
for _ in range(5):
ae.env.reset(regen_mdp=True)
mdp_1 = ae.env.mdp
self.assertFalse(mdp_0 == mdp_1,
"with infinite layout generator and regen_mdp=True, the 2 layouts should not be the same")
def test_from_mdp_params_variable_finite(self):
for mdp_gen_params in self.mdp_gen_params_lst:
ae = AgentEvaluator.from_mdp_params_finite(mdp_params=mdp_gen_params,
env_params={"horizon": 400, "num_mdp": 2},
outer_shape=self.outer_shape)
mdp_0 = ae.env.mdp.copy()
seen = [mdp_0]
for _ in range(20):
ae.env.reset(regen_mdp=True)
mdp_i = ae.env.mdp
if len(seen) == 1:
if mdp_i != seen[0]:
seen.append(mdp_i.copy())
elif len(seen) == 2:
mdp_0, mdp_1 = seen
self.assertTrue((mdp_i == mdp_0 or mdp_i == mdp_1),
"more than 2 mdp was created, the function failed to perform")
else:
self.assertTrue(False, "theoretically unreachable statement")
layout_name_short_lst = ["cramped_room", "cramped_room_tomato", "simple_o", "simple_tomato", "simple_o_t"]
biased = [0.1, 0.15, 0.2, 0.25, 0.3]
num_reset = 200000
def test_from_mdp_lst_default(self):
mdp_lst = [OvercookedGridworld.from_layout_name(name) for name in self.layout_name_short_lst]
ae = AgentEvaluator.from_mdp_lst(mdp_lst=mdp_lst, env_params={"horizon": 400})
counts = {}
for _ in range(self.num_reset):
ae.env.reset(regen_mdp=True)
if ae.env.mdp.layout_name in counts:
counts[ae.env.mdp.layout_name] += 1
else:
counts[ae.env.mdp.layout_name] = 1
for k, v in counts.items():
self.assertAlmostEqual(0.2, v/self.num_reset, 2, "more than 2 places off for " + k)
def test_from_mdp_lst_uniform(self):
mdp_lst = [OvercookedGridworld.from_layout_name(name) for name in self.layout_name_short_lst]
ae = AgentEvaluator.from_mdp_lst(mdp_lst=mdp_lst, env_params={"horizon": 400}, sampling_freq=[0.2, 0.2, 0.2, 0.2, 0.2])
counts = {}
for _ in range(self.num_reset):
ae.env.reset(regen_mdp=True)
if ae.env.mdp.layout_name in counts:
counts[ae.env.mdp.layout_name] += 1
else:
counts[ae.env.mdp.layout_name] = 1
for k, v in counts.items():
self.assertAlmostEqual(0.2, v/self.num_reset, 2, "more than 2 places off for " + k)
def test_from_mdp_lst_biased(self):
mdp_lst = [OvercookedGridworld.from_layout_name(name) for name in self.layout_name_short_lst]
ae = AgentEvaluator.from_mdp_lst(mdp_lst=mdp_lst, env_params={"horizon": 400}, sampling_freq=self.biased)
counts = {}
for _ in range(self.num_reset):
ae.env.reset(regen_mdp=True)
if ae.env.mdp.layout_name in counts:
counts[ae.env.mdp.layout_name] += 1
else:
counts[ae.env.mdp.layout_name] = 1
# construct the ground truth
gt = {self.layout_name_short_lst[i]: self.biased[i] for i in range(len(self.layout_name_short_lst))}
for k, v in counts.items():
self.assertAlmostEqual(gt[k], v/self.num_reset, 2, "more than 2 places off for " + k)
if __name__ == '__main__':
unittest.main()
```
#### File: overcooked_ai/testing/planners_test.py
```python
import unittest
from overcooked_ai_py.planning.planners import MediumLevelActionManager
from overcooked_ai_py.mdp.actions import Direction, Action
from overcooked_ai_py.mdp.overcooked_mdp import OvercookedGridworld, PlayerState, ObjectState, SoupState, OvercookedState
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
from overcooked_ai_py.agents.agent import AgentPair, GreedyHumanModel
large_mdp_tests = False
force_compute = True
force_compute_large = False
n, s = Direction.NORTH, Direction.SOUTH
e, w = Direction.EAST, Direction.WEST
stay, interact = Action.STAY, Action.INTERACT
P, Obj = PlayerState, ObjectState
# Simple MDP Setup
simple_mdp = OvercookedGridworld.from_layout_name('simple_o')
base_params = {
'start_orientations': False,
'wait_allowed': False,
'counter_goals': simple_mdp.terrain_pos_dict['X'],
'counter_drop': simple_mdp.terrain_pos_dict['X'][1:2],
'counter_pickup': simple_mdp.terrain_pos_dict['X'],
'same_motion_goals': True
}
action_manger_filename = "simple_1_am.pkl"
ml_action_manager_simple = MediumLevelActionManager.from_pickle_or_compute(
simple_mdp, mlam_params=base_params, custom_filename=action_manger_filename, force_compute=force_compute)
ml_action_manager_simple.env = OvercookedEnv.from_mdp(simple_mdp)
base_params_start_or = {
'start_orientations': True,
'wait_allowed': False,
'counter_goals': simple_mdp.terrain_pos_dict['X'],
'counter_drop': [],
'counter_pickup': simple_mdp.terrain_pos_dict['X'],
'same_motion_goals': False
}
action_manger_filename = "simple_2_am.pkl"
or_ml_action_manager_simple = MediumLevelActionManager.from_pickle_or_compute(
simple_mdp, mlam_params=base_params_start_or, custom_filename=action_manger_filename, force_compute=force_compute)
if large_mdp_tests:
# Not testing by default
# Large MDP Setup
large_mdp = OvercookedGridworld.from_layout_name('corridor', cook_time=5)
no_counters_params = {
'start_orientations': False,
'wait_allowed': False,
'counter_goals': [],
'counter_drop': [],
'counter_pickup': [],
'same_motion_goals': False
}
action_manger_filename = "corridor_no_shared_motion_goals_am.pkl"
ml_planner_large_no_shared = MediumLevelActionManager.from_pickle_or_compute(
large_mdp, no_counters_params, custom_filename=action_manger_filename, force_compute=force_compute_large)
same_goals_params = {
'start_orientations': False,
'wait_allowed': False,
'counter_goals': [],
'counter_drop': [],
'counter_pickup': [],
'same_motion_goals': True
}
action_manger_filename = "corridor_am.pkl"
ml_planner_large = MediumLevelActionManager.from_pickle_or_compute(
large_mdp, same_goals_params, custom_filename=action_manger_filename, force_compute=force_compute_large)
# Deprecated.
# hlam = HighLevelActionManager(ml_planner_large)
# hlp = HighLevelPlanner(hlam)
def done_soup_obj(soup_loc, num_onion_inside=3):
return soup_obj(soup_loc, num_onion_inside, 20)
def idle_soup_obj(soup_loc, num_onion_inside):
return soup_obj(soup_loc, num_onion_inside, -1)
def cooking_soup_obj(soup_loc, num_onion_inside=3, cooking_tick=0):
assert cooking_tick >= 0
assert num_onion_inside >= 0
return soup_obj(soup_loc, num_onion_inside, cooking_tick)
def soup_obj(soup_loc, num_onion_inside, cooking_tick):
ingredient_obj_lst = [Obj('onion', soup_loc)] * num_onion_inside
return SoupState(soup_loc, ingredient_obj_lst, cooking_tick)
class TestMotionPlanner(unittest.TestCase):
def test_gridworld_distance(self):
planner = ml_action_manager_simple.joint_motion_planner.motion_planner
start = ((2, 1), e)
end = ((1, 1), w)
dist = planner.get_gridworld_distance(start, end)
self.assertEqual(dist, 1)
start = ((2, 1), e)
end = ((1, 1), n)
dist = planner.get_gridworld_distance(start, end)
self.assertEqual(dist, 2)
start = (2, 1)
end = (1, 1)
dist = planner.get_gridworld_pos_distance(start, end)
self.assertEqual(dist, 1)
start = (1, 1)
end = (3, 2)
dist = planner.get_gridworld_pos_distance(start, end)
self.assertEqual(dist, 3)
def test_simple_mdp(self):
planner = ml_action_manager_simple.joint_motion_planner.motion_planner
self.simple_mdp_already_at_goal(planner)
self.simple_mdp_orientation_change(planner)
self.simple_mdp_basic_plan(planner)
self.simple_mdp_orientation_optimization_dependent_plans(planner)
def simple_mdp_already_at_goal(self, planner):
start_status = goal_status = ((1, 1), n)
self.check_single_motion_plan(planner, start_status, goal_status, expected_length=1)
def simple_mdp_orientation_change(self, planner):
start_status = ((1, 1), n)
goal_status = ((1, 1), w)
self.check_single_motion_plan(planner, start_status, goal_status, expected_length=2)
def simple_mdp_basic_plan(self, planner):
start_status = ((1, 1), n)
goal_status = ((3, 1), n)
self.check_single_motion_plan(planner, start_status, goal_status, expected_length=4)
def simple_mdp_orientation_optimization_dependent_plans(self, planner):
start_status = ((2, 1), n)
goal_status = ((1, 2), w)
self.check_single_motion_plan(planner, start_status, goal_status, expected_length=3)
goal_status = ((1, 2), s)
self.check_single_motion_plan(planner, start_status, goal_status, expected_length=3)
def test_larger_mdp(self):
if large_mdp_tests:
planner = ml_planner_large.ml_action_manager.joint_motion_planner.motion_planner
self.large_mdp_basic_plan(planner)
def large_mdp_basic_plan(self, planner):
start_status = ((1, 2), n)
goal_status = ((8, 1), n)
self.check_single_motion_plan(planner, start_status, goal_status)
def check_single_motion_plan(self, motion_planner, start_pos_and_or, goal_pos_and_or, expected_length=None):
dummy_agent = P((3, 2), n)
start_state = OvercookedState([P(*start_pos_and_or), dummy_agent], {}, all_orders=simple_mdp.start_all_orders)
action_plan, pos_and_or_plan, plan_cost = motion_planner.get_plan(start_pos_and_or, goal_pos_and_or)
# Checking that last state obtained matches goal position
self.assertEqual(pos_and_or_plan[-1], goal_pos_and_or)
# In single motion plans the graph cost should be equal to
# the plan cost (= plan length) as agents should never STAY
graph_plan_cost = sum([motion_planner._graph_action_cost(a) for a in action_plan])
self.assertEqual(plan_cost, graph_plan_cost)
joint_action_plan = [(a, stay) for a in action_plan]
env = OvercookedEnv.from_mdp(motion_planner.mdp, horizon=1000)
resulting_state, _ = env.execute_plan(start_state, joint_action_plan)
self.assertEqual(resulting_state.players_pos_and_or[0], goal_pos_and_or)
if expected_length is not None:
self.assertEqual(len(action_plan), expected_length)
class TestJointMotionPlanner(unittest.TestCase):
def test_same_start_and_end_pos_with_no_start_orientations(self):
jm_planner = ml_action_manager_simple.joint_motion_planner
start = (((1, 1), w), ((1, 2), s))
goal = (((1, 1), n), ((2, 1), n))
        joint_action_plan, end_jm_state, finishing_times = jm_planner.get_low_level_action_plan(start, goal)
optimal_plan = [(n, e), (interact, n)]
self.assertEqual(joint_action_plan, optimal_plan)
optimal_end_jm_state = (((1, 1), n), ((2, 1), n))
self.assertEqual(end_jm_state, optimal_end_jm_state)
        optimal_finishing_times = (2, 3)
        self.assertEqual(finishing_times, optimal_finishing_times)
def test_with_start_orientations_simple_mdp(self):
jm_planner = or_ml_action_manager_simple.joint_motion_planner
self.simple_mdp_suite(jm_planner)
def test_without_start_orientations_simple_mdp(self):
jm_planner = ml_action_manager_simple.joint_motion_planner
self.simple_mdp_suite(jm_planner)
def simple_mdp_suite(self, jm_planner):
self.simple_mdp_already_at_goal(jm_planner)
self.simple_mdp_only_orientations_switch(jm_planner)
self.simple_mdp_one_at_goal(jm_planner)
self.simple_mdp_position_swap(jm_planner)
self.simple_mdp_one_at_goal_other_conflicting_path(jm_planner)
self.simple_mdp_test_final_orientation_optimization(jm_planner)
def simple_mdp_already_at_goal(self, planner):
a1_start = a1_goal = ((1, 1), n)
a2_start = a2_goal = ((2, 1), n)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal, times=(1, 1), min_t=1)
a1_start = a1_goal = ((1, 1), w)
a2_start = a2_goal = ((1, 2), s)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal, times=(1, 1), min_t=1)
def simple_mdp_only_orientations_switch(self, planner):
a1_start = ((1, 1), s)
a1_goal = ((1, 1), w)
a2_start = ((1, 2), s)
a2_goal = ((1, 2), w)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal, times=(2, 2), min_t=2)
def simple_mdp_one_at_goal(self, planner):
a1_start = ((3, 2), s)
a1_goal = ((3, 2), s)
a2_start = ((2, 1), w)
a2_goal = ((1, 1), w)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal, times=(1, 2))
def simple_mdp_position_swap(self, planner):
a1_start = ((1, 1), w)
a2_start = ((3, 2), s)
a1_goal = a2_start
a2_goal = a1_start
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal)
def simple_mdp_one_at_goal_other_conflicting_path(self, planner):
a1_start = ((1, 1), w)
a1_goal = ((3, 1), e)
a2_start = a2_goal = ((2, 1), n)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal, min_t=1)
def simple_mdp_test_final_orientation_optimization(self, planner):
a1_start = ((2, 1), n)
a1_goal = ((1, 2), w)
a2_start = a2_goal = ((3, 2), s)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
# NOTE: Not considering all plans with same cost yet, this won't work
# check_joint_plan(planner, mdp, start, goal, times=(3, 1))
a1_goal = ((1, 2), s)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal, times=(3, 1))
def test_large_mdp_suite_shared_motion_goals(self):
if large_mdp_tests:
jmp = ml_planner_large.ml_action_manager.joint_motion_planner
self.large_mdp_test_basic_plan(jmp)
self.large_mdp_test_shared_motion_goal(jmp)
self.large_mdp_test_shared_motion_goal_with_conflict(jmp)
self.large_mdp_test_shared_motion_goal_with_conflict_other(jmp)
def large_mdp_test_basic_plan(self, planner):
a1_start = ((5, 1), n)
a2_start = ((8, 1), n)
a1_goal = a2_start
a2_goal = a1_start
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal)
def large_mdp_test_shared_motion_goal(self, planner):
a1_start = ((4, 1), n)
a2_start = ((1, 1), n)
a1_goal = ((5, 1), n)
a2_goal = ((5, 1), n)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal, min_t=3)
def large_mdp_test_shared_motion_goal_with_conflict(self, planner):
assert planner.same_motion_goals
# When paths conflict for same goal, will resolve by making
# one agent wait (the one that results in the shortest plan)
a1_start = ((5, 2), n)
a2_start = ((4, 1), n)
a1_goal = ((5, 1), n)
a2_goal = ((5, 1), n)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal, min_t=2)
def large_mdp_test_shared_motion_goal_with_conflict_other(self, planner):
assert planner.same_motion_goals
a1_start = ((4, 2), e)
a2_start = ((4, 1), e)
a1_goal = ((5, 1), n)
a2_goal = ((5, 1), n)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal, min_t=3)
def check_joint_plan(self, joint_motion_planner, start, goal, times=None, min_t=None, display=False):
"""Runs the plan in the environment and checks that the intended goals are achieved."""
debug = False
action_plan, end_pos_and_orients, plan_lengths = joint_motion_planner.get_low_level_action_plan(start, goal)
if debug: print("Start state: {}, Goal state: {}, Action plan: {}".format(start, goal, action_plan))
start_state = OvercookedState([P(*start[0]), P(*start[1])], {}, all_orders=simple_mdp.start_all_orders)
env = OvercookedEnv.from_mdp(joint_motion_planner.mdp, horizon=1000)
resulting_state, _ = env.execute_plan(start_state, action_plan, display=display)
self.assertTrue(any([agent_goal in resulting_state.players_pos_and_or for agent_goal in goal]))
self.assertEqual(resulting_state.players_pos_and_or, end_pos_and_orients)
self.assertEqual(len(action_plan), min(plan_lengths))
if min_t is not None: self.assertEqual(len(action_plan), min_t)
if times is not None: self.assertEqual(plan_lengths, times)
# Rewritten because the previous test depended on Heuristic, and Heuristic has been deprecated
class TestMediumLevelActionManagerSimple(unittest.TestCase):
def test_simple_mdp_without_start_orientations(self):
print("Simple - no start orientations (& shared motion goals)")
mlam = ml_action_manager_simple
        self.simple_mdp_empty_hands(mlam)
self.simple_mdp_deliver_soup(mlam)
self.simple_mdp_pickup_counter_soup(mlam)
self.simple_mdp_pickup_counter_dish(mlam)
self.simple_mdp_pickup_counter_onion(mlam)
self.simple_mdp_drop_useless_dish_with_soup_idle(mlam)
self.simple_mdp_pickup_soup(mlam)
self.simple_mdp_pickup_dish(mlam)
self.simple_mdp_start_good_soup_cooking(mlam)
self.simple_mdp_start_bad_soup_cooking(mlam)
self.simple_mdp_start_1_onion_soup_cooking(mlam)
self.simple_mdp_drop_useless_onion_good_soup(mlam)
self.simple_mdp_drop_useless_onion_bad_soup(mlam)
self.simple_mdp_add_3rd_onion(mlam)
self.simple_mdp_add_2nd_onion(mlam)
self.simple_mdp_drop_useless_dish(mlam)
def test_simple_mdp_with_start_orientations(self):
print("Simple - with start orientations (no shared motion goals)")
mlam = or_ml_action_manager_simple
        self.simple_mdp_empty_hands(mlam, counter_drop_forbidden=True)
self.simple_mdp_deliver_soup(mlam, counter_drop_forbidden=True)
self.simple_mdp_pickup_counter_soup(mlam, counter_drop_forbidden=True)
self.simple_mdp_pickup_counter_dish(mlam, counter_drop_forbidden=True)
self.simple_mdp_pickup_counter_onion(mlam, counter_drop_forbidden=True)
self.simple_mdp_drop_useless_dish_with_soup_idle(mlam, counter_drop_forbidden=True)
self.simple_mdp_pickup_soup(mlam, counter_drop_forbidden=True)
self.simple_mdp_pickup_dish(mlam, counter_drop_forbidden=True)
self.simple_mdp_start_good_soup_cooking(mlam, counter_drop_forbidden=True)
self.simple_mdp_start_bad_soup_cooking(mlam, counter_drop_forbidden=True)
self.simple_mdp_start_1_onion_soup_cooking(mlam, counter_drop_forbidden=True)
self.simple_mdp_drop_useless_onion_good_soup(mlam, counter_drop_forbidden=True)
self.simple_mdp_drop_useless_onion_bad_soup(mlam, counter_drop_forbidden=True)
self.simple_mdp_add_3rd_onion(mlam, counter_drop_forbidden=True)
self.simple_mdp_add_2nd_onion(mlam, counter_drop_forbidden=True)
self.simple_mdp_drop_useless_dish(mlam, counter_drop_forbidden=True)
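    # Medium-level motion goals used by the tests below, written as (position, facing-direction)
    # pairs on the simple layout.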
ONION_PICKUP = ((3, 2), (1, 0))
DISH_PICKUP = ((2, 2), (0, 1))
COUNTER_DROP = ((1, 1), (0, -1))
COUNTER_PICKUP = ((1, 2), (-1, 0))
    POT_INTERACT = ((2, 1), (0, -1))
SOUP_DELIVER = ((3, 2), (0, 1))
    def simple_mdp_empty_hands(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n),
P((2, 1), n)],
{},
all_orders=simple_mdp.start_all_orders)
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP],
[self.ONION_PICKUP, self.DISH_PICKUP]
)
def simple_mdp_deliver_soup(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n),
P((2, 1), n, done_soup_obj((2, 1)))],
{},
all_orders=simple_mdp.start_all_orders)
if counter_drop_forbidden:
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP],
[self.SOUP_DELIVER]
)
else:
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP],
[self.COUNTER_DROP, self.SOUP_DELIVER]
)
def simple_mdp_pickup_counter_soup(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n),
P((2, 1), n)],
{(0, 2): done_soup_obj((0, 2))},
all_orders=simple_mdp.start_all_orders)
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.COUNTER_PICKUP],
[self.ONION_PICKUP, self.DISH_PICKUP, self.COUNTER_PICKUP]
)
def simple_mdp_pickup_counter_dish(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n),
P((2, 1), n)],
{(0, 2): Obj('dish', (0, 2))},
all_orders=simple_mdp.start_all_orders)
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.COUNTER_PICKUP],
[self.ONION_PICKUP, self.DISH_PICKUP, self.COUNTER_PICKUP]
)
def simple_mdp_pickup_counter_onion(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n),
P((2, 1), n)],
{(0, 2): Obj('onion', (0, 2))},
all_orders=simple_mdp.start_all_orders)
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.COUNTER_PICKUP],
[self.ONION_PICKUP, self.DISH_PICKUP, self.COUNTER_PICKUP]
)
def simple_mdp_drop_useless_dish_with_soup_idle(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n),
P((2, 1), n, Obj('dish', (2, 1)))],
{(2, 0): idle_soup_obj((2, 0), 3)},
all_orders=simple_mdp.start_all_orders)
if counter_drop_forbidden:
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[]
)
else:
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.COUNTER_DROP]
)
def simple_mdp_pickup_soup(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n),
P((2, 1), n, Obj('dish', (2, 1)))],
{(2, 0): done_soup_obj((2, 0))},
all_orders=simple_mdp.start_all_orders)
if counter_drop_forbidden:
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP],
[self.POT_INTERACT]
)
else:
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP],
[self.COUNTER_DROP, self.POT_INTERACT]
)
def simple_mdp_pickup_dish(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n),
P((2, 1), n)],
{(2, 0): done_soup_obj((2, 0))},
all_orders=simple_mdp.start_all_orders)
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP],
[self.ONION_PICKUP, self.DISH_PICKUP]
)
def simple_mdp_start_good_soup_cooking(self, planner, counter_drop_forbidden=False):
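        # Assumption: idle_soup_obj((2, 0), 3) builds a pot at (2, 0) that already holds
        # 3 ingredients, i.e. a recipe-complete soup whose cooking has not started yet.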
s = OvercookedState(
[P((2, 2), n),
P((2, 1), n)],
{(2, 0): idle_soup_obj((2, 0), 3)},
all_orders=simple_mdp.start_all_orders)
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT]
)
def simple_mdp_start_bad_soup_cooking(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n),
P((2, 1), n)],
{(2, 0): idle_soup_obj((2, 0), 2)},
all_orders=simple_mdp.start_all_orders)
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT]
)
def simple_mdp_start_1_onion_soup_cooking(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n),
P((2, 1), n)],
{(2, 0): idle_soup_obj((2, 0), 1)},
all_orders=simple_mdp.start_all_orders)
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT]
)
def simple_mdp_drop_useless_onion_good_soup(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n),
P((2, 1), n, Obj('onion', (2, 1)))],
{(2, 0): done_soup_obj((2, 0))},
all_orders=simple_mdp.start_all_orders)
if counter_drop_forbidden:
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP],
[]
)
else:
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP],
[self.COUNTER_DROP]
)
def simple_mdp_drop_useless_onion_bad_soup(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n),
P((2, 1), n, Obj('onion', (2, 1)))],
{(2, 0): done_soup_obj((2, 0), 2)},
all_orders=simple_mdp.start_all_orders)
if counter_drop_forbidden:
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP],
[]
)
else:
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP],
[self.COUNTER_DROP]
)
def simple_mdp_add_3rd_onion(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n),
P((2, 1), n, Obj('onion', (2, 1)))],
{(2, 0): idle_soup_obj((2, 0), 2)},
all_orders=simple_mdp.start_all_orders)
if counter_drop_forbidden:
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.POT_INTERACT]
)
else:
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.COUNTER_DROP, self.POT_INTERACT]
)
def simple_mdp_add_2nd_onion(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n),
P((2, 1), n, Obj('onion', (2, 1)))],
{(2, 0): idle_soup_obj((2, 0), 1)},
all_orders=simple_mdp.start_all_orders)
if counter_drop_forbidden:
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.POT_INTERACT]
)
else:
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.COUNTER_DROP, self.POT_INTERACT]
)
def simple_mdp_drop_useless_dish(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n),
P((2, 1), n, Obj('dish', (2, 1)))],
{(2, 0): idle_soup_obj((2, 0), 1)},
all_orders=simple_mdp.start_all_orders)
if counter_drop_forbidden:
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.POT_INTERACT]
)
else:
self.check_ml_action_manager(s, planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.COUNTER_DROP, self.POT_INTERACT]
)
def check_ml_action_manager(self, state, am, expected_mla_0, expected_mla_1, debug=False):
"""
args:
state (OvercookedState): an overcooked state
am (MediumLevelActionManager): the planer whose action manager will be tested
This function checks if all the mid-level actions make sense for each player state inside STATE
"""
player_0, player_1 = state.players
mla_0 = am.get_medium_level_actions(state, player_0)
mla_1 = am.get_medium_level_actions(state, player_1)
if debug:
print("Player 0 mla", mla_0)
print("Player 1 mla", mla_1)
print(am.mdp.state_string(state))
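        # Compare as sets: the order in which medium-level actions are returned does not matter.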
        self.assertEqual(set(mla_0), set(expected_mla_0),
                         "player 0's ml_action should be " + str(expected_mla_0) +
                         " but got " + str(mla_0))
        self.assertEqual(set(mla_1), set(expected_mla_1),
                         "player 1's ml_action should be " + str(expected_mla_1) +
                         " but got " + str(mla_1))
class TestScenarios(unittest.TestCase):
    def repetitive_runs(self, evaluator, num_games=10):
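        # Nothing is asserted here: these repeated evaluations mainly need to complete without raising.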
trajectory_0 = evaluator.evaluate_human_model_pair(num_games=num_games, native_eval=True)
trajectory_1 = evaluator.evaluate_human_model_pair(num_games=num_games, native_eval=True)
h0 = GreedyHumanModel(evaluator.env.mlam)
h1 = GreedyHumanModel(evaluator.env.mlam)
ap_hh_2 = AgentPair(h0, h1)
trajectory_2 = evaluator.evaluate_agent_pair(agent_pair=ap_hh_2, num_games=num_games, native_eval=True)
h3 = GreedyHumanModel(evaluator.env.mlam)
h4 = GreedyHumanModel(evaluator.env.mlam)
ap_hh_3 = AgentPair(h3, h4)
trajectory_3 = evaluator.evaluate_agent_pair(agent_pair=ap_hh_3, num_games=num_games, native_eval=True)
def test_scenario_3_no_counter(self):
# Asymmetric advantage scenario
#
# X X X X X O X X X X
# S X X P X
# X ↑H X
# D X X X X!X X X
# X →R O
# X X X X X X X X X X
#
        # This test does not allow counter use, since the default NO_COUNTER_PARAMS are used when calling from_layout_name
mdp_params = {"layout_name": "scenario3"}
mdp = OvercookedGridworld.from_layout_name(**mdp_params)
start_state = mdp.get_standard_start_state()
env_params = {"start_state_fn": lambda: start_state, "horizon": 1000}
eva = AgentEvaluator.from_layout_name(mdp_params, env_params, force_compute=force_compute)
        self.repetitive_runs(eva)
def test_scenario_3_yes_counter(self):
# Asymmetric advantage scenario
#
# X X X X X O X X X X
# S X X P X
# X ↑H X
# D X X X X!X X X
# X →R O
# X X X X X X X X X X
#
        # This test allows (5, 3) as the only valid counter
mdp_params = {"layout_name": "scenario3"}
mdp = OvercookedGridworld.from_layout_name(**mdp_params)
start_state = mdp.get_standard_start_state()
valid_counters = [(5, 3)]
one_counter_params = {
'start_orientations': False,
'wait_allowed': False,
'counter_goals': valid_counters,
'counter_drop': valid_counters,
'counter_pickup': [],
'same_motion_goals': True
}
env_params = {"start_state_fn": lambda: start_state, "horizon": 1000}
eva = AgentEvaluator.from_layout_name(mdp_params, env_params, mlam_params=one_counter_params, force_compute=force_compute)
        self.repetitive_runs(eva)
# # Deprecated because of Heuristic
# class TestHighLevelPlanner(unittest.TestCase):
# """The HighLevelPlanner class has been mostly discontinued"""
#
# def test_basic_hl_planning(self):
# if large_mdp_tests:
# s = OvercookedState(
# [P((2, 2), n),
# P((2, 1), n)],
# {}, order_list=[])
# h = Heuristic(hlp.mp)
# hlp.get_hl_plan(s, h.simple_heuristic)
#
# s = OvercookedState(
# [P((2, 2), n),
# P((2, 1), n)],
# {}, order_list=['any', 'any', 'any'])
#
# hlp.get_low_level_action_plan(s, h.simple_heuristic)
# # hlp.get_low_level_action_plan(s, h.hard_heuristic)
#
# # heuristic = Heuristic(ml_planner_large.mp)
# # ml_planner_large.get_low_level_action_plan(s, heuristic.simple_heuristic)
# # ml_planner_large.get_low_level_action_plan(s, heuristic.hard_heuristic)
if __name__ == '__main__':
unittest.main()
```